input | output |
---|---|
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.documents import ImageDoc
from docarray.utils._internal.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
REMOTE_JPG = (
'https://upload.wikimedia.org/wikipedia/commons/8/80/'
'Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg'
)
@pytest.mark.slow
@pytest.mark.internet
def test_image():
image = ImageDoc(url=REMOTE_JPG)
image.tensor = image.url.load()
assert isinstance(image.tensor, np.ndarray)
def test_image_str():
image = parse_obj_as(ImageDoc, 'http://myurl.jpg')
assert image.url == 'http://myurl.jpg'
def test_image_np():
image = parse_obj_as(ImageDoc, np.zeros((10, 10, 3)))
assert (image.tensor == np.zeros((10, 10, 3))).all()
def test_image_torch():
image = parse_obj_as(ImageDoc, torch.zeros(10, 10, 3))
assert (image.tensor == torch.zeros(10, 10, 3)).all()
@pytest.mark.tensorflow
def test_image_tensorflow():
image = ImageDoc(tensor=tf.zeros((10, 10, 3)))
assert tnp.allclose(image.tensor.tensor, tf.zeros((10, 10, 3)))
def test_image_shortcut_doc():
class MyDoc(BaseDoc):
image: ImageDoc
image2: ImageDoc
image3: ImageDoc
doc = MyDoc(
image='http://myurl.jpg',
image2=np.zeros((10, 10, 3)),
image3=torch.zeros(10, 10, 3),
)
assert doc.image.url == 'http://myurl.jpg'
assert (doc.image2.tensor == np.zeros((10, 10, 3))).all()
assert (doc.image3.tensor == torch.zeros(10, 10, 3)).all()
@pytest.mark.slow
@pytest.mark.internet
def test_byte():
img = ImageDoc(url=REMOTE_JPG)
img.bytes_ = img.url.load_bytes()
@pytest.mark.slow
@pytest.mark.internet
def test_byte_from_tensor():
img = ImageDoc(url=REMOTE_JPG)
img.tensor = img.url.load()
img.bytes_ = img.tensor.to_bytes()
assert isinstance(img.bytes_, bytes)
assert len(img.bytes_) > 0
|
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.documents import ImageDoc
from docarray.utils.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
REMOTE_JPG = (
'https://upload.wikimedia.org/wikipedia/commons/8/80/'
'Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg'
)
@pytest.mark.slow
@pytest.mark.internet
def test_image():
image = ImageDoc(url=REMOTE_JPG)
image.tensor = image.url.load()
assert isinstance(image.tensor, np.ndarray)
def test_image_str():
image = parse_obj_as(ImageDoc, 'http://myurl.jpg')
assert image.url == 'http://myurl.jpg'
def test_image_np():
image = parse_obj_as(ImageDoc, np.zeros((10, 10, 3)))
assert (image.tensor == np.zeros((10, 10, 3))).all()
def test_image_torch():
image = parse_obj_as(ImageDoc, torch.zeros(10, 10, 3))
assert (image.tensor == torch.zeros(10, 10, 3)).all()
@pytest.mark.tensorflow
def test_image_tensorflow():
image = ImageDoc(tensor=tf.zeros((10, 10, 3)))
assert tnp.allclose(image.tensor.tensor, tf.zeros((10, 10, 3)))
def test_image_shortcut_doc():
class MyDoc(BaseDoc):
image: ImageDoc
image2: ImageDoc
image3: ImageDoc
doc = MyDoc(
image='http://myurl.jpg',
image2=np.zeros((10, 10, 3)),
image3=torch.zeros(10, 10, 3),
)
assert doc.image.url == 'http://myurl.jpg'
assert (doc.image2.tensor == np.zeros((10, 10, 3))).all()
assert (doc.image3.tensor == torch.zeros(10, 10, 3)).all()
@pytest.mark.slow
@pytest.mark.internet
def test_byte():
img = ImageDoc(url=REMOTE_JPG)
img.bytes_ = img.url.load_bytes()
@pytest.mark.slow
@pytest.mark.internet
def test_byte_from_tensor():
img = ImageDoc(url=REMOTE_JPG)
img.tensor = img.url.load()
img.bytes_ = img.tensor.to_bytes()
assert isinstance(img.bytes_, bytes)
assert len(img.bytes_) > 0
|
from sqlalchemy.orm import Session
from sqlalchemy import Engine, exc, sql
def check_db_availability(engine: Engine, check_vector: bool = False) -> None:
try:
with engine.connect() as conn:
if check_vector:
conn.execute(sql.text("""SELECT Vec_Dims("[1]");"""))
else:
conn.execute(sql.text("""SELECT 1;"""))
except exc.DatabaseError as e:
db_error_code = e.orig.args[0]
if db_error_code == 1045:
raise ValueError(
"Could not connect to the TiDB server. "
"Please check if the connection string is correct."
) from e
elif db_error_code == 1305:
raise ValueError(
"Please confirm if your TiDB supports vector search. "
"You can check this by running the query `SELECT Vec_Dims('[1]')` in TiDB."
) from e
else:
raise ValueError(
"An error occurred while checking the database availability."
) from e
def get_or_create(session: Session, model, **kwargs):
instance = session.query(model).filter_by(**kwargs).first()
if instance:
return instance, False
else:
instance = model(**kwargs)
session.add(instance)
session.commit()
return instance, True
def remove_empty_values(input_dict):
"""
Remove entries with empty values from the dictionary.
Parameters
----------
input_dict (dict): The dictionary from which empty values need to be removed.
Returns
-------
dict: A new dictionary with all empty values removed.
"""
# Create a new dictionary excluding empty values
return {key: value for key, value in input_dict.items() if value}
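# Illustrative usage sketch (not part of the original module); the DSN and the
# `User` model below are hypothetical placeholders.
#
# from sqlalchemy import create_engine
# engine = create_engine("mysql+pymysql://user:pass@host:4000/test")
# check_db_availability(engine, check_vector=True)
# with Session(engine) as session:
#     row, created = get_or_create(session, User, name="alice")
def _demo_remove_empty_values() -> None:
# every falsy value ("", None, 0, [], {}) is dropped, not just None
assert remove_empty_values({"a": 1, "b": "", "c": None, "d": 0}) == {"a": 1}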
|
from sqlalchemy.orm import Session
from sqlalchemy import Engine, exc, sql
def check_db_availability(engine: Engine, check_vector: bool = False) -> None:
try:
with engine.connect() as conn:
if check_vector:
conn.execute(sql.text("""SELECT Vec_Dims("[1]");"""))
else:
conn.execute(sql.text("""SELECT 1;"""))
except exc.DatabaseError as e:
db_error_code = e.orig.args[0]
if db_error_code == 1045:
raise ValueError(
"Could not connect to the TiDB server. "
"Please check if the connection string is correct."
) from e
elif db_error_code == 1305:
raise ValueError(
"Please confirm if your TiDB supports vector search. "
"You can check this by running the query `SELECT Vec_Dims('[1]')` in TiDB."
) from e
else:
raise ValueError(
"An error occurred while checking the database availability."
) from e
def get_or_create(session: Session, model, **kwargs):
instance = session.query(model).filter_by(**kwargs).first()
if instance:
return instance, False
else:
instance = model(**kwargs)
session.add(instance)
session.commit()
return instance, True
def remove_empty_values(input_dict):
"""
Remove entries with empty values from the dictionary.
Parameters:
input_dict (dict): The dictionary from which empty values need to be removed.
Returns:
dict: A new dictionary with all empty values removed.
"""
# Create a new dictionary excluding empty values
return {key: value for key, value in input_dict.items() if value}
|
import enum
from typing import Any, List, Optional, Union
import pydantic
import backend.data.graph
from backend.data.api_key import APIKeyPermission, APIKeyWithoutHash
class Methods(enum.Enum):
SUBSCRIBE = "subscribe"
UNSUBSCRIBE = "unsubscribe"
EXECUTION_EVENT = "execution_event"
ERROR = "error"
HEARTBEAT = "heartbeat"
class WsMessage(pydantic.BaseModel):
method: Methods
data: Optional[Union[dict[str, Any], list[Any], str]] = None
success: bool | None = None
channel: str | None = None
error: str | None = None
class ExecutionSubscription(pydantic.BaseModel):
graph_id: str
graph_version: int
class ExecuteGraphResponse(pydantic.BaseModel):
graph_exec_id: str
class CreateGraph(pydantic.BaseModel):
template_id: str | None = None
template_version: int | None = None
graph: backend.data.graph.Graph | None = None
class CreateAPIKeyRequest(pydantic.BaseModel):
name: str
permissions: List[APIKeyPermission]
description: Optional[str] = None
class CreateAPIKeyResponse(pydantic.BaseModel):
api_key: APIKeyWithoutHash
plain_text_key: str
class SetGraphActiveVersion(pydantic.BaseModel):
active_graph_version: int
class UpdatePermissionsRequest(pydantic.BaseModel):
permissions: List[APIKeyPermission]
class RequestTopUp(pydantic.BaseModel):
credit_amount: int
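# Illustrative sketch (not part of the original module), assuming pydantic v2's
# serialization API; the channel value is a hypothetical placeholder.
#
# msg = WsMessage(
#     method=Methods.SUBSCRIBE,
#     data=ExecutionSubscription(graph_id="g1", graph_version=1).model_dump(),
#     channel="graph_executions",
# )
# msg.model_dump_json()  # method serializes to its string value, "subscribe"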
|
import enum
from typing import Any, List, Optional, Union
import pydantic
import backend.data.graph
from backend.data.api_key import APIKeyPermission, APIKeyWithoutHash
class Methods(enum.Enum):
SUBSCRIBE = "subscribe"
UNSUBSCRIBE = "unsubscribe"
EXECUTION_EVENT = "execution_event"
ERROR = "error"
HEARTBEAT = "heartbeat"
class WsMessage(pydantic.BaseModel):
method: Methods
data: Optional[Union[dict[str, Any], list[Any], str]] = None
success: bool | None = None
channel: str | None = None
error: str | None = None
class ExecutionSubscription(pydantic.BaseModel):
graph_id: str
class SubscriptionDetails(pydantic.BaseModel):
event_type: str
channel: str
graph_id: str
class CreateGraph(pydantic.BaseModel):
template_id: str | None = None
template_version: int | None = None
graph: backend.data.graph.Graph | None = None
class CreateAPIKeyRequest(pydantic.BaseModel):
name: str
permissions: List[APIKeyPermission]
description: Optional[str] = None
class CreateAPIKeyResponse(pydantic.BaseModel):
api_key: APIKeyWithoutHash
plain_text_key: str
class SetGraphActiveVersion(pydantic.BaseModel):
active_graph_version: int
class UpdatePermissionsRequest(pydantic.BaseModel):
permissions: List[APIKeyPermission]
class RequestTopUp(pydantic.BaseModel):
credit_amount: int
|
import PIL.Image
import pytest
import torch
from prototype_common_utils import make_bounding_box, make_detection_mask, make_image
from torchvision.prototype import features
from torchvision.prototype.transforms.functional import to_image_pil
from torchvision.prototype.transforms.utils import has_all, has_any
IMAGE = make_image(color_space=features.ColorSpace.RGB)
BOUNDING_BOX = make_bounding_box(format=features.BoundingBoxFormat.XYXY, spatial_size=IMAGE.spatial_size)
MASK = make_detection_mask(size=IMAGE.spatial_size)
@pytest.mark.parametrize(
("sample", "types", "expected"),
[
((IMAGE, BOUNDING_BOX, MASK), (features.Image,), True),
((IMAGE, BOUNDING_BOX, MASK), (features.BoundingBox,), True),
((IMAGE, BOUNDING_BOX, MASK), (features.Mask,), True),
((IMAGE, BOUNDING_BOX, MASK), (features.Image, features.BoundingBox), True),
((IMAGE, BOUNDING_BOX, MASK), (features.Image, features.Mask), True),
((IMAGE, BOUNDING_BOX, MASK), (features.BoundingBox, features.Mask), True),
((MASK,), (features.Image, features.BoundingBox), False),
((BOUNDING_BOX,), (features.Image, features.Mask), False),
((IMAGE,), (features.BoundingBox, features.Mask), False),
(
(IMAGE, BOUNDING_BOX, MASK),
(features.Image, features.BoundingBox, features.Mask),
True,
),
((), (features.Image, features.BoundingBox, features.Mask), False),
((IMAGE, BOUNDING_BOX, MASK), (lambda obj: isinstance(obj, features.Image),), True),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: False,), False),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: True,), True),
((IMAGE,), (features.Image, PIL.Image.Image, features.is_simple_tensor), True),
((torch.Tensor(IMAGE),), (features.Image, PIL.Image.Image, features.is_simple_tensor), True),
((to_image_pil(IMAGE),), (features.Image, PIL.Image.Image, features.is_simple_tensor), True),
],
)
def test_has_any(sample, types, expected):
assert has_any(sample, *types) is expected
@pytest.mark.parametrize(
("sample", "types", "expected"),
[
((IMAGE, BOUNDING_BOX, MASK), (features.Image,), True),
((IMAGE, BOUNDING_BOX, MASK), (features.BoundingBox,), True),
((IMAGE, BOUNDING_BOX, MASK), (features.Mask,), True),
((IMAGE, BOUNDING_BOX, MASK), (features.Image, features.BoundingBox), True),
((IMAGE, BOUNDING_BOX, MASK), (features.Image, features.Mask), True),
((IMAGE, BOUNDING_BOX, MASK), (features.BoundingBox, features.Mask), True),
(
(IMAGE, BOUNDING_BOX, MASK),
(features.Image, features.BoundingBox, features.Mask),
True,
),
((BOUNDING_BOX, MASK), (features.Image, features.BoundingBox), False),
((BOUNDING_BOX, MASK), (features.Image, features.Mask), False),
((IMAGE, MASK), (features.BoundingBox, features.Mask), False),
(
(IMAGE, BOUNDING_BOX, MASK),
(features.Image, features.BoundingBox, features.Mask),
True,
),
((BOUNDING_BOX, MASK), (features.Image, features.BoundingBox, features.Mask), False),
((IMAGE, MASK), (features.Image, features.BoundingBox, features.Mask), False),
((IMAGE, BOUNDING_BOX), (features.Image, features.BoundingBox, features.Mask), False),
(
(IMAGE, BOUNDING_BOX, MASK),
(lambda obj: isinstance(obj, (features.Image, features.BoundingBox, features.Mask)),),
True,
),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: False,), False),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: True,), True),
],
)
def test_has_all(sample, types, expected):
assert has_all(sample, *types) is expected
|
import PIL.Image
import pytest
import torch
from prototype_common_utils import make_bounding_box, make_detection_mask, make_image
from torchvision.prototype import features
from torchvision.prototype.transforms._utils import has_all, has_any
from torchvision.prototype.transforms.functional import to_image_pil
IMAGE = make_image(color_space=features.ColorSpace.RGB)
BOUNDING_BOX = make_bounding_box(format=features.BoundingBoxFormat.XYXY, spatial_size=IMAGE.spatial_size)
MASK = make_detection_mask(size=IMAGE.spatial_size)
@pytest.mark.parametrize(
("sample", "types", "expected"),
[
((IMAGE, BOUNDING_BOX, MASK), (features.Image,), True),
((IMAGE, BOUNDING_BOX, MASK), (features.BoundingBox,), True),
((IMAGE, BOUNDING_BOX, MASK), (features.Mask,), True),
((IMAGE, BOUNDING_BOX, MASK), (features.Image, features.BoundingBox), True),
((IMAGE, BOUNDING_BOX, MASK), (features.Image, features.Mask), True),
((IMAGE, BOUNDING_BOX, MASK), (features.BoundingBox, features.Mask), True),
((MASK,), (features.Image, features.BoundingBox), False),
((BOUNDING_BOX,), (features.Image, features.Mask), False),
((IMAGE,), (features.BoundingBox, features.Mask), False),
(
(IMAGE, BOUNDING_BOX, MASK),
(features.Image, features.BoundingBox, features.Mask),
True,
),
((), (features.Image, features.BoundingBox, features.Mask), False),
((IMAGE, BOUNDING_BOX, MASK), (lambda obj: isinstance(obj, features.Image),), True),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: False,), False),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: True,), True),
((IMAGE,), (features.Image, PIL.Image.Image, features.is_simple_tensor), True),
((torch.Tensor(IMAGE),), (features.Image, PIL.Image.Image, features.is_simple_tensor), True),
((to_image_pil(IMAGE),), (features.Image, PIL.Image.Image, features.is_simple_tensor), True),
],
)
def test_has_any(sample, types, expected):
assert has_any(sample, *types) is expected
@pytest.mark.parametrize(
("sample", "types", "expected"),
[
((IMAGE, BOUNDING_BOX, MASK), (features.Image,), True),
((IMAGE, BOUNDING_BOX, MASK), (features.BoundingBox,), True),
((IMAGE, BOUNDING_BOX, MASK), (features.Mask,), True),
((IMAGE, BOUNDING_BOX, MASK), (features.Image, features.BoundingBox), True),
((IMAGE, BOUNDING_BOX, MASK), (features.Image, features.Mask), True),
((IMAGE, BOUNDING_BOX, MASK), (features.BoundingBox, features.Mask), True),
(
(IMAGE, BOUNDING_BOX, MASK),
(features.Image, features.BoundingBox, features.Mask),
True,
),
((BOUNDING_BOX, MASK), (features.Image, features.BoundingBox), False),
((BOUNDING_BOX, MASK), (features.Image, features.Mask), False),
((IMAGE, MASK), (features.BoundingBox, features.Mask), False),
(
(IMAGE, BOUNDING_BOX, MASK),
(features.Image, features.BoundingBox, features.Mask),
True,
),
((BOUNDING_BOX, MASK), (features.Image, features.BoundingBox, features.Mask), False),
((IMAGE, MASK), (features.Image, features.BoundingBox, features.Mask), False),
((IMAGE, BOUNDING_BOX), (features.Image, features.BoundingBox, features.Mask), False),
(
(IMAGE, BOUNDING_BOX, MASK),
(lambda obj: isinstance(obj, (features.Image, features.BoundingBox, features.Mask)),),
True,
),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: False,), False),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: True,), True),
],
)
def test_has_all(sample, types, expected):
assert has_all(sample, *types) is expected
|
from pydantic import BaseModel
from typing import Any, AsyncGenerator, List
from llama_index.llms.nvidia import NVIDIA as Interface
from llama_index.core.program import LLMTextCompletionProgram
from llama_index.core.program import FunctionCallingProgram
import pytest
from llama_index.llms.nvidia.utils import (
MODEL_TABLE,
)
from openai.types.completion import Completion, CompletionUsage
from openai.types.chat.chat_completion import (
ChatCompletion,
ChatCompletionMessage,
Choice,
ChoiceLogprobs,
)
from unittest.mock import MagicMock, patch
NVIDIA_STRUCT_OUT_MODELS = []
for model in MODEL_TABLE.values():
if model.supports_structured_output:
NVIDIA_STRUCT_OUT_MODELS.append(model.id)
class Song(BaseModel):
"""Data model for a song."""
title: str
length_seconds: int
class Album(BaseModel):
"""Data model for an album."""
name: str
artist: str
songs: List[Song]
prompt_template_str = """\
Generate an example album, with an artist and a list of songs. \
Using the movie {movie_name} as inspiration.\
"""
def create_mock_chat_completion_v1_response(model: str) -> ChatCompletion:
return ChatCompletion(
id="chatcmpl-4162e407-e121-42b4-8590-1c173380be7d",
object="chat.completion",
model=model,
created=1713474384,
usage=CompletionUsage(
completion_tokens=304, prompt_tokens=11, total_tokens=315
),
choices=[
Choice(
finish_reason="stop",
index=0,
logprobs=ChoiceLogprobs(
content=None,
text_offset=[],
token_logprobs=[0.0, 0.0],
tokens=[],
top_logprobs=[],
),
message=ChatCompletionMessage(
content="""{
"name": "Greatest Hits",
"artist": "Best Artist",
"songs": [
{"title": "Hit Song 1", "length_seconds": 180},
{"title": "Hit Song 2", "length_seconds": 210}
]
}""",
role="assistant",
function_call=None,
tool_calls=None,
),
)
],
)
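# Illustrative note (not part of the tests): the JSON in the mocked message
# content above is what LLMTextCompletionProgram validates into the Album model,
# roughly Album.parse_raw(mocked_content) -> Album(name="Greatest Hits",
# artist="Best Artist", songs=[...]) (pydantic v1-style API).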
async def mock_async_chat_completion_stream_v1(
*args: Any, **kwargs: Any
) -> AsyncGenerator[ChatCompletion, None]:
async def gen() -> AsyncGenerator[ChatCompletion, None]:
# yield the mocked response once: create_mock_chat_completion_v1_response
# returns a single ChatCompletion, not an iterable of stream chunks
yield create_mock_chat_completion_v1_response(*args, **kwargs)
return gen()
# @respx.mock
@patch("llama_index.llms.openai.base.SyncOpenAI")
@pytest.mark.parametrize("model", NVIDIA_STRUCT_OUT_MODELS)
def test_prompt_generation(MockSyncOpenAI: MagicMock, model):
mock_instance = MockSyncOpenAI.return_value
mock_instance.chat.completions.create.return_value = (
create_mock_chat_completion_v1_response(model)
)
llm = Interface(api_key="BOGUS", model=model)
program = LLMTextCompletionProgram.from_defaults(
output_cls=Album, prompt_template_str=prompt_template_str, verbose=True, llm=llm
)
assert llm.metadata is not None
output = program(movie_name="The Shining")
assert isinstance(output, Album), f"Expected Album, but got {type(output)}"
assert isinstance(output.name, str), "Name should be a string"
assert isinstance(output.artist, str), "artist should be a string"
assert isinstance(output.songs, list), "Songs should be a list"
assert all(
isinstance(song, Song) for song in output.songs
), "All songs should be of type Song"
assert len(output.songs) > 0, "Album should contain at least one song"
@pytest.mark.parametrize("model", MODEL_TABLE.keys() - NVIDIA_STRUCT_OUT_MODELS)
def test_unsupported_models(model: str):
llm = Interface(api_key="BOGUS", model=model)
with pytest.raises(ValueError) as e:
FunctionCallingProgram.from_defaults(
output_cls=Album,
prompt_template_str=prompt_template_str,
verbose=True,
llm=llm,
)
assert f"{model} does not support function calling API." in str(e.value)
|
import respx
from httpx import Response
from pydantic import BaseModel
from typing import List
from llama_index.llms.nvidia import NVIDIA as Interface
from llama_index.core.program import LLMTextCompletionProgram
from llama_index.core.program import FunctionCallingProgram
import pytest
from llama_index.llms.nvidia.utils import (
NVIDIA_FUNTION_CALLING_MODELS,
API_CATALOG_MODELS,
)
class Song(BaseModel):
"""Data model for a song."""
title: str
length_seconds: int
class Album(BaseModel):
"""Data model for an album."""
name: str
artist: str
songs: List[Song]
prompt_template_str = """\
Generate an example album, with an artist and a list of songs. \
Using the movie {movie_name} as inspiration.\
"""
def create_mock_chat_completion_v1_response() -> dict:
return {
"id": "chatcmpl-4162e407-e121-42b4-8590-1c173380be7d",
"object": "chat.completion",
"created": 1713474384,
"model": "mocked-model",
"usage": {"completion_tokens": 304, "prompt_tokens": 11, "total_tokens": 315},
"choices": [
{
"finish_reason": "stop",
"index": 0,
"logprobs": {
"content": None,
"text_offset": [],
"token_logprobs": [0.0, 0.0],
"tokens": [],
"top_logprobs": [],
},
"message": {
"content": """{
"name": "Greatest Hits",
"artist": "Best Artist",
"songs": [
{"title": "Hit Song 1", "length_seconds": 180},
{"title": "Hit Song 2", "length_seconds": 210}
]
}""",
"role": "assistant",
"function_call": None,
"tool_calls": None,
},
}
],
}
@respx.mock
@pytest.mark.parametrize("model", NVIDIA_FUNTION_CALLING_MODELS)
def test_prompt_generation(model):
respx.post("https://integrate.api.nvidia.com/v1/chat/completions").mock(
return_value=Response(200, json=create_mock_chat_completion_v1_response())
)
llm = Interface(api_key="BOGUS", model=model)
program = LLMTextCompletionProgram.from_defaults(
output_cls=Album, prompt_template_str=prompt_template_str, verbose=True, llm=llm
)
output = program(movie_name="The Shining")
assert isinstance(output, Album), f"Expected Album, but got {type(output)}"
assert isinstance(output.name, str), "Name should be a string"
assert isinstance(output.artist, str), "artist should be a string"
assert isinstance(output.songs, list), "Songs should be a list"
assert all(
isinstance(song, Song) for song in output.songs
), "All songs should be of type Song"
assert len(output.songs) > 0, "Album should contain at least one song"
@pytest.mark.parametrize(
"model", API_CATALOG_MODELS.keys() - NVIDIA_FUNTION_CALLING_MODELS
)
def test_unsupported_models(model: str):
llm = Interface(api_key="BOGUS", model=model)
with pytest.raises(ValueError) as e:
FunctionCallingProgram.from_defaults(
output_cls=Album,
prompt_template_str=prompt_template_str,
verbose=True,
llm=llm,
)
assert f"{model} does not support function calling API." in str(e.value)
@pytest.mark.asyncio()
@respx.mock
@pytest.mark.parametrize("model", NVIDIA_FUNTION_CALLING_MODELS)
async def test_async_program(model) -> None:
respx.post("https://integrate.api.nvidia.com/v1/chat/completions").mock(
return_value=Response(200, json=create_mock_chat_completion_v1_response())
)
llm = Interface(api_key="BOGUS", model=model)
program = LLMTextCompletionProgram.from_defaults(
output_cls=Album, prompt_template_str=prompt_template_str, verbose=True, llm=llm
)
output = program(movie_name="The Shining")
assert isinstance(output, Album), f"Expected Album, but got {type(output)}"
assert isinstance(output.name, str), "Name should be a string"
assert isinstance(output.artist, str), "artist should be a string"
assert isinstance(output.songs, list), "Songs should be a list"
assert all(
isinstance(song, Song) for song in output.songs
), "All songs should be of type Song"
assert len(output.songs) > 0, "Album should contain at least one song"
|
from typing import Optional, Union, Callable, Tuple, TYPE_CHECKING, Dict
if TYPE_CHECKING:
import numpy as np
from ...typing import ArrayType
from ... import DocumentArray
class MatchMixin:
"""A mixin that provides match functionality to DocumentArrays"""
def match(
self,
darray: 'DocumentArray',
metric: Union[
str, Callable[['ArrayType', 'ArrayType'], 'np.ndarray']
] = 'cosine',
limit: Optional[Union[int, float]] = 20,
normalization: Optional[Tuple[float, float]] = None,
metric_name: Optional[str] = None,
batch_size: Optional[int] = None,
exclude_self: bool = False,
filter: Optional[Dict] = None,
only_id: bool = False,
use_scipy: bool = False,
device: str = 'cpu',
num_worker: Optional[int] = 1,
**kwargs,
) -> None:
"""Compute embedding based nearest neighbour in `another` for each Document in `self`,
and store results in `matches`.
.. note::
'cosine', 'euclidean', 'sqeuclidean' are supported natively without extra dependency.
You can also use any other distance metric provided by ``scipy``, such as `braycurtis`, `canberra`, `chebyshev`,
`cityblock`, `correlation`, `cosine`, `dice`, `euclidean`, `hamming`, `jaccard`, `jensenshannon`,
`kulsinski`, `mahalanobis`, `matching`, `minkowski`, `rogerstanimoto`, `russellrao`, `seuclidean`,
`sokalmichener`, `sokalsneath`, `sqeuclidean`, `wminkowski`, `yule`.
To use a scipy metric, set ``use_scipy=True``.
- To rescale all match values into [0, 1], use ``dA.match(dB, normalization=(0, 1))``
- To invert distances into scores, with all values in [0, 1],
use ``dA.match(dB, normalization=(1, 0))``. Note how ``normalization`` differs from the previous example.
- If a custom distance metric is provided, make sure it returns distances rather than similarities,
i.e. the smaller the better.
:param darray: the other DocumentArray to match against
:param metric: the distance metric
:param limit: the maximum number of matches; defaults to 20 when not given.
:param normalization: a tuple ``(a, b)`` used for min-max normalization:
the min distance is rescaled to ``a``, the max distance to ``b``,
and all values are rescaled into the range ``[a, b]``.
:param metric_name: if provided, then match result will be marked with this string.
:param batch_size: if provided, then ``darray`` is loaded in batches of at most ``batch_size``
elements. When ``darray`` is big, this can significantly speed up the computation.
:param exclude_self: if set, Documents in ``darray`` with same ``id`` as the left-hand values will not be
considered as matches.
:param filter: filter query used for pre-filtering
:param only_id: if set, then returning matches will only contain ``id``
:param use_scipy: if set, use ``scipy`` as the computation backend. Note, ``scipy`` does not support distance
on sparse matrix.
:param device: the computational device for ``.match()``, can be either `cpu` or `cuda`.
:param num_worker: the number of parallel workers. If not given, then the number of CPUs in the system will be used.
.. note::
This argument is only effective when ``batch_size`` is set.
:param kwargs: other kwargs.
"""
if not (self and darray):
return
for d in self:
d.matches.clear()
match_docs = darray.find(
self,
metric=metric,
limit=limit,
normalization=normalization,
metric_name=metric_name,
batch_size=batch_size,
exclude_self=exclude_self,
filter=filter,
only_id=only_id,
use_scipy=use_scipy,
device=device,
num_worker=num_worker,
)
if not isinstance(match_docs, list):
match_docs = [match_docs]
for m, d in zip(match_docs, self):
d.matches = m
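# Illustrative sketch (not part of the mixin): the min-max rescaling described
# by the ``normalization`` argument above; ``_minmax_rescale`` is a hypothetical
# helper, not the library's implementation.
def _minmax_rescale(values, a, b):
lo, hi = min(values), max(values)
return [a + (v - lo) * (b - a) / (hi - lo) for v in values]
# _minmax_rescale([2, 5, 8], 0, 1) -> [0.0, 0.5, 1.0]   (normalization=(0, 1))
# _minmax_rescale([2, 5, 8], 1, 0) -> [1.0, 0.5, 0.0]   (normalization=(1, 0))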
|
from typing import Optional, Union, Callable, Tuple, TYPE_CHECKING
if TYPE_CHECKING:
import numpy as np
from ...typing import ArrayType
from ... import DocumentArray
class MatchMixin:
"""A mixin that provides match functionality to DocumentArrays"""
def match(
self,
darray: 'DocumentArray',
metric: Union[
str, Callable[['ArrayType', 'ArrayType'], 'np.ndarray']
] = 'cosine',
limit: Optional[Union[int, float]] = 20,
normalization: Optional[Tuple[float, float]] = None,
metric_name: Optional[str] = None,
batch_size: Optional[int] = None,
exclude_self: bool = False,
only_id: bool = False,
use_scipy: bool = False,
device: str = 'cpu',
num_worker: Optional[int] = 1,
**kwargs,
) -> None:
"""Compute embedding based nearest neighbour in `another` for each Document in `self`,
and store results in `matches`.
.. note::
'cosine', 'euclidean', 'sqeuclidean' are supported natively without extra dependency.
You can also use any other distance metric provided by ``scipy``, such as `braycurtis`, `canberra`, `chebyshev`,
`cityblock`, `correlation`, `cosine`, `dice`, `euclidean`, `hamming`, `jaccard`, `jensenshannon`,
`kulsinski`, `mahalanobis`, `matching`, `minkowski`, `rogerstanimoto`, `russellrao`, `seuclidean`,
`sokalmichener`, `sokalsneath`, `sqeuclidean`, `wminkowski`, `yule`.
To use a scipy metric, set ``use_scipy=True``.
- To rescale all match values into [0, 1], use ``dA.match(dB, normalization=(0, 1))``
- To invert distances into scores, with all values in [0, 1],
use ``dA.match(dB, normalization=(1, 0))``. Note how ``normalization`` differs from the previous example.
- If a custom distance metric is provided, make sure it returns distances rather than similarities,
i.e. the smaller the better.
:param darray: the other DocumentArray to match against
:param metric: the distance metric
:param limit: the maximum number of matches; defaults to 20 when not given.
:param normalization: a tuple ``(a, b)`` used for min-max normalization:
the min distance is rescaled to ``a``, the max distance to ``b``,
and all values are rescaled into the range ``[a, b]``.
:param metric_name: if provided, then match result will be marked with this string.
:param batch_size: if provided, then ``darray`` is loaded in batches of at most ``batch_size``
elements. When ``darray`` is big, this can significantly speed up the computation.
:param exclude_self: if set, Documents in ``darray`` with same ``id`` as the left-hand values will not be
considered as matches.
:param only_id: if set, then returning matches will only contain ``id``
:param use_scipy: if set, use ``scipy`` as the computation backend. Note, ``scipy`` does not support distance
on sparse matrix.
:param device: the computational device for ``.match()``, can be either `cpu` or `cuda`.
:param num_worker: the number of parallel workers. If not given, then the number of CPUs in the system will be used.
.. note::
This argument is only effective when ``batch_size`` is set.
:param kwargs: other kwargs.
"""
if not (self and darray):
return
for d in self:
d.matches.clear()
match_docs = darray.find(
self,
metric=metric,
limit=limit,
normalization=normalization,
metric_name=metric_name,
batch_size=batch_size,
exclude_self=exclude_self,
only_id=only_id,
use_scipy=use_scipy,
device=device,
num_worker=num_worker,
)
if not isinstance(match_docs, list):
match_docs = [match_docs]
for m, d in zip(match_docs, self):
d.matches = m
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import GFLHead
def test_gfl_head_loss():
"""Tests gfl head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False))
self = GFLHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
loss_cls=dict(
type='QualityFocalLoss',
use_sigmoid=True,
beta=2.0,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
cls_scores, bbox_preds = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
empty_dfl_loss = sum(empty_gt_losses['loss_dfl'])
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
assert empty_dfl_loss.item() == 0, (
'there should be no dfl loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
onegt_dfl_loss = sum(one_gt_losses['loss_dfl'])
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
assert onegt_dfl_loss.item() > 0, 'dfl loss should be non-zero'
|
import mmcv
import torch
from mmdet.models.dense_heads import GFLHead
def test_gfl_head_loss():
"""Tests gfl head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False))
self = GFLHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
loss_cls=dict(
type='QualityFocalLoss',
use_sigmoid=True,
beta=2.0,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
cls_scores, bbox_preds = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
empty_dfl_loss = sum(empty_gt_losses['loss_dfl'])
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
assert empty_dfl_loss.item() == 0, (
'there should be no dfl loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
onegt_dfl_loss = sum(one_gt_losses['loss_dfl'])
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
assert onegt_dfl_loss.item() > 0, 'dfl loss should be non-zero'
|
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
@slow
def test_small_integration_test(self):
"""
For comparison run:
>>> import t5 # pip install t5==0.7.1
>>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary
>>> path_to_mtf_small_mt5_checkpoint = '<fill_in>'
>>> path_to_mtf_small_mt5_spm_model_path = '<fill_in>'
>>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_mt5_checkpoint, batch_size=1, tpu=None)
>>> vocab = SentencePieceVocabulary(path_to_mtf_small_mt5_spm_model_path, extra_ids=100)
>>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab)
"""
model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
labels = tokenizer("Hi I am", return_tensors="tf").input_ids
loss = model(input_ids, labels=labels).loss
mtf_score = -tf.math.reduce_mean(loss).numpy()
EXPECTED_SCORE = -21.228168
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
|
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
@slow
def test_small_integration_test(self):
"""
For comparison run:
>>> import t5 # pip install t5==0.7.1
>>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary
>>> path_to_mtf_small_mt5_checkpoint = '<fill_in>'
>>> path_to_mtf_small_mt5_spm_model_path = '<fill_in>'
>>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_mt5_checkpoint, batch_size=1, tpu=None)
>>> vocab = SentencePieceVocabulary(path_to_mtf_small_mt5_spm_model_path, extra_ids=100)
>>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab)
"""
model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
labels = tokenizer("Hi I am", return_tensors="tf").input_ids
loss = model(input_ids, labels=labels).loss
mtf_score = -tf.math.reduce_mean(loss).numpy()
EXPECTED_SCORE = -21.228168
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
|
import csv
import gzip
import logging
import math
import os
from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, losses, models, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Training parameters
model_name = "distilbert-base-uncased"
train_batch_size = 128
num_epochs = 1
max_seq_length = 32
# Save path to store our model
model_save_path = "output/training_stsb_simcse-{}-{}-{}".format(
model_name, train_batch_size, datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "data/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# Here we define our SentenceTransformer model
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# We use 1 Million sentences from Wikipedia to train our model
wikipedia_dataset_path = "data/wiki1m_for_simcse.txt"
if not os.path.exists(wikipedia_dataset_path):
util.http_get(
"https://huggingface.co/datasets/princeton-nlp/datasets-for-simcse/resolve/main/wiki1m_for_simcse.txt",
wikipedia_dataset_path,
)
# train_samples is a list of InputExample objects where we pass the same sentence twice to texts, i.e. texts=[sent, sent]
train_samples = []
with open(wikipedia_dataset_path, "r", encoding="utf8") as fIn:
for line in fIn:
line = line.strip()
if len(line) >= 10:
train_samples.append(InputExample(texts=[line, line]))
# Read STSbenchmark dataset and use it as development set
logging.info("Read STSbenchmark dev dataset")
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
if row["split"] == "dev":
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
elif row["split"] == "test":
test_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
dev_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(
dev_samples, batch_size=train_batch_size, name="sts-dev"
)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(
test_samples, batch_size=train_batch_size, name="sts-test"
)
# We train our model using the MultipleNegativesRankingLoss
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size, drop_last=True)
train_loss = losses.MultipleNegativesRankingLoss(model)
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
evaluation_steps = int(len(train_dataloader) * 0.1) # Evaluate every 10% of the data
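# Rough arithmetic (illustrative): with roughly 1M usable Wikipedia sentences
# and batch_size=128 (drop_last=True), len(train_dataloader) is about 7812
# batches, so warmup_steps = ceil(7812 * 1 * 0.1) = 782 and evaluation_steps = 781.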
logging.info("Training sentences: {}".format(len(train_samples)))
logging.info("Warmup-steps: {}".format(warmup_steps))
logging.info("Performance before training")
dev_evaluator(model)
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
epochs=num_epochs,
evaluation_steps=evaluation_steps,
warmup_steps=warmup_steps,
output_path=model_save_path,
optimizer_params={"lr": 5e-5},
use_amp=True, # Set to True if your GPU supports FP16 operations
)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
model = SentenceTransformer(model_save_path)
test_evaluator(model, output_path=model_save_path)
|
from torch.utils.data import DataLoader
import math
from sentence_transformers import models, losses
from sentence_transformers import LoggingHandler, SentenceTransformer, util, InputExample
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
import logging
from datetime import datetime
import os
import gzip
import csv
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Training parameters
model_name = "distilbert-base-uncased"
train_batch_size = 128
num_epochs = 1
max_seq_length = 32
# Save path to store our model
model_save_path = "output/training_stsb_simcse-{}-{}-{}".format(
model_name, train_batch_size, datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "data/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# Here we define our SentenceTransformer model
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# We use 1 Million sentences from Wikipedia to train our model
wikipedia_dataset_path = "data/wiki1m_for_simcse.txt"
if not os.path.exists(wikipedia_dataset_path):
util.http_get(
"https://huggingface.co/datasets/princeton-nlp/datasets-for-simcse/resolve/main/wiki1m_for_simcse.txt",
wikipedia_dataset_path,
)
# train_samples is a list of InputExample objects where we pass the same sentence twice to texts, i.e. texts=[sent, sent]
train_samples = []
with open(wikipedia_dataset_path, "r", encoding="utf8") as fIn:
for line in fIn:
line = line.strip()
if len(line) >= 10:
train_samples.append(InputExample(texts=[line, line]))
# Read STSbenchmark dataset and use it as development set
logging.info("Read STSbenchmark dev dataset")
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
if row["split"] == "dev":
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
elif row["split"] == "test":
test_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
dev_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(
dev_samples, batch_size=train_batch_size, name="sts-dev"
)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(
test_samples, batch_size=train_batch_size, name="sts-test"
)
# We train our model using the MultipleNegativesRankingLoss
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size, drop_last=True)
train_loss = losses.MultipleNegativesRankingLoss(model)
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
evaluation_steps = int(len(train_dataloader) * 0.1) # Evaluate every 10% of the data
logging.info("Training sentences: {}".format(len(train_samples)))
logging.info("Warmup-steps: {}".format(warmup_steps))
logging.info("Performance before training")
dev_evaluator(model)
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
epochs=num_epochs,
evaluation_steps=evaluation_steps,
warmup_steps=warmup_steps,
output_path=model_save_path,
optimizer_params={"lr": 5e-5},
use_amp=True, # Set to True if your GPU supports FP16 operations
)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
model = SentenceTransformer(model_save_path)
test_evaluator(model, output_path=model_save_path)
|
# dataset settings
dataset_type = 'CocoPanopticDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadPanopticAnnotations', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/panoptic_train2017.json',
data_prefix=dict(
img='train2017/', seg='annotations/panoptic_train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/panoptic_val2017.json',
data_prefix=dict(img='val2017/', seg='annotations/panoptic_val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoPanopticMetric',
ann_file=data_root + 'annotations/panoptic_val2017.json',
seg_prefix=data_root + 'annotations/panoptic_val2017/',
file_client_args=file_client_args,
)
test_evaluator = val_evaluator
# inference on test dataset and
# format the output results for submission.
# test_dataloader = dict(
# batch_size=1,
# num_workers=1,
# persistent_workers=True,
# drop_last=False,
# sampler=dict(type='DefaultSampler', shuffle=False),
# dataset=dict(
# type=dataset_type,
# data_root=data_root,
# ann_file='annotations/panoptic_image_info_test-dev2017.json',
# data_prefix=dict(img='test2017/', seg=None),
# test_mode=True,
# pipeline=test_pipeline))
# test_evaluator = dict(
# type='CocoPanopticMetric',
# format_only=True,
# outfile_prefix='./work_dirs/coco_panoptic/test')
|
# dataset settings
dataset_type = 'CocoPanopticDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadPanopticAnnotations', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/panoptic_train2017.json',
data_prefix=dict(
img='train2017/', seg='annotations/panoptic_train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/panoptic_val2017.json',
data_prefix=dict(img='val2017/', seg='annotations/panoptic_val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoPanopticMetric',
ann_file=data_root + 'annotations/panoptic_val2017.json',
seg_prefix=data_root + 'annotations/panoptic_val2017/',
)
test_evaluator = val_evaluator
# inference on test dataset and
# format the output results for submission.
# test_dataloader = dict(
# batch_size=1,
# num_workers=1,
# persistent_workers=True,
# drop_last=False,
# sampler=dict(type='DefaultSampler', shuffle=False),
# dataset=dict(
# type=dataset_type,
# data_root=data_root,
# ann_file='annotations/panoptic_image_info_test-dev2017.json',
# data_prefix=dict(img='test2017/', seg=None),
# test_mode=True,
# pipeline=test_pipeline))
# test_evaluator = dict(
# type='CocoPanopticMetric',
# format_only=True,
# outfile_prefix='./work_dirs/coco_panoptic/test')
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones import RegNet
regnet_test_data = [
('regnetx_400mf',
dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22,
bot_mul=1.0), [32, 64, 160, 384]),
('regnetx_800mf',
dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16,
bot_mul=1.0), [64, 128, 288, 672]),
('regnetx_1.6gf',
dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18,
bot_mul=1.0), [72, 168, 408, 912]),
('regnetx_3.2gf',
dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25,
bot_mul=1.0), [96, 192, 432, 1008]),
('regnetx_4.0gf',
dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23,
bot_mul=1.0), [80, 240, 560, 1360]),
('regnetx_6.4gf',
dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17,
bot_mul=1.0), [168, 392, 784, 1624]),
('regnetx_8.0gf',
dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23,
bot_mul=1.0), [80, 240, 720, 1920]),
('regnetx_12gf',
dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19,
bot_mul=1.0), [224, 448, 896, 2240]),
]
@pytest.mark.parametrize('arch_name,arch,out_channels', regnet_test_data)
def test_regnet_backbone(arch_name, arch, out_channels):
with pytest.raises(AssertionError):
# an invalid arch name (not a predefined RegNet) should raise an AssertionError
RegNet(arch_name + '233')
# Test RegNet with arch_name
model = RegNet(arch_name)
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, out_channels[0], 56, 56])
assert feat[1].shape == torch.Size([1, out_channels[1], 28, 28])
assert feat[2].shape == torch.Size([1, out_channels[2], 14, 14])
assert feat[3].shape == torch.Size([1, out_channels[3], 7, 7])
# Test RegNet with an arch dict; run a forward pass so these assertions
# exercise this model rather than reusing the features from above
model = RegNet(arch)
model.init_weights()
model.train()
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, out_channels[0], 56, 56])
assert feat[1].shape == torch.Size([1, out_channels[1], 28, 28])
assert feat[2].shape == torch.Size([1, out_channels[2], 14, 14])
assert feat[3].shape == torch.Size([1, out_channels[3], 7, 7])
|
import pytest
import torch
from mmdet.models.backbones import RegNet
regnet_test_data = [
('regnetx_400mf',
dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22,
bot_mul=1.0), [32, 64, 160, 384]),
('regnetx_800mf',
dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16,
bot_mul=1.0), [64, 128, 288, 672]),
('regnetx_1.6gf',
dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18,
bot_mul=1.0), [72, 168, 408, 912]),
('regnetx_3.2gf',
dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25,
bot_mul=1.0), [96, 192, 432, 1008]),
('regnetx_4.0gf',
dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23,
bot_mul=1.0), [80, 240, 560, 1360]),
('regnetx_6.4gf',
dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17,
bot_mul=1.0), [168, 392, 784, 1624]),
('regnetx_8.0gf',
dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23,
bot_mul=1.0), [80, 240, 720, 1920]),
('regnetx_12gf',
dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19,
bot_mul=1.0), [224, 448, 896, 2240]),
]
@pytest.mark.parametrize('arch_name,arch,out_channels', regnet_test_data)
def test_regnet_backbone(arch_name, arch, out_channels):
with pytest.raises(AssertionError):
# an invalid arch name (not a predefined RegNet) should raise an AssertionError
RegNet(arch_name + '233')
# Test RegNet with arch_name
model = RegNet(arch_name)
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, out_channels[0], 56, 56])
assert feat[1].shape == torch.Size([1, out_channels[1], 28, 28])
assert feat[2].shape == torch.Size([1, out_channels[2], 14, 14])
assert feat[3].shape == torch.Size([1, out_channels[3], 7, 7])
# Test RegNet with an arch dict; run a forward pass so these assertions
# exercise this model rather than reusing the features from above
model = RegNet(arch)
model.init_weights()
model.train()
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, out_channels[0], 56, 56])
assert feat[1].shape == torch.Size([1, out_channels[1], 28, 28])
assert feat[2].shape == torch.Size([1, out_channels[2], 14, 14])
assert feat[3].shape == torch.Size([1, out_channels[3], 7, 7])
|
# mypy: allow-untyped-defs
from dataclasses import dataclass
from typing import Callable
import torch
import torch.fx.node
import torch.utils._pytree as pytree
from torch._ops import HigherOrderOperator
def is_graphable(val) -> bool:
"""Definition: a graphable type is a type that that is an acceptable input/output type to a FX node."""
return isinstance(val, torch.fx.node.base_types)
def is_graphable_type(typ) -> bool:
"""Return whether the given type is graphable"""
return issubclass(typ, torch.fx.node.base_types)
def to_graphable(stuff):
"""Flattens stuff into a flat list of graphable types."""
# We can consider preserving things like List[int] to improve
# perf and readability (right now that is all flattened out)
flat_args, spec = pytree.tree_flatten(stuff)
for arg in flat_args:
if not is_graphable(arg):
raise RuntimeError(
f"Expected all pytree.tree_leaves of (args, kwargs) to be graphable types, but found "
f"non-fx-graphable type {type(arg)}. If this type is meant to be constant, mark it as "
f"via pytree.register_constant; otherwise, register it as a pytree."
)
return flat_args, spec
def from_graphable(flat_args, spec):
"""The inverse of to_graphable."""
stuff = pytree.tree_unflatten(flat_args, spec)
return stuff
def func_to_graphable(func):
"""
Pack and flatten a function type into graphable types.
This is useful for legalizing the function argument of `flat_apply`.
"""
return pytree.tree_flatten(_ConstantFunction(func))
@dataclass(frozen=True)
class _ConstantFunction:
func: Callable
def __call__(self, *args, **kwargs):
return self.func(*args, **kwargs)
pytree.register_constant(_ConstantFunction)
_op_types = (
torch._ops.OpOverload,
torch._ops.OpOverloadPacket,
torch._ops.HigherOrderOperator,
)
class FlatApply(HigherOrderOperator):
def __init__(self) -> None:
super().__init__("flat_apply")
def __call__(self, func, in_spec, *flat_args, **_unused):
"""
Functions that take in non-graphable types cannot directly be put into FX graph.
Given func(*args, **kwargs), if all of the non-graphable types are pytrees,
then we're able to store a call to flat_apply(func, in_spec, *flat_args) in the FX graph.
The semantics of flat_apply(func, in_spec, *flat_args) are roughly equivalent to:
>>> def flat_apply_impl(func, in_spec, *flat_args):
>>> args, kwargs = pytree.tree_unflatten(flat_args, in_spec)
>>> output = func(*args, **kwargs)
>>> return output
flat_apply supports the following two cases:
- an input type is a container type (e.g. of tensors) registered as a pytree.
We'll tree_flatten the input type and store the spec.
- an input type is a constant type (i.e. torch.compile will specialize on it)
registered with pytree.register_constant. The constant type goes directly
into the spec.
"""
assert isinstance(func, _op_types) or pytree._is_constant_holder(func)
assert len(_unused) == 0
return impl(func, in_spec, *flat_args)
def impl(func, in_spec, *flat_args):
if not isinstance(func, _op_types):
# assume _ConstantFunction
func = pytree._retrieve_constant(func)
assert isinstance(func, _ConstantFunction)
args, kwargs = from_graphable(flat_args, in_spec)
out = func(*args, **kwargs)
# Right now, all outputs must either be graphable or lists/tuples of graphables.
#
# TODO: The following can be updated to support non-graphable outputs and pytrees.
# For non-graphable constant outputs: the assumption would be that they are constant
# (every time the function runs those MUST be the same)
# For pytree outputs:
# I'm not sure if we need to return (flat_output, spec) or just (flat_output,):
# in the latter case the tracers need to carry out the output specs
# (they need to know how to reconstruct the object from just the flat_output).
def is_valid_output(x):
if isinstance(x, (tuple, list)):
return all(map(is_valid_output, x))
return is_graphable(x)
assert is_valid_output(out)
return out
flat_apply = FlatApply()
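# Illustrative round trip through to_graphable/from_graphable (a sketch, not
# part of this module's API; ints and Tensors are graphable leaf types):
#
#   args, kwargs = (torch.randn(2), {"scale": 3}), {}
#   flat_args, spec = to_graphable((args, kwargs))    # leaves: [tensor, 3]
#   args2, kwargs2 = from_graphable(flat_args, spec)  # structure restored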
|
# mypy: allow-untyped-defs
from dataclasses import dataclass
from typing import Callable
import torch
import torch.fx.node
import torch.utils._pytree as pytree
from torch._ops import HigherOrderOperator
def is_graphable(val) -> bool:
"""Definition: a graphable type is a type that that is an acceptable input/output type to a FX node."""
return isinstance(val, torch.fx.node.base_types)
def is_graphable_type(typ) -> bool:
"""Return whether the given type is graphable"""
return issubclass(typ, torch.fx.node.base_types)
def to_graphable(stuff):
"""Flattens stuff into a flat list of graphable types."""
# We can consider preserving things like List[int] to improve
# perf and readability (right now that is all flattened out)
flat_args, spec = pytree.tree_flatten(stuff)
for arg in flat_args:
if not is_graphable(arg):
raise RuntimeError(
f"Expected all pytree.tree_leaves of (args, kwargs) to be graphable types, but found "
f"non-fx-graphable type {type(arg)}. If this type is meant to be constant, mark it as "
f"via pytree.register_constant; otherwise, register it as a pytree."
)
return flat_args, spec
def from_graphable(flat_args, spec):
"""The inverse of to_graphable."""
stuff = pytree.tree_unflatten(flat_args, spec)
return stuff
def func_to_graphable(func):
"""
Pack and flatten a function type into graphable types.
This is useful for legalizing the function argument of `flat_apply`.
"""
return pytree.tree_flatten(_ConstantFunction(func))
@dataclass(frozen=True)
class _ConstantFunction:
func: Callable
def __call__(self, *args, **kwargs):
return self.func(*args, **kwargs)
pytree.register_constant(_ConstantFunction)
_op_types = (
torch._ops.OpOverload,
torch._ops.OpOverloadPacket,
torch._ops.HigherOrderOperator,
)
class FlatApply(HigherOrderOperator):
def __init__(self) -> None:
super().__init__("flat_apply")
def __call__(self, func, in_spec, *flat_args, **_unused):
"""
Functions that take in non-graphable types cannot directly be put into FX graph.
Given func(*args, **kwargs), if all of the non-graphable types are pytrees,
then we're able to store a call to flat_apply(func, in_spec, *flat_args) in the FX graph.
The semantics of flat_apply(func, in_spec, *flat_args) are roughly equivalent to:
>>> def flat_apply_impl(func, in_spec, *flat_args):
>>> args, kwargs = pytree.tree_unflatten(flat_args, in_spec)
>>> output = func(*args, **kwargs)
>>> return output
flat_apply supports the following two cases:
- an input type is a container type (e.g. of tensors) registered as a pytree.
We'll tree_flatten the input type and store the spec.
- an input type is a constant type (i.e. torch.compile will specialize on it)
registered with pytree.register_constant. The constant type goes directly
into the spec.
"""
assert isinstance(func, _op_types) or pytree._is_constant_holder(func)
assert len(_unused) == 0
return impl(func, in_spec, *flat_args)
def impl(func, in_spec, *flat_args):
if not isinstance(func, _op_types):
# assume _ConstantFunction
func = pytree._retrieve_constant(func)
assert isinstance(func, _ConstantFunction)
args, kwargs = from_graphable(flat_args, in_spec)
out = func(*args, **kwargs)
# Right now, all outputs must either be graphable or lists/tuples of graphables.
#
# TODO: The following can be updated to support non-graphable outputs and pytrees.
# For non-graphable constant outputs: the assumption would be that they are constant
    # (every time the function runs those MUST be the same)
# For pytree outputs:
# I'm not sure if we need to return (flat_output, spec) or just (flat_output,):
# in the latter case the tracers need to carry out the output specs
# (they need to know how to reconstruct the object from just the flat_output).
def is_valid_output(x):
if isinstance(x, (tuple, list)):
return all(map(is_valid_output, x))
return is_graphable(x)
assert is_valid_output(out)
return out
flat_apply = FlatApply()
|
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .boxinst import BoxInst
from .base_detr import DetectionTransformer
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .condinst import CondInst
from .conditional_detr import ConditionalDETR
from .cornernet import CornerNet
from .crowddet import CrowdDet
from .d2_wrapper import Detectron2Wrapper
from .ddod import DDOD
from .deformable_detr import DeformableDETR
from .detr import DETR
from .dino import DINO
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
from .lad import LAD
from .mask2former import Mask2Former
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .maskformer import MaskFormer
from .nasfcos import NASFCOS
from .paa import PAA
from .panoptic_fpn import PanopticFPN
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
from .point_rend import PointRend
from .queryinst import QueryInst
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .rtmdet import RTMDet
from .scnet import SCNet
from .semi_base import SemiBaseDetector
from .single_stage import SingleStageDetector
from .soft_teacher import SoftTeacher
from .solo import SOLO
from .solov2 import SOLOv2
from .sparse_rcnn import SparseRCNN
from .tood import TOOD
from .trident_faster_rcnn import TridentFasterRCNN
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
from .yolof import YOLOF
from .yolox import YOLOX
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',
'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',
'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',
'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',
'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO',
'SOLOv2', 'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',
'TwoStagePanopticSegmentor', 'PanopticFPN', 'QueryInst', 'LAD', 'TOOD',
'MaskFormer', 'DDOD', 'Mask2Former', 'SemiBaseDetector', 'SoftTeacher',
'RTMDet', 'Detectron2Wrapper', 'CrowdDet', 'CondInst', 'BoxInst',
'DetectionTransformer', 'ConditionalDETR', 'DINO'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .boxinst import BoxInst
from .base_detr import DetectionTransformer
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .condinst import CondInst
from .conditional_detr import ConditionalDETR
from .cornernet import CornerNet
from .crowddet import CrowdDet
from .d2_wrapper import Detectron2Wrapper
from .ddod import DDOD
from .deformable_detr import DeformableDETR
from .detr import DETR
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
from .lad import LAD
from .mask2former import Mask2Former
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .maskformer import MaskFormer
from .nasfcos import NASFCOS
from .paa import PAA
from .panoptic_fpn import PanopticFPN
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
from .point_rend import PointRend
from .queryinst import QueryInst
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .rtmdet import RTMDet
from .scnet import SCNet
from .semi_base import SemiBaseDetector
from .single_stage import SingleStageDetector
from .soft_teacher import SoftTeacher
from .solo import SOLO
from .solov2 import SOLOv2
from .sparse_rcnn import SparseRCNN
from .tood import TOOD
from .trident_faster_rcnn import TridentFasterRCNN
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
from .yolof import YOLOF
from .yolox import YOLOX
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',
'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',
'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',
'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',
'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO',
'SOLOv2', 'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',
'TwoStagePanopticSegmentor', 'PanopticFPN', 'QueryInst', 'LAD', 'TOOD',
'MaskFormer', 'DDOD', 'Mask2Former', 'SemiBaseDetector', 'SoftTeacher',
'RTMDet', 'Detectron2Wrapper', 'CrowdDet', 'CondInst', 'BoxInst',
'DetectionTransformer', 'ConditionalDETR'
]
|
from urllib.parse import parse_qs, urlparse
from youtube_transcript_api import YouTubeTranscriptApi
from youtube_transcript_api.formatters import TextFormatter
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class TranscribeYoutubeVideoBlock(Block):
class Input(BlockSchema):
youtube_url: str = SchemaField(
title="YouTube URL",
description="The URL of the YouTube video to transcribe",
placeholder="https://www.youtube.com/watch?v=dQw4w9WgXcQ",
)
class Output(BlockSchema):
video_id: str = SchemaField(description="The extracted YouTube video ID")
transcript: str = SchemaField(description="The transcribed text of the video")
error: str = SchemaField(
description="Any error message if the transcription fails"
)
def __init__(self):
super().__init__(
id="f3a8f7e1-4b1d-4e5f-9f2a-7c3d5a2e6b4c",
input_schema=TranscribeYoutubeVideoBlock.Input,
output_schema=TranscribeYoutubeVideoBlock.Output,
description="Transcribes a YouTube video.",
categories={BlockCategory.SOCIAL},
test_input={"youtube_url": "https://www.youtube.com/watch?v=dQw4w9WgXcQ"},
test_output=[
("video_id", "dQw4w9WgXcQ"),
(
"transcript",
"Never gonna give you up\nNever gonna let you down",
),
],
test_mock={
"get_transcript": lambda video_id: [
{"text": "Never gonna give you up"},
{"text": "Never gonna let you down"},
],
},
)
@staticmethod
def extract_video_id(url: str) -> str:
parsed_url = urlparse(url)
if parsed_url.netloc == "youtu.be":
return parsed_url.path[1:]
if parsed_url.netloc in ("www.youtube.com", "youtube.com"):
if parsed_url.path == "/watch":
p = parse_qs(parsed_url.query)
return p["v"][0]
if parsed_url.path[:7] == "/embed/":
return parsed_url.path.split("/")[2]
if parsed_url.path[:3] == "/v/":
return parsed_url.path.split("/")[2]
raise ValueError(f"Invalid YouTube URL: {url}")
@staticmethod
def get_transcript(video_id: str):
try:
transcript_list = YouTubeTranscriptApi.list_transcripts(video_id)
if not transcript_list:
raise ValueError(f"No transcripts found for the video: {video_id}")
            # Use the first available transcript's language code; StopIteration
            # from an empty listing is converted to ValueError below.
            first_transcript = next(iter(transcript_list))
            return YouTubeTranscriptApi.get_transcript(
                video_id, languages=[first_transcript.language_code]
            )
except Exception:
raise ValueError(f"No transcripts found for the video: {video_id}")
def run(self, input_data: Input, **kwargs) -> BlockOutput:
video_id = self.extract_video_id(input_data.youtube_url)
yield "video_id", video_id
transcript = self.get_transcript(video_id)
formatter = TextFormatter()
transcript_text = formatter.format_transcript(transcript)
yield "transcript", transcript_text
|
from urllib.parse import parse_qs, urlparse
from youtube_transcript_api import YouTubeTranscriptApi
from youtube_transcript_api.formatters import TextFormatter
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class TranscribeYoutubeVideoBlock(Block):
class Input(BlockSchema):
youtube_url: str = SchemaField(
title="YouTube URL",
description="The URL of the YouTube video to transcribe",
placeholder="https://www.youtube.com/watch?v=dQw4w9WgXcQ",
)
class Output(BlockSchema):
video_id: str = SchemaField(description="The extracted YouTube video ID")
transcript: str = SchemaField(description="The transcribed text of the video")
error: str = SchemaField(
description="Any error message if the transcription fails"
)
def __init__(self):
super().__init__(
id="f3a8f7e1-4b1d-4e5f-9f2a-7c3d5a2e6b4c",
input_schema=TranscribeYoutubeVideoBlock.Input,
output_schema=TranscribeYoutubeVideoBlock.Output,
description="Transcribes a YouTube video.",
categories={BlockCategory.SOCIAL},
test_input={"youtube_url": "https://www.youtube.com/watch?v=dQw4w9WgXcQ"},
test_output=[
("video_id", "dQw4w9WgXcQ"),
(
"transcript",
"Never gonna give you up\nNever gonna let you down",
),
],
test_mock={
"get_transcript": lambda video_id: [
{"text": "Never gonna give you up"},
{"text": "Never gonna let you down"},
],
},
)
@staticmethod
def extract_video_id(url: str) -> str:
parsed_url = urlparse(url)
if parsed_url.netloc == "youtu.be":
return parsed_url.path[1:]
if parsed_url.netloc in ("www.youtube.com", "youtube.com"):
if parsed_url.path == "/watch":
p = parse_qs(parsed_url.query)
return p["v"][0]
if parsed_url.path[:7] == "/embed/":
return parsed_url.path.split("/")[2]
if parsed_url.path[:3] == "/v/":
return parsed_url.path.split("/")[2]
raise ValueError(f"Invalid YouTube URL: {url}")
@staticmethod
def get_transcript(video_id: str):
return YouTubeTranscriptApi.get_transcript(video_id)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
video_id = self.extract_video_id(input_data.youtube_url)
yield "video_id", video_id
transcript = self.get_transcript(video_id)
formatter = TextFormatter()
transcript_text = formatter.format_transcript(transcript)
yield "transcript", transcript_text
|
from __future__ import annotations
from typing import Any, Optional, Union
import PIL.Image
import torch
from ._datapoint import Datapoint
class Mask(Datapoint):
"""[BETA] :class:`torch.Tensor` subclass for segmentation and detection masks.
Args:
data (tensor-like, PIL.Image.Image): Any data that can be turned into a tensor with :func:`torch.as_tensor` as
well as PIL images.
        dtype (torch.dtype, optional): Desired data type of the mask. If omitted, will be inferred from
            ``data``.
        device (torch.device, optional): Desired device of the mask. If omitted and ``data`` is a
            :class:`torch.Tensor`, the device is taken from it. Otherwise, the mask is constructed on the CPU.
        requires_grad (bool, optional): Whether autograd should record operations on the mask. If omitted and
            ``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
"""
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> Mask:
if isinstance(data, PIL.Image.Image):
from torchvision.transforms.v2 import functional as F
data = F.pil_to_tensor(data)
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
return tensor.as_subclass(cls)
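# Minimal usage sketch (illustrative; assumes this prototype datapoints API):
#
#   mask = Mask(torch.zeros(1, 16, 16, dtype=torch.uint8))
#   mask = Mask(PIL.Image.new("L", (16, 16)))  # PIL input is converted first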
|
from __future__ import annotations
from typing import Any, Optional, Union
import PIL.Image
import torch
from ._datapoint import Datapoint
class Mask(Datapoint):
"""[BETA] :class:`torch.Tensor` subclass for segmentation and detection masks.
Args:
data (tensor-like, PIL.Image.Image): Any data that can be turned into a tensor with :func:`torch.as_tensor` as
well as PIL images.
        dtype (torch.dtype, optional): Desired data type of the mask. If omitted, will be inferred from
            ``data``.
        device (torch.device, optional): Desired device of the mask. If omitted and ``data`` is a
            :class:`torch.Tensor`, the device is taken from it. Otherwise, the mask is constructed on the CPU.
        requires_grad (bool, optional): Whether autograd should record operations on the mask. If omitted and
            ``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
"""
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> Mask:
if isinstance(data, PIL.Image.Image):
from torchvision.transforms.v2 import functional as F
data = F.pil_to_tensor(data)
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
return cls._wrap(tensor)
|
_base_ = [
'../_base_/models/rpn_r50_fpn.py', '../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
val_evaluator = dict(metric='proposal_fast')
test_evaluator = val_evaluator
|
_base_ = [
'../_base_/models/rpn_r50_fpn.py', '../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_label=False),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes']),
]
data = dict(train=dict(pipeline=train_pipeline))
evaluation = dict(interval=1, metric='proposal_fast')
|
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'])
test_evaluator = val_evaluator
|
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'data_sample']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'data_sample']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
evaluation = dict(metric=['bbox', 'segm'])
|
from typing import Any, Dict, Optional, Union
import numpy as np
import PIL.Image
import torch
from torchvision import tv_tensors
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2._utils import is_pure_tensor
class PILToTensor(Transform):
"""Convert a PIL Image to a tensor of the same type - this does not scale values.
This transform does not support torchscript.
Converts a PIL Image (H x W x C) to a Tensor of shape (C x H x W).
"""
_transformed_types = (PIL.Image.Image,)
def transform(self, inpt: PIL.Image.Image, params: Dict[str, Any]) -> torch.Tensor:
return F.pil_to_tensor(inpt)
class ToImage(Transform):
"""Convert a tensor, ndarray, or PIL Image to :class:`~torchvision.tv_tensors.Image`
; this does not scale values.
This transform does not support torchscript.
"""
_transformed_types = (is_pure_tensor, PIL.Image.Image, np.ndarray)
def transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> tv_tensors.Image:
return F.to_image(inpt)
class ToPILImage(Transform):
"""Convert a tensor or an ndarray to PIL Image
This transform does not support torchscript.
Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape
H x W x C to a PIL Image while adjusting the value range depending on the ``mode``.
Args:
mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
If ``mode`` is ``None`` (default) there are some assumptions made about the input data:
- If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``.
- If the input has 3 channels, the ``mode`` is assumed to be ``RGB``.
- If the input has 2 channels, the ``mode`` is assumed to be ``LA``.
            - If the input has 1 channel, the ``mode`` is determined by the data type (i.e. ``int``, ``float``,
``short``).
.. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes
"""
_transformed_types = (is_pure_tensor, tv_tensors.Image, np.ndarray)
def __init__(self, mode: Optional[str] = None) -> None:
super().__init__()
self.mode = mode
def transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> PIL.Image.Image:
return F.to_pil_image(inpt, mode=self.mode)
class ToPureTensor(Transform):
"""Convert all TVTensors to pure tensors, removing associated metadata (if any).
This doesn't scale or change the values, only the type.
"""
_transformed_types = (tv_tensors.TVTensor,)
def transform(self, inpt: Any, params: Dict[str, Any]) -> torch.Tensor:
return inpt.as_subclass(torch.Tensor)
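# Minimal usage sketch (illustrative values; assumes the v2 transforms API):
#
#   img = ToImage()(np.zeros((8, 8, 3), dtype=np.uint8))  # -> tv_tensors.Image
#   pil = ToPILImage()(img)                               # -> PIL.Image.Image
#   plain = ToPureTensor()(img)                           # -> torch.Tensor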
|
from typing import Any, Dict, Optional, Union
import numpy as np
import PIL.Image
import torch
from torchvision import tv_tensors
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2._utils import is_pure_tensor
class PILToTensor(Transform):
"""Convert a PIL Image to a tensor of the same type - this does not scale values.
This transform does not support torchscript.
Converts a PIL Image (H x W x C) to a Tensor of shape (C x H x W).
"""
_transformed_types = (PIL.Image.Image,)
def _transform(self, inpt: PIL.Image.Image, params: Dict[str, Any]) -> torch.Tensor:
return F.pil_to_tensor(inpt)
class ToImage(Transform):
"""Convert a tensor, ndarray, or PIL Image to :class:`~torchvision.tv_tensors.Image`
; this does not scale values.
This transform does not support torchscript.
"""
_transformed_types = (is_pure_tensor, PIL.Image.Image, np.ndarray)
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> tv_tensors.Image:
return F.to_image(inpt)
class ToPILImage(Transform):
"""Convert a tensor or an ndarray to PIL Image
This transform does not support torchscript.
Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape
H x W x C to a PIL Image while adjusting the value range depending on the ``mode``.
Args:
mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
If ``mode`` is ``None`` (default) there are some assumptions made about the input data:
- If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``.
- If the input has 3 channels, the ``mode`` is assumed to be ``RGB``.
- If the input has 2 channels, the ``mode`` is assumed to be ``LA``.
            - If the input has 1 channel, the ``mode`` is determined by the data type (i.e. ``int``, ``float``,
``short``).
.. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes
"""
_transformed_types = (is_pure_tensor, tv_tensors.Image, np.ndarray)
def __init__(self, mode: Optional[str] = None) -> None:
super().__init__()
self.mode = mode
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> PIL.Image.Image:
return F.to_pil_image(inpt, mode=self.mode)
class ToPureTensor(Transform):
"""Convert all TVTensors to pure tensors, removing associated metadata (if any).
This doesn't scale or change the values, only the type.
"""
_transformed_types = (tv_tensors.TVTensor,)
def _transform(self, inpt: Any, params: Dict[str, Any]) -> torch.Tensor:
return inpt.as_subclass(torch.Tensor)
|
import asyncio
import time
import pytest
from jina import Client, Deployment, Executor, requests
from jina._docarray import Document, DocumentArray
from jina.excepts import BadServer
from jina.helper import random_port
class MyExecutor(Executor):
@requests(on='/hello')
async def task(self, doc: Document, **kwargs):
for i in range(100):
yield Document(text=f'{doc.text} {i}')
@requests(on='/world')
async def non_gen_task(self, docs: DocumentArray, **kwargs):
return docs
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['http', 'grpc'])
@pytest.mark.parametrize('include_gateway', [False, True])
async def test_streaming_deployment(protocol, include_gateway):
from jina import Deployment
port = random_port()
with Deployment(
uses=MyExecutor,
timeout_ready=-1,
protocol=protocol,
port=port,
include_gateway=include_gateway,
):
client = Client(port=port, protocol=protocol, asyncio=True)
i = 0
async for doc in client.stream_doc(
on='/hello', inputs=Document(text='hello world')
):
assert doc.text == f'hello world {i}'
i += 1
class WaitStreamExecutor(Executor):
@requests(on='/hello')
async def task(self, doc: Document, **kwargs):
for i in range(5):
yield Document(text=f'{doc.text} {i}')
await asyncio.sleep(0.5)
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['http', 'grpc'])
@pytest.mark.parametrize('include_gateway', [False, True])
async def test_streaming_delay(protocol, include_gateway):
from jina import Deployment
port = random_port()
with Deployment(
uses=WaitStreamExecutor,
timeout_ready=-1,
protocol=protocol,
port=port,
include_gateway=include_gateway,
):
client = Client(port=port, protocol=protocol, asyncio=True)
i = 0
start_time = time.time()
async for doc in client.stream_doc(
on='/hello', inputs=Document(text='hello world')
):
assert doc.text == f'hello world {i}'
i += 1
# 0.5 seconds between each request + 0.5 seconds tolerance interval
assert time.time() - start_time < (0.5 * i) + 0.5
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['grpc'])
async def test_streaming_client_non_gen_endpoint(protocol):
from jina import Deployment
port = random_port()
with Deployment(
uses=MyExecutor,
timeout_ready=-1,
protocol=protocol,
port=port,
include_gateway=False,
):
client = Client(port=port, protocol=protocol, asyncio=True)
with pytest.raises(BadServer):
async for _ in client.stream_doc(
on='/world', inputs=Document(text='hello world')
):
pass
def test_invalid_executor():
with pytest.raises(RuntimeError) as exc_info:
class InvalidExecutor3(Executor):
@requests(on='/invalid')
async def invalid(self, docs: DocumentArray, **kwargs):
yield docs[0]
assert type(exc_info.value.__cause__) is AssertionError
with pytest.raises(RuntimeError) as exc_info:
class InvalidExecutor4(Executor):
@requests(on='/invalid')
def invalid(self, docs: DocumentArray, **kwargs):
yield docs[0]
assert type(exc_info.value.__cause__) is AssertionError
|
import pytest
from jina import Client, Deployment, Executor, requests
from jina._docarray import Document, DocumentArray
from jina.excepts import BadServer
from jina.helper import random_port
class MyExecutor(Executor):
@requests(on='/hello')
async def task(self, doc: Document, **kwargs):
for i in range(100):
yield Document(text=f'{doc.text} {i}')
@requests(on='/world')
async def non_gen_task(self, docs: DocumentArray, **kwargs):
return docs
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['http', 'grpc'])
@pytest.mark.parametrize('include_gateway', [False, True])
async def test_streaming_deployment(protocol, include_gateway):
from jina import Deployment
port = random_port()
with Deployment(
uses=MyExecutor,
timeout_ready=-1,
protocol=protocol,
cors=True,
port=port,
include_gateway=include_gateway,
):
client = Client(port=port, protocol=protocol, cors=True, asyncio=True)
i = 0
async for doc in client.stream_doc(
on='/hello', inputs=Document(text='hello world')
):
assert doc.text == f'hello world {i}'
i += 1
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['grpc'])
async def test_streaming_client_non_gen_endpoint(protocol):
from jina import Deployment
port = random_port()
with Deployment(
uses=MyExecutor,
timeout_ready=-1,
protocol=protocol,
cors=True,
port=port,
include_gateway=False,
):
client = Client(port=port, protocol=protocol, cors=True, asyncio=True)
with pytest.raises(BadServer):
async for _ in client.stream_doc(
on='/world', inputs=Document(text='hello world')
):
pass
def test_invalid_executor():
with pytest.raises(RuntimeError) as exc_info:
class InvalidExecutor3(Executor):
@requests(on='/invalid')
async def invalid(self, docs: DocumentArray, **kwargs):
yield docs[0]
assert type(exc_info.value.__cause__) is AssertionError
with pytest.raises(RuntimeError) as exc_info:
class InvalidExecutor4(Executor):
@requests(on='/invalid')
def invalid(self, docs: DocumentArray, **kwargs):
yield docs[0]
assert type(exc_info.value.__cause__) is AssertionError
|
__version__ = '2023.01.18.alpha'
from docarray.array.array import DocumentArray
from docarray.base_document.document import BaseDocument
__all__ = [
'BaseDocument',
'DocumentArray',
]
|
__version__ = '2023.01.17.alpha'
from docarray.array.array import DocumentArray
from docarray.base_document.document import BaseDocument
__all__ = [
'BaseDocument',
'DocumentArray',
]
|
import orjson
from docarray.typing.tensor.abstract_tensor import AbstractTensor
def _default_orjson(obj):
"""
    Default hook for orjson dumps.
    :param obj:
    :return: a json compatible object
"""
if isinstance(obj, AbstractTensor):
return obj._docarray_to_json_compatible()
else:
return obj
def orjson_dumps(v, *, default=None) -> bytes:
# dumps to bytes using orjson
return orjson.dumps(v, default=_default_orjson, option=orjson.OPT_SERIALIZE_NUMPY)
def orjson_dumps_and_decode(v, *, default=None) -> str:
# dumps to bytes using orjson
return orjson_dumps(v, default=default).decode()
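# Minimal usage sketch (illustrative): numpy arrays are serialized via
# OPT_SERIALIZE_NUMPY, and docarray tensors go through _default_orjson.
#
#   import numpy as np
#   orjson_dumps_and_decode({"scores": np.arange(3)})  # '{"scores":[0,1,2]}'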
|
import orjson
def _default_orjson(obj):
"""
default option for orjson dumps. It will call _to_json_compatible
from docarray typing object that expose such method.
:param obj:
:return: return a json compatible object
"""
if getattr(obj, '_to_json_compatible'):
return obj._to_json_compatible()
else:
return obj
def orjson_dumps(v, *, default=None):
# orjson.dumps returns bytes, to match standard json.dumps we need to decode
return orjson.dumps(
v, default=_default_orjson, option=orjson.OPT_SERIALIZE_NUMPY
).decode()
|
import csv
import gzip
import logging
import os
from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, datasets, losses, models, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Training parameters
model_name = "bert-base-uncased"
train_batch_size = 8
num_epochs = 1
max_seq_length = 75
# Save path to store our model
model_save_path = "output/training_stsb_tsdae-{}-{}-{}".format(
model_name, train_batch_size, datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "data/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# Defining our sentence transformer model
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(), "cls")
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# We use 1 Million sentences from Wikipedia to train our model
wikipedia_dataset_path = "data/wiki1m_for_simcse.txt"
if not os.path.exists(wikipedia_dataset_path):
util.http_get(
"https://huggingface.co/datasets/princeton-nlp/datasets-for-simcse/resolve/main/wiki1m_for_simcse.txt",
wikipedia_dataset_path,
)
# train_sentences is a list of raw sentences; DenoisingAutoEncoderDataset turns each one into a (noisy, original) pair on the fly
train_sentences = []
with open(wikipedia_dataset_path, encoding="utf8") as fIn:
for line in fIn:
line = line.strip()
if len(line) >= 10:
train_sentences.append(line)
# Read STSbenchmark dataset and use it as development set
logging.info("Read STSbenchmark dev dataset")
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
if row["split"] == "dev":
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
elif row["split"] == "test":
test_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
dev_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(
dev_samples, batch_size=train_batch_size, name="sts-dev"
)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(
test_samples, batch_size=train_batch_size, name="sts-test"
)
# We train our model using the DenoisingAutoEncoderLoss
train_dataset = datasets.DenoisingAutoEncoderDataset(train_sentences)
train_dataloader = DataLoader(train_dataset, batch_size=train_batch_size, shuffle=True, drop_last=True)
train_loss = losses.DenoisingAutoEncoderLoss(model, decoder_name_or_path=model_name, tie_encoder_decoder=True)
evaluation_steps = 1000
logging.info(f"Training sentences: {len(train_sentences)}")
logging.info("Performance before training")
dev_evaluator(model)
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
epochs=num_epochs,
evaluation_steps=evaluation_steps,
output_path=model_save_path,
weight_decay=0,
warmup_steps=100,
optimizer_params={"lr": 3e-5},
    use_amp=True,  # Set to True if your GPU supports FP16 operations
)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
model = SentenceTransformer(model_save_path)
test_evaluator(model, output_path=model_save_path)
|
import csv
import gzip
import logging
import os
from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, datasets, losses, models, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Training parameters
model_name = "bert-base-uncased"
train_batch_size = 8
num_epochs = 1
max_seq_length = 75
# Save path to store our model
model_save_path = "output/training_stsb_tsdae-{}-{}-{}".format(
model_name, train_batch_size, datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "data/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# Defining our sentence transformer model
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(), "cls")
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# We use 1 Million sentences from Wikipedia to train our model
wikipedia_dataset_path = "data/wiki1m_for_simcse.txt"
if not os.path.exists(wikipedia_dataset_path):
util.http_get(
"https://huggingface.co/datasets/princeton-nlp/datasets-for-simcse/resolve/main/wiki1m_for_simcse.txt",
wikipedia_dataset_path,
)
# train_sentences is a list of raw sentences; DenoisingAutoEncoderDataset turns each one into a (noisy, original) pair on the fly
train_sentences = []
with open(wikipedia_dataset_path, "r", encoding="utf8") as fIn:
for line in fIn:
line = line.strip()
if len(line) >= 10:
train_sentences.append(line)
# Read STSbenchmark dataset and use it as development set
logging.info("Read STSbenchmark dev dataset")
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
if row["split"] == "dev":
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
elif row["split"] == "test":
test_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
dev_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(
dev_samples, batch_size=train_batch_size, name="sts-dev"
)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(
test_samples, batch_size=train_batch_size, name="sts-test"
)
# We train our model using the DenoisingAutoEncoderLoss
train_dataset = datasets.DenoisingAutoEncoderDataset(train_sentences)
train_dataloader = DataLoader(train_dataset, batch_size=train_batch_size, shuffle=True, drop_last=True)
train_loss = losses.DenoisingAutoEncoderLoss(model, decoder_name_or_path=model_name, tie_encoder_decoder=True)
evaluation_steps = 1000
logging.info("Training sentences: {}".format(len(train_sentences)))
logging.info("Performance before training")
dev_evaluator(model)
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
epochs=num_epochs,
evaluation_steps=evaluation_steps,
output_path=model_save_path,
weight_decay=0,
warmup_steps=100,
optimizer_params={"lr": 3e-5},
    use_amp=True,  # Set to True if your GPU supports FP16 operations
)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
model = SentenceTransformer(model_save_path)
test_evaluator(model, output_path=model_save_path)
|
import unittest
import torch
from diffusers import DDIMInverseScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMInverseSchedulerTest(SchedulerCommonTest):
scheduler_classes = (DDIMInverseScheduler,)
forward_default_kwargs = (("num_inference_steps", 50),)
def get_scheduler_config(self, **kwargs):
config = {
"num_train_timesteps": 1000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"clip_sample": True,
}
config.update(**kwargs)
return config
def full_loop(self, **config):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
num_inference_steps = 10
model = self.dummy_model()
sample = self.dummy_sample_deter
scheduler.set_timesteps(num_inference_steps)
for t in scheduler.timesteps:
residual = model(sample, t)
sample = scheduler.step(residual, t, sample).prev_sample
return sample
def test_timesteps(self):
for timesteps in [100, 500, 1000]:
self.check_over_configs(num_train_timesteps=timesteps)
def test_steps_offset(self):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=steps_offset)
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(steps_offset=1)
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(5)
assert torch.equal(scheduler.timesteps, torch.LongTensor([1, 201, 401, 601, 801]))
def test_betas(self):
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
def test_schedules(self):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=schedule)
def test_prediction_type(self):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=prediction_type)
def test_clip_sample(self):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=clip_sample)
def test_timestep_spacing(self):
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=timestep_spacing)
def test_rescale_betas_zero_snr(self):
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)
def test_thresholding(self):
self.check_over_configs(thresholding=False)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=True,
prediction_type=prediction_type,
sample_max_value=threshold,
)
def test_time_indices(self):
for t in [1, 10, 49]:
self.check_over_forward(time_step=t)
def test_inference_steps(self):
for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)
@unittest.skip("Test not supported.")
def test_add_noise_device(self):
pass
def test_full_loop_no_noise(self):
sample = self.full_loop()
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 671.6816) < 1e-2
assert abs(result_mean.item() - 0.8746) < 1e-3
def test_full_loop_with_v_prediction(self):
sample = self.full_loop(prediction_type="v_prediction")
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 1394.2185) < 1e-2
assert abs(result_mean.item() - 1.8154) < 1e-3
def test_full_loop_with_set_alpha_to_one(self):
# We specify different beta, so that the first alpha is 0.99
sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 539.9622) < 1e-2
assert abs(result_mean.item() - 0.7031) < 1e-3
def test_full_loop_with_no_set_alpha_to_one(self):
# We specify different beta, so that the first alpha is 0.99
sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 542.6722) < 1e-2
assert abs(result_mean.item() - 0.7066) < 1e-3
|
import torch
from diffusers import DDIMInverseScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMInverseSchedulerTest(SchedulerCommonTest):
scheduler_classes = (DDIMInverseScheduler,)
forward_default_kwargs = (("num_inference_steps", 50),)
def get_scheduler_config(self, **kwargs):
config = {
"num_train_timesteps": 1000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"clip_sample": True,
}
config.update(**kwargs)
return config
def full_loop(self, **config):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
num_inference_steps = 10
model = self.dummy_model()
sample = self.dummy_sample_deter
scheduler.set_timesteps(num_inference_steps)
for t in scheduler.timesteps:
residual = model(sample, t)
sample = scheduler.step(residual, t, sample).prev_sample
return sample
def test_timesteps(self):
for timesteps in [100, 500, 1000]:
self.check_over_configs(num_train_timesteps=timesteps)
def test_steps_offset(self):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=steps_offset)
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(steps_offset=1)
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(5)
assert torch.equal(scheduler.timesteps, torch.LongTensor([1, 201, 401, 601, 801]))
def test_betas(self):
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
def test_schedules(self):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=schedule)
def test_prediction_type(self):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=prediction_type)
def test_clip_sample(self):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=clip_sample)
def test_timestep_spacing(self):
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=timestep_spacing)
def test_rescale_betas_zero_snr(self):
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)
def test_thresholding(self):
self.check_over_configs(thresholding=False)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=True,
prediction_type=prediction_type,
sample_max_value=threshold,
)
def test_time_indices(self):
for t in [1, 10, 49]:
self.check_over_forward(time_step=t)
def test_inference_steps(self):
for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)
def test_add_noise_device(self):
pass
def test_full_loop_no_noise(self):
sample = self.full_loop()
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 671.6816) < 1e-2
assert abs(result_mean.item() - 0.8746) < 1e-3
def test_full_loop_with_v_prediction(self):
sample = self.full_loop(prediction_type="v_prediction")
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 1394.2185) < 1e-2
assert abs(result_mean.item() - 1.8154) < 1e-3
def test_full_loop_with_set_alpha_to_one(self):
# We specify different beta, so that the first alpha is 0.99
sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 539.9622) < 1e-2
assert abs(result_mean.item() - 0.7031) < 1e-3
def test_full_loop_with_no_set_alpha_to_one(self):
# We specify different beta, so that the first alpha is 0.99
sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 542.6722) < 1e-2
assert abs(result_mean.item() - 0.7066) < 1e-3
|
import os
import re
from pathlib import Path
from typing import Optional, Tuple, Union
from torch import Tensor
from torch.utils.data import Dataset
from torchaudio.datasets.utils import _load_waveform
_SAMPLE_RATE = 16000
def _get_wavs_paths(data_dir):
wav_dir = data_dir / "sentences" / "wav"
wav_paths = sorted(str(p) for p in wav_dir.glob("*/*.wav"))
relative_paths = []
for wav_path in wav_paths:
start = wav_path.find("Session")
wav_path = wav_path[start:]
relative_paths.append(wav_path)
return relative_paths
class IEMOCAP(Dataset):
"""*IEMOCAP* :cite:`iemocap` dataset.
Args:
root (str or Path): Root directory where the dataset's top level directory is found
sessions (Tuple[int]): Tuple of sessions (1-5) to use. (Default: ``(1, 2, 3, 4, 5)``)
utterance_type (str or None, optional): Which type(s) of utterances to include in the dataset.
Options: ("scripted", "improvised", ``None``). If ``None``, both scripted and improvised
data are used.
"""
def __init__(
self,
root: Union[str, Path],
        sessions: Tuple[int] = (1, 2, 3, 4, 5),
utterance_type: Optional[str] = None,
):
root = Path(root)
self._path = root / "IEMOCAP"
if not os.path.isdir(self._path):
raise RuntimeError("Dataset not found.")
if utterance_type not in ["scripted", "improvised", None]:
raise ValueError("utterance_type must be one of ['scripted', 'improvised', or None]")
all_data = []
self.data = []
self.mapping = {}
for session in sessions:
session_name = f"Session{session}"
session_dir = self._path / session_name
# get wav paths
wav_paths = _get_wavs_paths(session_dir)
for wav_path in wav_paths:
wav_stem = str(Path(wav_path).stem)
all_data.append(wav_stem)
# add labels
label_dir = session_dir / "dialog" / "EmoEvaluation"
query = "*.txt"
if utterance_type == "scripted":
query = "*script*.txt"
elif utterance_type == "improvised":
query = "*impro*.txt"
label_paths = label_dir.glob(query)
for label_path in label_paths:
with open(label_path, "r") as f:
for line in f:
if not line.startswith("["):
continue
line = re.split("[\t\n]", line)
wav_stem = line[1]
label = line[2]
if wav_stem not in all_data:
continue
if label not in ["neu", "hap", "ang", "sad", "exc", "fru"]:
continue
self.mapping[wav_stem] = {}
self.mapping[wav_stem]["label"] = label
for wav_path in wav_paths:
wav_stem = str(Path(wav_path).stem)
if wav_stem in self.mapping:
self.data.append(wav_stem)
self.mapping[wav_stem]["path"] = wav_path
def get_metadata(self, n: int) -> Tuple[str, int, str, str, str]:
"""Get metadata for the n-th sample from the dataset. Returns filepath instead of waveform,
but otherwise returns the same fields as :py:meth:`__getitem__`.
Args:
n (int): The index of the sample to be loaded
Returns:
Tuple of the following items;
str:
Path to audio
int:
Sample rate
str:
File name
str:
Label (one of ``"neu"``, ``"hap"``, ``"ang"``, ``"sad"``, ``"exc"``, ``"fru"``)
str:
Speaker
"""
wav_stem = self.data[n]
wav_path = self.mapping[wav_stem]["path"]
label = self.mapping[wav_stem]["label"]
speaker = wav_stem.split("_")[0]
return (wav_path, _SAMPLE_RATE, wav_stem, label, speaker)
def __getitem__(self, n: int) -> Tuple[Tensor, int, str, str, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
Tuple of the following items;
Tensor:
Waveform
int:
Sample rate
str:
File name
str:
Label (one of ``"neu"``, ``"hap"``, ``"ang"``, ``"sad"``, ``"exc"``, ``"fru"``)
str:
Speaker
"""
metadata = self.get_metadata(n)
waveform = _load_waveform(self._path, metadata[0], metadata[1])
return (waveform,) + metadata[1:]
def __len__(self):
return len(self.data)
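# Minimal usage sketch (illustrative; assumes the corpus sits under ./data/IEMOCAP):
#
#   dataset = IEMOCAP("./data", sessions=(1,), utterance_type="improvised")
#   waveform, sample_rate, file_name, label, speaker = dataset[0]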
|
import os
import re
from pathlib import Path
from typing import Tuple, Union
from torch import Tensor
from torch.utils.data import Dataset
from torchaudio.datasets.utils import _load_waveform
_SAMPLE_RATE = 16000
def _get_wavs_paths(data_dir):
wav_dir = data_dir / "sentences" / "wav"
wav_paths = sorted(str(p) for p in wav_dir.glob("*/*.wav"))
relative_paths = []
for wav_path in wav_paths:
start = wav_path.find("Session")
wav_path = wav_path[start:]
relative_paths.append(wav_path)
return relative_paths
class IEMOCAP(Dataset):
"""*IEMOCAP* :cite:`iemocap` dataset.
Args:
root (str or Path): Root directory where the dataset's top level directory is found
sessions (Tuple[int]): Tuple of sessions (1-5) to use. (Default: ``(1, 2, 3, 4, 5)``)
"""
def __init__(
self,
root: Union[str, Path],
        sessions: Tuple[int] = (1, 2, 3, 4, 5),
):
root = Path(root)
self._path = root / "IEMOCAP"
if not os.path.isdir(self._path):
raise RuntimeError("Dataset not found.")
all_data = []
self.data = []
self.mapping = {}
for session in sessions:
session_name = f"Session{session}"
session_dir = self._path / session_name
# get wav paths
wav_paths = _get_wavs_paths(session_dir)
for wav_path in wav_paths:
wav_stem = str(Path(wav_path).stem)
all_data.append(wav_stem)
# add labels
label_dir = session_dir / "dialog" / "EmoEvaluation"
label_paths = label_dir.glob("*.txt")
for label_path in label_paths:
with open(label_path, "r") as f:
for line in f:
if not line.startswith("["):
continue
line = re.split("[\t\n]", line)
wav_stem = line[1]
label = line[2]
if label == "exc":
label = "hap"
if wav_stem not in all_data:
continue
if label not in ["neu", "hap", "ang", "sad"]:
continue
self.mapping[wav_stem] = {}
self.mapping[wav_stem]["label"] = label
for wav_path in wav_paths:
wav_stem = str(Path(wav_path).stem)
if wav_stem in self.mapping:
self.data.append(wav_stem)
self.mapping[wav_stem]["path"] = wav_path
def get_metadata(self, n: int) -> Tuple[str, int, str, str, str]:
"""Get metadata for the n-th sample from the dataset. Returns filepath instead of waveform,
but otherwise returns the same fields as :py:meth:`__getitem__`.
Args:
n (int): The index of the sample to be loaded
Returns:
Tuple of the following items;
str:
Path to audio
int:
Sample rate
str:
File name
str:
Label (one of ``"neu"``, ``"hap"``, ``"ang"``, ``"sad"``)
str:
Speaker
"""
wav_stem = self.data[n]
wav_path = self.mapping[wav_stem]["path"]
label = self.mapping[wav_stem]["label"]
speaker = wav_stem.split("_")[0]
return (wav_path, _SAMPLE_RATE, wav_stem, label, speaker)
def __getitem__(self, n: int) -> Tuple[Tensor, int, str, str, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
            Tuple of the following items:
Tensor:
Waveform
int:
Sample rate
str:
File name
str:
Label (one of ``"neu"``, ``"hap"``, ``"ang"``, ``"sad"``)
str:
Speaker
"""
metadata = self.get_metadata(n)
waveform = _load_waveform(self._path, metadata[0], metadata[1])
return (waveform,) + metadata[1:]
def __len__(self):
return len(self.data)
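# Hedged usage sketch (an addition, not part of the upstream dataset): the
# root path below is a placeholder and must already contain the extracted
# "IEMOCAP" directory.
if __name__ == "__main__":
    dataset = IEMOCAP("/path/to/data", sessions=(1,))
    waveform, sample_rate, file_name, label, speaker = dataset[0]
    print(f"{len(dataset)} utterances; first: {file_name} ({label}) by {speaker} at {sample_rate} Hz")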
|
import logging
from typing import Annotated
from autogpt_libs.auth.middleware import APIKeyValidator
from fastapi import APIRouter, Body, Depends, HTTPException, Query
from fastapi.responses import JSONResponse
from backend.data.user import (
get_user_by_email,
set_user_email_verification,
unsubscribe_user_by_token,
)
from backend.server.routers.postmark.models import (
PostmarkBounceEnum,
PostmarkBounceWebhook,
PostmarkClickWebhook,
PostmarkDeliveryWebhook,
PostmarkOpenWebhook,
PostmarkSpamComplaintWebhook,
PostmarkSubscriptionChangeWebhook,
PostmarkWebhook,
)
from backend.util.settings import Settings
settings = Settings()
postmark_validator = APIKeyValidator(
"X-Postmark-Webhook-Token",
settings.secrets.postmark_webhook_token,
)
router = APIRouter()
logger = logging.getLogger(__name__)
@router.post("/unsubscribe", summary="One Click Email Unsubscribe")
async def unsubscribe_via_one_click(token: Annotated[str, Query()]):
logger.info("Received unsubscribe request from One Click Unsubscribe")
try:
await unsubscribe_user_by_token(token)
except Exception as e:
logger.exception("Unsubscribe failed: %s", e)
raise HTTPException(
status_code=500,
detail={"message": str(e), "hint": "Verify Postmark token settings."},
)
return JSONResponse(status_code=200, content={"status": "ok"})
@router.post(
"/",
dependencies=[Depends(postmark_validator.get_dependency())],
summary="Handle Postmark Email Webhooks",
)
async def postmark_webhook_handler(
webhook: Annotated[
PostmarkWebhook,
Body(discriminator="RecordType"),
]
):
logger.info(f"Received webhook from Postmark: {webhook}")
match webhook:
case PostmarkDeliveryWebhook():
delivery_handler(webhook)
case PostmarkBounceWebhook():
await bounce_handler(webhook)
case PostmarkSpamComplaintWebhook():
spam_handler(webhook)
case PostmarkOpenWebhook():
open_handler(webhook)
case PostmarkClickWebhook():
click_handler(webhook)
case PostmarkSubscriptionChangeWebhook():
subscription_handler(webhook)
case _:
logger.warning(
"Unhandled Postmark webhook type %s. Update handler mappings.",
type(webhook),
)
return
async def bounce_handler(event: PostmarkBounceWebhook):
logger.info(f"Bounce handler {event=}")
if event.TypeCode in [
PostmarkBounceEnum.Transient,
PostmarkBounceEnum.SoftBounce,
PostmarkBounceEnum.DnsError,
]:
logger.info(
f"Softish bounce: {event.TypeCode} for {event.Email}, not setting email verification to false"
)
return
logger.info(f"{event.Email=}")
user = await get_user_by_email(event.Email)
if not user:
logger.warning(
"Received bounce for unknown email %s. Ensure user records are current.",
event.Email,
)
return
await set_user_email_verification(user.id, False)
logger.debug(f"Setting email verification to false for user: {user.id}")
def spam_handler(event: PostmarkSpamComplaintWebhook):
logger.info("Spam handler")
pass
def delivery_handler(event: PostmarkDeliveryWebhook):
logger.info("Delivery handler")
pass
def open_handler(event: PostmarkOpenWebhook):
logger.info("Open handler")
pass
def click_handler(event: PostmarkClickWebhook):
logger.info("Click handler")
pass
def subscription_handler(event: PostmarkSubscriptionChangeWebhook):
logger.info("Subscription handler")
pass
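# Hedged wiring sketch (an addition, not part of the upstream module): how
# this router might be mounted for local testing; the "/postmark" prefix
# and the port are assumptions.
if __name__ == "__main__":
    import uvicorn
    from fastapi import FastAPI
    app = FastAPI()
    app.include_router(router, prefix="/postmark", tags=["postmark"])
    uvicorn.run(app, host="127.0.0.1", port=8000)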
|
import logging
from typing import Annotated
from autogpt_libs.auth.middleware import APIKeyValidator
from fastapi import APIRouter, Body, Depends, HTTPException, Query
from fastapi.responses import JSONResponse
from backend.data.user import (
get_user_by_email,
set_user_email_verification,
unsubscribe_user_by_token,
)
from backend.server.routers.postmark.models import (
PostmarkBounceEnum,
PostmarkBounceWebhook,
PostmarkClickWebhook,
PostmarkDeliveryWebhook,
PostmarkOpenWebhook,
PostmarkSpamComplaintWebhook,
PostmarkSubscriptionChangeWebhook,
PostmarkWebhook,
)
from backend.util.settings import Settings
settings = Settings()
postmark_validator = APIKeyValidator(
"X-Postmark-Webhook-Token",
settings.secrets.postmark_webhook_token,
)
router = APIRouter()
logger = logging.getLogger(__name__)
@router.post("/unsubscribe")
async def unsubscribe_via_one_click(token: Annotated[str, Query()]):
logger.info("Received unsubscribe request from One Click Unsubscribe")
try:
await unsubscribe_user_by_token(token)
except Exception as e:
logger.exception("Unsubscribe failed: %s", e)
raise HTTPException(
status_code=500,
detail={"message": str(e), "hint": "Verify Postmark token settings."},
)
return JSONResponse(status_code=200, content={"status": "ok"})
@router.post("/", dependencies=[Depends(postmark_validator.get_dependency())])
async def postmark_webhook_handler(
webhook: Annotated[
PostmarkWebhook,
Body(discriminator="RecordType"),
]
):
logger.info(f"Received webhook from Postmark: {webhook}")
match webhook:
case PostmarkDeliveryWebhook():
delivery_handler(webhook)
case PostmarkBounceWebhook():
await bounce_handler(webhook)
case PostmarkSpamComplaintWebhook():
spam_handler(webhook)
case PostmarkOpenWebhook():
open_handler(webhook)
case PostmarkClickWebhook():
click_handler(webhook)
case PostmarkSubscriptionChangeWebhook():
subscription_handler(webhook)
case _:
logger.warning(
"Unhandled Postmark webhook type %s. Update handler mappings.",
type(webhook),
)
return
async def bounce_handler(event: PostmarkBounceWebhook):
logger.info(f"Bounce handler {event=}")
if event.TypeCode in [
PostmarkBounceEnum.Transient,
PostmarkBounceEnum.SoftBounce,
PostmarkBounceEnum.DnsError,
]:
logger.info(
f"Softish bounce: {event.TypeCode} for {event.Email}, not setting email verification to false"
)
return
logger.info(f"{event.Email=}")
user = await get_user_by_email(event.Email)
if not user:
logger.warning(
"Received bounce for unknown email %s. Ensure user records are current.",
event.Email,
)
return
await set_user_email_verification(user.id, False)
logger.debug(f"Setting email verification to false for user: {user.id}")
def spam_handler(event: PostmarkSpamComplaintWebhook):
logger.info("Spam handler")
pass
def delivery_handler(event: PostmarkDeliveryWebhook):
logger.info("Delivery handler")
pass
def open_handler(event: PostmarkOpenWebhook):
logger.info("Open handler")
pass
def click_handler(event: PostmarkClickWebhook):
logger.info("Click handler")
pass
def subscription_handler(event: PostmarkSubscriptionChangeWebhook):
logger.info("Subscription handler")
pass
|
"""
In this example we train a semantic search model to search through Wikipedia
articles about programming languages & technologies.
We use the text paragraphs from the following Wikipedia articles:
Assembly language, C , C Sharp , C++, Go , Java , JavaScript, Keras, Laravel, MATLAB, Matplotlib, MongoDB, MySQL, Natural Language Toolkit, NumPy, pandas (software), Perl, PHP, PostgreSQL, Python , PyTorch, R , React, Rust , Scala , scikit-learn, SciPy, Swift , TensorFlow, Vue.js
The example consists of the following scripts:
1_programming_query_generation.py - We generate queries for all paragraphs from these articles
2_programming_train_bi-encoder.py - We train a SentenceTransformer bi-encoder with these generated queries. This results in a model we can then use for semantic search (for the given Wikipedia articles).
3_programming_semantic_search.py - Shows how the trained model can be used for semantic search
"""
import os
from sentence_transformers import InputExample, SentenceTransformer, datasets, losses, models
train_examples = []
with open("generated_queries.tsv") as fIn:
for line in fIn:
query, paragraph = line.strip().split("\t", maxsplit=1)
train_examples.append(InputExample(texts=[query, paragraph]))
# For the MultipleNegativesRankingLoss, it is important
# that the batch does not contain duplicate entries, i.e.
# no two equal queries and no two equal paragraphs.
# To ensure this, we use a special data loader
train_dataloader = datasets.NoDuplicatesDataLoader(train_examples, batch_size=64)
# Now we create a SentenceTransformer model from scratch
word_emb = models.Transformer("distilbert-base-uncased")
pooling = models.Pooling(word_emb.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_emb, pooling])
# MultipleNegativesRankingLoss requires input pairs (query, relevant_passage)
# and trains the model so that it is suitable for semantic search
train_loss = losses.MultipleNegativesRankingLoss(model)
# Tune the model
num_epochs = 3
warmup_steps = int(len(train_dataloader) * num_epochs * 0.1)
model.fit(
train_objectives=[(train_dataloader, train_loss)],
epochs=num_epochs,
warmup_steps=warmup_steps,
show_progress_bar=True,
)
os.makedirs("output", exist_ok=True)
model.save("output/programming-model")
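# Hedged follow-up sketch (an addition, not part of the original script),
# in the spirit of 3_programming_semantic_search.py: query the saved model.
# The corpus sentences are placeholders.
from sentence_transformers import util
search_model = SentenceTransformer("output/programming-model")
corpus = [
    "Python is a high-level, general-purpose programming language.",
    "TensorFlow is an open-source machine learning framework.",
]
corpus_embeddings = search_model.encode(corpus, convert_to_tensor=True)
query_embedding = search_model.encode("What is Python?", convert_to_tensor=True)
hits = util.semantic_search(query_embedding, corpus_embeddings, top_k=1)
print(hits)  # e.g. [[{'corpus_id': 0, 'score': ...}]]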
|
"""
In this example we train a semantic search model to search through Wikipedia
articles about programming languages & technologies.
We use the text paragraphs from the following Wikipedia articles:
Assembly language, C , C Sharp , C++, Go , Java , JavaScript, Keras, Laravel, MATLAB, Matplotlib, MongoDB, MySQL, Natural Language Toolkit, NumPy, pandas (software), Perl, PHP, PostgreSQL, Python , PyTorch, R , React, Rust , Scala , scikit-learn, SciPy, Swift , TensorFlow, Vue.js
The example consists of the following scripts:
1_programming_query_generation.py - We generate queries for all paragraphs from these articles
2_programming_train_bi-encoder.py - We train a SentenceTransformer bi-encoder with these generated queries. This results in a model we can then use for semantic search (for the given Wikipedia articles).
3_programming_semantic_search.py - Shows how the trained model can be used for semantic search
"""
from sentence_transformers import SentenceTransformer, InputExample, losses, models, datasets
import os
train_examples = []
with open('generated_queries.tsv') as fIn:
for line in fIn:
query, paragraph = line.strip().split('\t', maxsplit=1)
train_examples.append(InputExample(texts=[query, paragraph]))
# For the MultipleNegativesRankingLoss, it is important
# that the batch does not contain duplicate entries, i.e.
# no two equal queries and no two equal paragraphs.
# To ensure this, we use a special data loader
train_dataloader = datasets.NoDuplicatesDataLoader(train_examples, batch_size=64)
# Now we create a SentenceTransformer model from scratch
word_emb = models.Transformer('distilbert-base-uncased')
pooling = models.Pooling(word_emb.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_emb, pooling])
# MultipleNegativesRankingLoss requires input pairs (query, relevant_passage)
# and trains the model so that it is suitable for semantic search
train_loss = losses.MultipleNegativesRankingLoss(model)
# Tune the model
num_epochs = 3
warmup_steps = int(len(train_dataloader) * num_epochs * 0.1)
model.fit(train_objectives=[(train_dataloader, train_loss)], epochs=num_epochs, warmup_steps=warmup_steps, show_progress_bar=True)
os.makedirs('output', exist_ok=True)
model.save('output/programming-model')
|
from typing import TYPE_CHECKING
import numpy as np
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import T
class MeshDataMixin:
"""Provide helper functions for :class:`Document` to support 3D mesh data and point cloud."""
def load_uri_to_point_cloud_tensor(
self: 'T', samples: int, as_chunks: bool = False
) -> 'T':
"""Convert a 3d mesh-like :attr:`.uri` into :attr:`.tensor`
:param samples: number of points to sample from the mesh
        :param as_chunks: when multiple geometries are stored in one mesh file,
            store each geometry in a separate element of :attr:`.chunks`
        :return: itself after processing
"""
import trimesh
import urllib.parse
scheme = urllib.parse.urlparse(self.uri).scheme
loader = trimesh.load_remote if scheme in ['http', 'https'] else trimesh.load
if as_chunks:
from docarray.document import Document
# try to coerce everything into a scene
scene = loader(self.uri, force='scene')
for geo in scene.geometry.values():
geo: trimesh.Trimesh
self.chunks.append(Document(tensor=np.array(geo.sample(samples))))
else:
# combine a scene into a single mesh
mesh = loader(self.uri, force='mesh')
self.tensor = np.array(mesh.sample(samples))
return self
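# Hedged usage sketch (an addition, not part of the upstream mixin): the
# mesh file name is a placeholder; requires `trimesh` to be installed.
if __name__ == "__main__":
    from docarray import Document
    doc = Document(uri="bunny.obj")
    doc.load_uri_to_point_cloud_tensor(samples=1000)
    print(doc.tensor.shape)  # expected: (1000, 3)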
|
from typing import TYPE_CHECKING
import numpy as np
if TYPE_CHECKING:
from docarray.typing import T
class MeshDataMixin:
"""Provide helper functions for :class:`Document` to support 3D mesh data and point cloud."""
def load_uri_to_point_cloud_tensor(
self: 'T', samples: int, as_chunks: bool = False
) -> 'T':
"""Convert a 3d mesh-like :attr:`.uri` into :attr:`.tensor`
:param samples: number of points to sample from the mesh
        :param as_chunks: when multiple geometries are stored in one mesh file,
            store each geometry in a separate element of :attr:`.chunks`
        :return: itself after processing
"""
import trimesh
import urllib.parse
scheme = urllib.parse.urlparse(self.uri).scheme
loader = trimesh.load_remote if scheme in ['http', 'https'] else trimesh.load
if as_chunks:
from docarray.document import Document
# try to coerce everything into a scene
scene = loader(self.uri, force='scene')
for geo in scene.geometry.values():
geo: trimesh.Trimesh
self.chunks.append(Document(tensor=np.array(geo.sample(samples))))
else:
# combine a scene into a single mesh
mesh = loader(self.uri, force='mesh')
self.tensor = np.array(mesh.sample(samples))
return self
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import pytest
import torch
from mmengine import InstanceData
from mmdet.models.dense_heads import EmbeddingRPNHead
from mmdet.structures import DetDataSample
class TestEmbeddingRPNHead(TestCase):
def test_init(self):
"""Test init rpn head."""
rpn_head = EmbeddingRPNHead(
num_proposals=100, proposal_feature_channel=256)
rpn_head.init_weights()
self.assertTrue(rpn_head.init_proposal_bboxes)
self.assertTrue(rpn_head.init_proposal_features)
def test_loss_and_predict(self):
s = 256
img_meta = {
'img_shape': (s, s, 3),
'pad_shape': (s, s, 3),
'scale_factor': 1,
}
rpn_head = EmbeddingRPNHead(
num_proposals=100, proposal_feature_channel=256)
feats = [
torch.rand(2, 1, s // (2**(i + 2)), s // (2**(i + 2)))
for i in range(5)
]
data_sample = DetDataSample()
data_sample.set_metainfo(img_meta)
# test predict
result_list = rpn_head.predict(feats, [data_sample])
self.assertTrue(isinstance(result_list, list))
self.assertTrue(isinstance(result_list[0], InstanceData))
# test loss_and_predict
result_list = rpn_head.loss_and_predict(feats, [data_sample])
self.assertTrue(isinstance(result_list, tuple))
self.assertTrue(isinstance(result_list[0], dict))
self.assertEqual(len(result_list[0]), 0)
self.assertTrue(isinstance(result_list[1], list))
self.assertTrue(isinstance(result_list[1][0], InstanceData))
# test loss
with pytest.raises(NotImplementedError):
rpn_head.loss(feats, [data_sample])
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import pytest
import torch
from mmengine import InstanceData
from mmdet.data_elements import DetDataSample
from mmdet.models.dense_heads import EmbeddingRPNHead
class TestEmbeddingRPNHead(TestCase):
def test_init(self):
"""Test init rpn head."""
rpn_head = EmbeddingRPNHead(
num_proposals=100, proposal_feature_channel=256)
rpn_head.init_weights()
self.assertTrue(rpn_head.init_proposal_bboxes)
self.assertTrue(rpn_head.init_proposal_features)
def test_loss_and_predict(self):
s = 256
img_meta = {
'img_shape': (s, s, 3),
'pad_shape': (s, s, 3),
'scale_factor': 1,
}
rpn_head = EmbeddingRPNHead(
num_proposals=100, proposal_feature_channel=256)
feats = [
torch.rand(2, 1, s // (2**(i + 2)), s // (2**(i + 2)))
for i in range(5)
]
data_sample = DetDataSample()
data_sample.set_metainfo(img_meta)
# test predict
result_list = rpn_head.predict(feats, [data_sample])
self.assertTrue(isinstance(result_list, list))
self.assertTrue(isinstance(result_list[0], InstanceData))
# test loss_and_predict
result_list = rpn_head.loss_and_predict(feats, [data_sample])
self.assertTrue(isinstance(result_list, tuple))
self.assertTrue(isinstance(result_list[0], dict))
self.assertEqual(len(result_list[0]), 0)
self.assertTrue(isinstance(result_list[1], list))
self.assertTrue(isinstance(result_list[1][0], InstanceData))
# test loss
with pytest.raises(NotImplementedError):
rpn_head.loss(feats, [data_sample])
|
from __future__ import annotations
__version__ = "3.5.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from sentence_transformers.backend import (
export_dynamic_quantized_onnx_model,
export_optimized_onnx_model,
export_static_quantized_openvino_model,
)
from sentence_transformers.cross_encoder import (
CrossEncoder,
CrossEncoderModelCardData,
CrossEncoderTrainer,
CrossEncoderTrainingArguments,
)
from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset
from sentence_transformers.LoggingHandler import LoggingHandler
from sentence_transformers.model_card import SentenceTransformerModelCardData
from sentence_transformers.quantization import quantize_embeddings
from sentence_transformers.readers import InputExample
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
from sentence_transformers.util import mine_hard_negatives
# If codecarbon is installed and the log level is not defined,
# automatically overwrite the default to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"SimilarityFunction",
"InputExample",
"CrossEncoder",
"CrossEncoderTrainer",
"CrossEncoderTrainingArguments",
"CrossEncoderModelCardData",
"SentenceTransformerTrainer",
"SentenceTransformerTrainingArguments",
"SentenceTransformerModelCardData",
"quantize_embeddings",
"export_optimized_onnx_model",
"export_dynamic_quantized_onnx_model",
"export_static_quantized_openvino_model",
"mine_hard_negatives",
]
|
from __future__ import annotations
__version__ = "3.5.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from sentence_transformers.backend import (
export_dynamic_quantized_onnx_model,
export_optimized_onnx_model,
export_static_quantized_openvino_model,
)
from sentence_transformers.cross_encoder.CrossEncoder import CrossEncoder
from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset
from sentence_transformers.LoggingHandler import LoggingHandler
from sentence_transformers.model_card import SentenceTransformerModelCardData
from sentence_transformers.quantization import quantize_embeddings
from sentence_transformers.readers import InputExample
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# If codecarbon is installed and the log level is not defined,
# automatically overwrite the default to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"SimilarityFunction",
"InputExample",
"CrossEncoder",
"SentenceTransformerTrainer",
"SentenceTransformerTrainingArguments",
"SentenceTransformerModelCardData",
"quantize_embeddings",
"export_optimized_onnx_model",
"export_dynamic_quantized_onnx_model",
"export_static_quantized_openvino_model",
]
|
from .autoencoder_asym_kl import AsymmetricAutoencoderKL
from .autoencoder_dc import AutoencoderDC
from .autoencoder_kl import AutoencoderKL
from .autoencoder_kl_allegro import AutoencoderKLAllegro
from .autoencoder_kl_cogvideox import AutoencoderKLCogVideoX
from .autoencoder_kl_hunyuan_video import AutoencoderKLHunyuanVideo
from .autoencoder_kl_ltx import AutoencoderKLLTXVideo
from .autoencoder_kl_magvit import AutoencoderKLMagvit
from .autoencoder_kl_mochi import AutoencoderKLMochi
from .autoencoder_kl_temporal_decoder import AutoencoderKLTemporalDecoder
from .autoencoder_kl_wan import AutoencoderKLWan
from .autoencoder_oobleck import AutoencoderOobleck
from .autoencoder_tiny import AutoencoderTiny
from .consistency_decoder_vae import ConsistencyDecoderVAE
from .vq_model import VQModel
|
from .autoencoder_asym_kl import AsymmetricAutoencoderKL
from .autoencoder_dc import AutoencoderDC
from .autoencoder_kl import AutoencoderKL
from .autoencoder_kl_allegro import AutoencoderKLAllegro
from .autoencoder_kl_cogvideox import AutoencoderKLCogVideoX
from .autoencoder_kl_hunyuan_video import AutoencoderKLHunyuanVideo
from .autoencoder_kl_ltx import AutoencoderKLLTXVideo
from .autoencoder_kl_mochi import AutoencoderKLMochi
from .autoencoder_kl_temporal_decoder import AutoencoderKLTemporalDecoder
from .autoencoder_kl_wan import AutoencoderKLWan
from .autoencoder_oobleck import AutoencoderOobleck
from .autoencoder_tiny import AutoencoderTiny
from .consistency_decoder_vae import ConsistencyDecoderVAE
from .vq_model import VQModel
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='Jina AI',
author_email='[email protected]',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
setup_requires=['setuptools>=18.0', 'wheel'],
install_requires=['numpy', 'rich>=12.0.0'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
],
'full': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
'weaviate-client~=3.3.0',
'annlite>=0.3.0',
'qdrant-client~=0.7.3',
'elasticsearch>=8.0.1',
],
'qdrant': [
'qdrant-client~=0.7.3',
],
'annlite': [
'annlite>=0.3.0',
],
'weaviate': [
'weaviate-client~=3.3.0',
],
'elasticsearch': [
'elasticsearch>=8.0.1',
],
'test': [
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle==2.2.0',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.3.0',
'annlite>=0.3.0',
'elasticsearch>=8.0.1',
'jina',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
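# Hedged note (an addition, not part of the original setup.py): the extras
# declared in `extras_require` above are selected at install time, e.g.
#   pip install "docarray[common]"
#   pip install "docarray[full]"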
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='Jina AI',
author_email='[email protected]',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
setup_requires=['setuptools>=18.0', 'wheel'],
install_requires=['numpy', 'rich>=12.0.0'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
],
'full': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
'weaviate-client~=3.3.0',
'annlite>=0.3.0',
'qdrant-client~=0.7.1',
'elasticsearch>=8.0.1',
],
'qdrant': [
'qdrant-client~=0.7.1',
],
'annlite': [
'annlite>=0.3.0',
],
'weaviate': [
'weaviate-client~=3.3.0',
],
'elasticsearch': [
'elasticsearch>=8.0.1',
],
'test': [
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle==2.2.0',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.3.0',
'annlite>=0.3.0',
'elasticsearch>=8.0.1',
'jina',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
import os
from pathlib import Path
from typing import List, Tuple, Union
import torchaudio
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.librispeech import _get_librispeech_metadata
from torchaudio.datasets.utils import extract_archive
_ARCHIVE_NAME = "librispeech_finetuning"
_URL = "https://dl.fbaipublicfiles.com/librilight/data/librispeech_finetuning.tgz"
_CHECKSUM = "5d1efdc777b548194d7e09ba89126e2188026df9fd57aa57eb14408d2b2342af"
_SUBSET_MAP = {"10min": ["1h/0"], "1h": ["1h/*"], "10h": ["1h/*", "9h"]}
def _get_fileids_paths(path, folders, _ext_audio) -> List[Tuple[str, str]]:
"""Get the file names and the corresponding file paths without `speaker_id`
and `chapter_id` directories.
    The expected path format is one of:
{root}/{_ARCHIVE_NAME}/1h/[0-5]/[clean, other] or
{root}/{_ARCHIVE_NAME}/9h/[clean, other]
"""
path = Path(path)
files_paths = []
for folder in folders:
paths = [p.relative_to(path) for p in path.glob(f"{folder}/*/*/*/*{_ext_audio}")]
files_paths += [(str(p.parent.parent.parent), str(p.stem)) for p in paths] # get subset folder and file name
files_paths.sort(key=lambda x: x[0] + x[1])
return files_paths
class LibriLightLimited(Dataset):
"""Create a Dataset for LibriLightLimited, which is the supervised subset of
LibriLight dataset.
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
subset (str, optional): The subset to use. Options: [``10min``, ``1h``, ``10h``]
(Default: ``10min``).
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
_ext_txt = ".trans.txt"
_ext_audio = ".flac"
def __init__(
self,
root: Union[str, Path],
subset: str = "10min",
download: bool = False,
) -> None:
if subset not in _SUBSET_MAP:
raise ValueError(f"`subset` must be one of {_SUBSET_MAP.keys()}. Found: {subset}")
folders = _SUBSET_MAP[subset]
root = os.fspath(root)
self._path = os.path.join(root, _ARCHIVE_NAME)
archive = os.path.join(root, f"{_ARCHIVE_NAME}.tgz")
if not os.path.isdir(self._path):
if not download:
raise RuntimeError("Dataset not found. Please use `download=True` to download")
if not os.path.isfile(archive):
download_url_to_file(_URL, archive, hash_prefix=_CHECKSUM)
extract_archive(archive)
self._fileids_paths = _get_fileids_paths(self._path, folders, self._ext_audio)
def __getitem__(self, n: int) -> Tuple[Tensor, int, str, int, int, int]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, str, int, int, int):
``(waveform, sample_rate, transcript, speaker_id, chapter_id, utterance_id)``
"""
file_path, fileid = self._fileids_paths[n]
metadata = _get_librispeech_metadata(fileid, self._path, file_path, self._ext_audio, self._ext_txt)
waveform, _ = torchaudio.load(os.path.join(self._path, metadata[0]))
return (waveform,) + metadata[1:]
def __len__(self) -> int:
return len(self._fileids_paths)
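# Hedged usage sketch (an addition, not part of the upstream dataset): the
# root path is a placeholder; with download=True the archive is fetched and
# extracted on first use.
if __name__ == "__main__":
    dataset = LibriLightLimited("/path/to/data", subset="10min", download=True)
    waveform, sample_rate, transcript, speaker_id, chapter_id, utterance_id = dataset[0]
    print(len(dataset), sample_rate, transcript[:40])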
|
import os
from pathlib import Path
from typing import List, Tuple, Union
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.librispeech import load_librispeech_item
from torchaudio.datasets.utils import extract_archive
_ARCHIVE_NAME = "librispeech_finetuning"
_URL = "https://dl.fbaipublicfiles.com/librilight/data/librispeech_finetuning.tgz"
_CHECKSUM = "5d1efdc777b548194d7e09ba89126e2188026df9fd57aa57eb14408d2b2342af"
def _get_fileids_paths(path, subset, _ext_audio) -> List[Tuple[str, str]]:
"""Get the file names and the corresponding file paths without `speaker_id`
and `chapter_id` directories.
    The expected path format is one of:
{root}/{_ARCHIVE_NAME}/1h/[0-5]/[clean, other] or
{root}/{_ARCHIVE_NAME}/9h/[clean, other]
"""
if subset == "10min":
files_paths = [
(os.path.join(os.path.dirname(p), "..", ".."), str(p.stem))
for p in Path(path).glob("1h/0/*/*/*/*" + _ext_audio)
]
elif subset in ["1h", "10h"]:
files_paths = [
(os.path.join(os.path.dirname(p), "..", ".."), str(p.stem))
for p in Path(path).glob("1h/*/*/*/*/*" + _ext_audio)
]
if subset == "10h":
files_paths += [
(os.path.join(os.path.dirname(p), "..", ".."), str(p.stem))
for p in Path(path).glob("9h/*/*/*/*" + _ext_audio)
]
else:
raise ValueError(f"Unsupported subset value. Found {subset}.")
files_paths = sorted(files_paths, key=lambda x: x[0] + x[1])
return files_paths
class LibriLightLimited(Dataset):
"""Create a Dataset for LibriLightLimited, which is the supervised subset of
LibriLight dataset.
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
subset (str, optional): The subset to use. Options: [``10min``, ``1h``, ``10h``]
(Default: ``10min``).
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
_ext_txt = ".trans.txt"
_ext_audio = ".flac"
def __init__(
self,
root: Union[str, Path],
subset: str = "10min",
download: bool = False,
) -> None:
if subset not in ["10min", "1h", "10h"]:
raise ValueError("`subset` must be one of ['10min', '1h', '10h']")
root = os.fspath(root)
self._path = os.path.join(root, _ARCHIVE_NAME)
archive = os.path.join(root, f"{_ARCHIVE_NAME}.tgz")
if not os.path.isdir(self._path):
if not download:
raise RuntimeError("Dataset not found. Please use `download=True` to download")
if not os.path.isfile(archive):
download_url_to_file(_URL, archive, hash_prefix=_CHECKSUM)
extract_archive(archive)
self._fileids_paths = _get_fileids_paths(self._path, subset, self._ext_audio)
def __getitem__(self, n: int) -> Tuple[Tensor, int, str, int, int, int]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, str, int, int, int):
``(waveform, sample_rate, transcript, speaker_id, chapter_id, utterance_id)``
"""
file_path, fileid = self._fileids_paths[n]
return load_librispeech_item(fileid, file_path, self._ext_audio, self._ext_txt)
def __len__(self) -> int:
return len(self._fileids_paths)
|
"""LangSmith evaluation utilities.
This module provides utilities for evaluating Chains and other language model
applications using LangChain evaluators and LangSmith.
For more information on the LangSmith API, see the `LangSmith API documentation <https://docs.smith.langchain.com/docs/>`_.
**Example**
.. code-block:: python
from langsmith import Client
from langchain_community.chat_models import ChatOpenAI
from langchain.chains import LLMChain
from langchain.smith import EvaluatorType, RunEvalConfig, run_on_dataset
def construct_chain():
llm = ChatOpenAI(temperature=0)
chain = LLMChain.from_string(
llm,
"What's the answer to {your_input_key}"
)
return chain
evaluation_config = RunEvalConfig(
evaluators=[
EvaluatorType.QA, # "Correctness" against a reference answer
EvaluatorType.EMBEDDING_DISTANCE,
RunEvalConfig.Criteria("helpfulness"),
RunEvalConfig.Criteria({
"fifth-grader-score": "Do you have to be smarter than a fifth grader to answer this question?"
}),
]
)
client = Client()
run_on_dataset(
client,
"<my_dataset_name>",
construct_chain,
evaluation=evaluation_config
)
**Attributes**
- ``arun_on_dataset``: Asynchronous function to evaluate a chain or other LangChain component over a dataset.
- ``run_on_dataset``: Function to evaluate a chain or other LangChain component over a dataset.
- ``RunEvalConfig``: Class representing the configuration for running evaluation.
- ``StringRunEvaluatorChain``: Class representing a string run evaluator chain.
- ``InputFormatError``: Exception raised when the input format is incorrect.
""" # noqa: E501
from langchain.smith.evaluation.config import RunEvalConfig
from langchain.smith.evaluation.runner_utils import (
InputFormatError,
arun_on_dataset,
run_on_dataset,
)
from langchain.smith.evaluation.string_run_evaluator import StringRunEvaluatorChain
__all__ = [
"InputFormatError",
"RunEvalConfig",
"StringRunEvaluatorChain",
"arun_on_dataset",
"run_on_dataset",
]
|
"""LangSmith evaluation utilities.
This module provides utilities for evaluating Chains and other language model
applications using LangChain evaluators and LangSmith.
For more information on the LangSmith API, see the `LangSmith API documentation <https://docs.smith.langchain.com/docs/>`_.
**Example**
.. code-block:: python
from langsmith import Client
from langchain_community.chat_models import ChatOpenAI
from langchain.chains import LLMChain
from langchain.smith import EvaluatorType, RunEvalConfig, run_on_dataset
def construct_chain():
llm = ChatOpenAI(temperature=0)
chain = LLMChain.from_string(
llm,
"What's the answer to {your_input_key}"
)
return chain
evaluation_config = RunEvalConfig(
evaluators=[
EvaluatorType.QA, # "Correctness" against a reference answer
EvaluatorType.EMBEDDING_DISTANCE,
RunEvalConfig.Criteria("helpfulness"),
RunEvalConfig.Criteria({
"fifth-grader-score": "Do you have to be smarter than a fifth grader to answer this question?"
}),
]
)
client = Client()
run_on_dataset(
client,
"<my_dataset_name>",
construct_chain,
evaluation=evaluation_config
)
**Attributes**
- ``arun_on_dataset``: Asynchronous function to evaluate a chain or other LangChain component over a dataset.
- ``run_on_dataset``: Function to evaluate a chain or other LangChain component over a dataset.
- ``RunEvalConfig``: Class representing the configuration for running evaluation.
- ``StringRunEvaluatorChain``: Class representing a string run evaluator chain.
- ``InputFormatError``: Exception raised when the input format is incorrect.
""" # noqa: E501
from langchain.smith.evaluation.config import RunEvalConfig
from langchain.smith.evaluation.runner_utils import (
InputFormatError,
arun_on_dataset,
run_on_dataset,
)
from langchain.smith.evaluation.string_run_evaluator import StringRunEvaluatorChain
__all__ = [
"InputFormatError",
"arun_on_dataset",
"run_on_dataset",
"StringRunEvaluatorChain",
"RunEvalConfig",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .optimizer import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS,
AmpOptimWrapper, DefaultOptimWrapperConstructor,
OptimWrapper, OptimWrapperDict, build_optim_wrapper)
# yapf: disable
from .scheduler import (ConstantLR, ConstantMomentum, ConstantParamScheduler,
CosineAnnealingLR, CosineAnnealingMomentum,
CosineAnnealingParamScheduler, ExponentialLR,
ExponentialMomentum, ExponentialParamScheduler,
LinearLR, LinearMomentum, LinearParamScheduler,
MultiStepLR, MultiStepMomentum,
MultiStepParamScheduler, OneCycleLR,
OneCycleParamScheduler, StepLR, StepMomentum,
StepParamScheduler, _ParamScheduler)
# yapf: enable
__all__ = [
'OPTIM_WRAPPER_CONSTRUCTORS', 'OPTIMIZERS', 'build_optim_wrapper',
'DefaultOptimWrapperConstructor', 'ConstantLR', 'CosineAnnealingLR',
'ExponentialLR', 'LinearLR', 'MultiStepLR', 'StepLR', 'ConstantMomentum',
'CosineAnnealingMomentum', 'ExponentialMomentum', 'LinearMomentum',
'MultiStepMomentum', 'StepMomentum', 'ConstantParamScheduler',
'CosineAnnealingParamScheduler', 'ExponentialParamScheduler',
'LinearParamScheduler', 'MultiStepParamScheduler', 'StepParamScheduler',
'_ParamScheduler', 'OptimWrapper', 'AmpOptimWrapper', 'OptimWrapperDict',
'OneCycleParamScheduler', 'OneCycleLR'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .optimizer import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS,
AmpOptimWrapper, DefaultOptimWrapperConstructor,
OptimWrapper, OptimWrapperDict, build_optim_wrapper)
from .scheduler import (ConstantLR, ConstantMomentum, ConstantParamScheduler,
CosineAnnealingLR, CosineAnnealingMomentum,
CosineAnnealingParamScheduler, ExponentialLR,
ExponentialMomentum, ExponentialParamScheduler,
LinearLR, LinearMomentum, LinearParamScheduler,
MultiStepLR, MultiStepMomentum,
MultiStepParamScheduler, StepLR, StepMomentum,
StepParamScheduler, _ParamScheduler)
__all__ = [
'OPTIM_WRAPPER_CONSTRUCTORS', 'OPTIMIZERS', 'build_optim_wrapper',
'DefaultOptimWrapperConstructor', 'ConstantLR', 'CosineAnnealingLR',
'ExponentialLR', 'LinearLR', 'MultiStepLR', 'StepLR', 'ConstantMomentum',
'CosineAnnealingMomentum', 'ExponentialMomentum', 'LinearMomentum',
'MultiStepMomentum', 'StepMomentum', 'ConstantParamScheduler',
'CosineAnnealingParamScheduler', 'ExponentialParamScheduler',
'LinearParamScheduler', 'MultiStepParamScheduler', 'StepParamScheduler',
'_ParamScheduler', 'OptimWrapper', 'AmpOptimWrapper', 'OptimWrapperDict'
]
|
from .hifigan_pipeline import HIFIGAN_VOCODER_V3_LJSPEECH, HiFiGANVocoderBundle
from .rnnt_pipeline import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3
from .squim_pipeline import SQUIM_OBJECTIVE, SQUIM_SUBJECTIVE, SquimObjectiveBundle, SquimSubjectiveBundle
__all__ = [
"EMFORMER_RNNT_BASE_MUSTC",
"EMFORMER_RNNT_BASE_TEDLIUM3",
"HIFIGAN_VOCODER_V3_LJSPEECH",
"HiFiGANVocoderBundle",
"SQUIM_OBJECTIVE",
"SQUIM_SUBJECTIVE",
"SquimObjectiveBundle",
"SquimSubjectiveBundle",
]
|
from .hifigan_pipeline import HIFIGAN_VOCODER_V3_LJSPEECH, HiFiGANVocoderBundle
from .rnnt_pipeline import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3
from .squim_pipeline import SQUIM_OBJECTIVE, SquimObjectiveBundle
__all__ = [
"EMFORMER_RNNT_BASE_MUSTC",
"EMFORMER_RNNT_BASE_TEDLIUM3",
"HIFIGAN_VOCODER_V3_LJSPEECH",
"HiFiGANVocoderBundle",
"SQUIM_OBJECTIVE",
"SquimObjectiveBundle",
]
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py',
'./centernet_tta.py'
]
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# model settings
model = dict(
type='CenterNet',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
type='ResNet',
depth=18,
norm_eval=False,
norm_cfg=dict(type='BN'),
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
neck=dict(
type='CTResNetNeck',
in_channels=512,
num_deconv_filters=(256, 128, 64),
num_deconv_kernels=(4, 4, 4),
use_dcn=True),
bbox_head=dict(
type='CenterNetHead',
num_classes=80,
in_channels=64,
feat_channels=64,
loss_center_heatmap=dict(type='GaussianFocalLoss', loss_weight=1.0),
loss_wh=dict(type='L1Loss', loss_weight=0.1),
loss_offset=dict(type='L1Loss', loss_weight=1.0)),
train_cfg=None,
test_cfg=dict(topk=100, local_maximum_kernel=3, max_per_img=100))
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='RandomCenterCropPad',
# The cropped images are padded into squares during training,
# but may be less than crop_size.
crop_size=(512, 512),
ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
mean=[0, 0, 0],
std=[1, 1, 1],
to_rgb=True,
test_pad_mode=None),
# Make sure the output is always crop_size.
dict(type='Resize', scale=(512, 512), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
backend_args={{_base_.backend_args}},
to_float32=True),
# don't need Resize
dict(
type='RandomCenterCropPad',
ratios=None,
border=None,
mean=[0, 0, 0],
std=[1, 1, 1],
to_rgb=True,
test_mode=True,
test_pad_mode=['logical_or', 31],
test_pad_add_pix=1),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'border'))
]
# Use RepeatDataset to speed up training
train_dataloader = dict(
batch_size=16,
num_workers=4,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
dataset=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args={{_base_.backend_args}},
)))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# optimizer
# Based on the default settings of modern detectors, SGD performs better
# than the Adam optimizer used in the source code, so we keep the default
# SGD settings; with Adam and lr=5e-4 the mAP is 29.1.
optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2))
max_epochs = 28
# learning policy
# Based on the default settings of modern detectors, we added warmup settings.
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[18, 24], # the real step is [18*5, 24*5]
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)  # the real epoch count is 28*5=140
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (16 samples per GPU)
auto_scale_lr = dict(base_batch_size=128)
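# Hedged note (an addition, not part of the original config): under the
# linear scaling rule assumed by `auto_scale_lr`, the effective LR would be
#     scaled_lr = base_lr * (num_gpus * batch_size_per_gpu) / base_batch_size
# e.g. training on 4 GPUs with 16 samples each gives lr * 64 / 128 = lr * 0.5.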
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py',
'./centernet_tta.py'
]
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# model settings
model = dict(
type='CenterNet',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
type='ResNet',
depth=18,
norm_eval=False,
norm_cfg=dict(type='BN'),
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
neck=dict(
type='CTResNetNeck',
in_channels=512,
num_deconv_filters=(256, 128, 64),
num_deconv_kernels=(4, 4, 4),
use_dcn=True),
bbox_head=dict(
type='CenterNetHead',
num_classes=80,
in_channels=64,
feat_channels=64,
loss_center_heatmap=dict(type='GaussianFocalLoss', loss_weight=1.0),
loss_wh=dict(type='L1Loss', loss_weight=0.1),
loss_offset=dict(type='L1Loss', loss_weight=1.0)),
train_cfg=None,
test_cfg=dict(topk=100, local_maximum_kernel=3, max_per_img=100))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='RandomCenterCropPad',
# The cropped images are padded into squares during training,
# but may be less than crop_size.
crop_size=(512, 512),
ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
mean=[0, 0, 0],
std=[1, 1, 1],
to_rgb=True,
test_pad_mode=None),
# Make sure the output is always crop_size.
dict(type='Resize', scale=(512, 512), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
to_float32=True,
file_client_args={{_base_.file_client_args}}),
# don't need Resize
dict(
type='RandomCenterCropPad',
ratios=None,
border=None,
mean=[0, 0, 0],
std=[1, 1, 1],
to_rgb=True,
test_mode=True,
test_pad_mode=['logical_or', 31],
test_pad_add_pix=1),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'border'))
]
# Use RepeatDataset to speed up training
train_dataloader = dict(
batch_size=16,
num_workers=4,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
dataset=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# optimizer
# Based on the default settings of modern detectors, SGD performs better
# than the Adam optimizer used in the source code, so we keep the default
# SGD settings; with Adam and lr=5e-4 the mAP is 29.1.
optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2))
max_epochs = 28
# learning policy
# Based on the default settings of modern detectors, we added warmup settings.
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[18, 24], # the real step is [18*5, 24*5]
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)  # the real epoch count is 28*5=140
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (16 samples per GPU)
auto_scale_lr = dict(base_batch_size=128)
|
import asyncio
import logging
import os
import threading
import time
from functools import wraps
from uuid import uuid4
from tenacity import retry, stop_after_attempt, wait_exponential
from backend.util.process import get_service_name
logger = logging.getLogger(__name__)
def _log_prefix(resource_name: str, conn_id: str):
"""
Returns a prefix string for logging purposes.
This needs to be called on the fly to get the current process ID & service name,
not the parent process ID & service name.
"""
return f"[PID-{os.getpid()}|THREAD-{threading.get_native_id()}|{get_service_name()}|{resource_name}-{conn_id}]"
def conn_retry(
resource_name: str,
action_name: str,
max_retry: int = 5,
multiplier: int = 1,
min_wait: float = 1,
max_wait: float = 30,
):
conn_id = str(uuid4())
def on_retry(retry_state):
prefix = _log_prefix(resource_name, conn_id)
exception = retry_state.outcome.exception()
logger.warning(f"{prefix} {action_name} failed: {exception}. Retrying now...")
def decorator(func):
is_coroutine = asyncio.iscoroutinefunction(func)
retry_decorator = retry(
stop=stop_after_attempt(max_retry + 1),
wait=wait_exponential(multiplier=multiplier, min=min_wait, max=max_wait),
before_sleep=on_retry,
reraise=True,
)
wrapped_func = retry_decorator(func)
@wraps(func)
def sync_wrapper(*args, **kwargs):
prefix = _log_prefix(resource_name, conn_id)
logger.info(f"{prefix} {action_name} started...")
try:
result = wrapped_func(*args, **kwargs)
logger.info(f"{prefix} {action_name} completed successfully.")
return result
except Exception as e:
logger.error(f"{prefix} {action_name} failed after retries: {e}")
raise
@wraps(func)
async def async_wrapper(*args, **kwargs):
prefix = _log_prefix(resource_name, conn_id)
logger.info(f"{prefix} {action_name} started...")
try:
result = await wrapped_func(*args, **kwargs)
logger.info(f"{prefix} {action_name} completed successfully.")
return result
except Exception as e:
logger.error(f"{prefix} {action_name} failed after retries: {e}")
raise
return async_wrapper if is_coroutine else sync_wrapper
return decorator
func_retry = retry(
reraise=False,
stop=stop_after_attempt(5),
wait=wait_exponential(multiplier=1, min=1, max=30),
)
def continuous_retry(*, retry_delay: float = 1.0):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
while True:
try:
return func(*args, **kwargs)
except Exception as exc:
logger.exception(
"%s failed with %s — retrying in %.2f s",
func.__name__,
exc,
retry_delay,
)
time.sleep(retry_delay)
return wrapper
return decorator
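# Hedged usage sketch (an addition, not part of the upstream module): the
# resource and action names below are placeholders.
if __name__ == "__main__":
    @conn_retry("ExampleDB", "Establish connection", max_retry=2)
    def connect():
        raise ConnectionError("database unreachable")  # always fails, to demonstrate retries
    try:
        connect()
    except ConnectionError:
        print("gave up after retries, as configured")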
|
import asyncio
import logging
import os
import threading
from functools import wraps
from uuid import uuid4
from tenacity import retry, stop_after_attempt, wait_exponential
from backend.util.process import get_service_name
logger = logging.getLogger(__name__)
def _log_prefix(resource_name: str, conn_id: str):
"""
Returns a prefix string for logging purposes.
This needs to be called on the fly to get the current process ID & service name,
not the parent process ID & service name.
"""
return f"[PID-{os.getpid()}|THREAD-{threading.get_native_id()}|{get_service_name()}|{resource_name}-{conn_id}]"
def conn_retry(
resource_name: str,
action_name: str,
max_retry: int = 5,
multiplier: int = 1,
min_wait: float = 1,
max_wait: float = 30,
):
conn_id = str(uuid4())
def on_retry(retry_state):
prefix = _log_prefix(resource_name, conn_id)
exception = retry_state.outcome.exception()
logger.warning(f"{prefix} {action_name} failed: {exception}. Retrying now...")
def decorator(func):
is_coroutine = asyncio.iscoroutinefunction(func)
retry_decorator = retry(
stop=stop_after_attempt(max_retry + 1),
wait=wait_exponential(multiplier=multiplier, min=min_wait, max=max_wait),
before_sleep=on_retry,
reraise=True,
)
wrapped_func = retry_decorator(func)
@wraps(func)
def sync_wrapper(*args, **kwargs):
prefix = _log_prefix(resource_name, conn_id)
logger.info(f"{prefix} {action_name} started...")
try:
result = wrapped_func(*args, **kwargs)
logger.info(f"{prefix} {action_name} completed successfully.")
return result
except Exception as e:
logger.error(f"{prefix} {action_name} failed after retries: {e}")
raise
@wraps(func)
async def async_wrapper(*args, **kwargs):
prefix = _log_prefix(resource_name, conn_id)
logger.info(f"{prefix} {action_name} started...")
try:
result = await wrapped_func(*args, **kwargs)
logger.info(f"{prefix} {action_name} completed successfully.")
return result
except Exception as e:
logger.error(f"{prefix} {action_name} failed after retries: {e}")
raise
return async_wrapper if is_coroutine else sync_wrapper
return decorator
func_retry = retry(
reraise=False,
stop=stop_after_attempt(5),
wait=wait_exponential(multiplier=1, min=1, max=30),
)
|
import asyncio
import json
import logging
from abc import ABC, abstractmethod
from datetime import datetime
from typing import Any, AsyncGenerator, Generator, Generic, Optional, TypeVar
from pydantic import BaseModel
from redis.asyncio.client import PubSub as AsyncPubSub
from redis.client import PubSub
from backend.data import redis
logger = logging.getLogger(__name__)
class DateTimeEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, datetime):
return o.isoformat()
return super().default(o)
M = TypeVar("M", bound=BaseModel)
class BaseRedisEventBus(Generic[M], ABC):
Model: type[M]
@property
@abstractmethod
def event_bus_name(self) -> str:
pass
def _serialize_message(self, item: M, channel_key: str) -> tuple[str, str]:
message = json.dumps(item.model_dump(), cls=DateTimeEncoder)
channel_name = f"{self.event_bus_name}/{channel_key}"
logger.debug(f"[{channel_name}] Publishing an event to Redis {message}")
return message, channel_name
def _deserialize_message(self, msg: Any, channel_key: str) -> M | None:
message_type = "pmessage" if "*" in channel_key else "message"
if msg["type"] != message_type:
return None
try:
data = json.loads(msg["data"])
logger.debug(f"Consuming an event from Redis {data}")
return self.Model(**data)
except Exception as e:
logger.error(f"Failed to parse event result from Redis {msg} {e}")
def _get_pubsub_channel(
self, connection: redis.Redis | redis.AsyncRedis, channel_key: str
) -> tuple[PubSub | AsyncPubSub, str]:
full_channel_name = f"{self.event_bus_name}/{channel_key}"
pubsub = connection.pubsub()
return pubsub, full_channel_name
class RedisEventBus(BaseRedisEventBus[M], ABC):
Model: type[M]
@property
def connection(self) -> redis.Redis:
return redis.get_redis()
def publish_event(self, event: M, channel_key: str):
message, full_channel_name = self._serialize_message(event, channel_key)
self.connection.publish(full_channel_name, message)
def listen_events(self, channel_key: str) -> Generator[M, None, None]:
pubsub, full_channel_name = self._get_pubsub_channel(
self.connection, channel_key
)
assert isinstance(pubsub, PubSub)
if "*" in channel_key:
pubsub.psubscribe(full_channel_name)
else:
pubsub.subscribe(full_channel_name)
for message in pubsub.listen():
if event := self._deserialize_message(message, channel_key):
yield event
class AsyncRedisEventBus(BaseRedisEventBus[M], ABC):
Model: type[M]
@property
async def connection(self) -> redis.AsyncRedis:
return await redis.get_redis_async()
async def publish_event(self, event: M, channel_key: str):
message, full_channel_name = self._serialize_message(event, channel_key)
connection = await self.connection
await connection.publish(full_channel_name, message)
async def listen_events(self, channel_key: str) -> AsyncGenerator[M, None]:
pubsub, full_channel_name = self._get_pubsub_channel(
await self.connection, channel_key
)
assert isinstance(pubsub, AsyncPubSub)
if "*" in channel_key:
await pubsub.psubscribe(full_channel_name)
else:
await pubsub.subscribe(full_channel_name)
async for message in pubsub.listen():
if event := self._deserialize_message(message, channel_key):
yield event
async def wait_for_event(
self, channel_key: str, timeout: Optional[float] = None
) -> M | None:
try:
return await asyncio.wait_for(
anext(aiter(self.listen_events(channel_key))), timeout
)
except TimeoutError:
return None
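# Hedged subclass sketch (an addition, not part of the upstream module): a
# minimal concrete bus; the event model fields and the bus name are
# assumptions.
class _ExampleEvent(BaseModel):
    graph_id: str
    status: str
class _ExampleEventBus(RedisEventBus[_ExampleEvent]):
    Model = _ExampleEvent
    @property
    def event_bus_name(self) -> str:
        return "example_events"
if __name__ == "__main__":
    # Requires a reachable Redis instance configured via backend.data.redis.
    bus = _ExampleEventBus()
    bus.publish_event(_ExampleEvent(graph_id="g1", status="RUNNING"), channel_key="g1")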
|
import asyncio
import json
import logging
from abc import ABC, abstractmethod
from datetime import datetime
from typing import Any, AsyncGenerator, Generator, Generic, Optional, TypeVar
from pydantic import BaseModel
from redis.asyncio.client import PubSub as AsyncPubSub
from redis.client import PubSub
from backend.data import redis
logger = logging.getLogger(__name__)
class DateTimeEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, datetime):
return o.isoformat()
return super().default(o)
M = TypeVar("M", bound=BaseModel)
class BaseRedisEventBus(Generic[M], ABC):
Model: type[M]
@property
@abstractmethod
def event_bus_name(self) -> str:
pass
def _serialize_message(self, item: M, channel_key: str) -> tuple[str, str]:
message = json.dumps(item.model_dump(), cls=DateTimeEncoder)
channel_name = f"{self.event_bus_name}/{channel_key}"
logger.info(f"[{channel_name}] Publishing an event to Redis {message}")
return message, channel_name
def _deserialize_message(self, msg: Any, channel_key: str) -> M | None:
message_type = "pmessage" if "*" in channel_key else "message"
if msg["type"] != message_type:
return None
try:
data = json.loads(msg["data"])
logger.info(f"Consuming an event from Redis {data}")
return self.Model(**data)
except Exception as e:
logger.error(f"Failed to parse event result from Redis {msg} {e}")
def _get_pubsub_channel(
self, connection: redis.Redis | redis.AsyncRedis, channel_key: str
) -> tuple[PubSub | AsyncPubSub, str]:
full_channel_name = f"{self.event_bus_name}/{channel_key}"
pubsub = connection.pubsub()
return pubsub, full_channel_name
class RedisEventBus(BaseRedisEventBus[M], ABC):
Model: type[M]
@property
def connection(self) -> redis.Redis:
return redis.get_redis()
def publish_event(self, event: M, channel_key: str):
message, full_channel_name = self._serialize_message(event, channel_key)
self.connection.publish(full_channel_name, message)
def listen_events(self, channel_key: str) -> Generator[M, None, None]:
pubsub, full_channel_name = self._get_pubsub_channel(
self.connection, channel_key
)
assert isinstance(pubsub, PubSub)
if "*" in channel_key:
pubsub.psubscribe(full_channel_name)
else:
pubsub.subscribe(full_channel_name)
for message in pubsub.listen():
if event := self._deserialize_message(message, channel_key):
yield event
class AsyncRedisEventBus(BaseRedisEventBus[M], ABC):
Model: type[M]
@property
async def connection(self) -> redis.AsyncRedis:
return await redis.get_redis_async()
async def publish_event(self, event: M, channel_key: str):
message, full_channel_name = self._serialize_message(event, channel_key)
connection = await self.connection
await connection.publish(full_channel_name, message)
async def listen_events(self, channel_key: str) -> AsyncGenerator[M, None]:
pubsub, full_channel_name = self._get_pubsub_channel(
await self.connection, channel_key
)
assert isinstance(pubsub, AsyncPubSub)
if "*" in channel_key:
await pubsub.psubscribe(full_channel_name)
else:
await pubsub.subscribe(full_channel_name)
async for message in pubsub.listen():
if event := self._deserialize_message(message, channel_key):
yield event
async def wait_for_event(
self, channel_key: str, timeout: Optional[float] = None
) -> M | None:
try:
return await asyncio.wait_for(
anext(aiter(self.listen_events(channel_key))), timeout
)
except TimeoutError:
return None
|
__version__ = '0.15.1'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.15.0'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools.multion.update_session import (
MultionUpdateSession,
UpdateSessionSchema,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"UpdateSessionSchema": "langchain_community.tools.multion.update_session",
"MultionUpdateSession": "langchain_community.tools.multion.update_session",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"MultionUpdateSession",
"UpdateSessionSchema",
]
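# --- Illustration (not part of this module) ---
# The lazy lookup above relies on module-level ``__getattr__`` (PEP 562):
# Python calls it only when a normal attribute lookup on the module fails.
# A minimal standalone sketch of the same pattern, with hypothetical names:
#
#     # mypkg/compat.py
#     _MOVED = {"OldThing": "mypkg.new_home"}
#
#     def __getattr__(name: str):
#         if name in _MOVED:
#             import importlib
#             import warnings
#             warnings.warn(f"{name} moved to {_MOVED[name]}", DeprecationWarning)
#             return getattr(importlib.import_module(_MOVED[name]), name)
#         raise AttributeError(name)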
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools.multion.update_session import (
MultionUpdateSession,
UpdateSessionSchema,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"UpdateSessionSchema": "langchain_community.tools.multion.update_session",
"MultionUpdateSession": "langchain_community.tools.multion.update_session",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"UpdateSessionSchema",
"MultionUpdateSession",
]
|
"""
This example uses average word embeddings (for example from GloVe). It adds two fully-connected feed-forward layers (dense layers) to create a Deep Averaging Network (DAN).
If 'glove.6B.300d.txt.gz' does not exist, it tries to download it from our server.
See https://public.ukp.informatik.tu-darmstadt.de/reimers/embeddings/
for available word embeddings files
"""
import logging
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, losses, models
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
num_train_epochs = 1
batch_size = 32
output_dir = "output/training_stsbenchmark_avg_word_embeddings-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 2. Define the model
# Map tokens to traditional word embeddings like GloVe
word_embedding_model = models.WordEmbeddings.from_text_file("glove.6B.300d.txt.gz")
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
word_embedding_model.get_word_embedding_dimension(),
pooling_mode="mean",
)
# Add two trainable feed-forward networks (DAN)
sent_embeddings_dimension = pooling_model.get_sentence_embedding_dimension()
dan1 = models.Dense(in_features=sent_embeddings_dimension, out_features=sent_embeddings_dimension)
dan2 = models.Dense(in_features=sent_embeddings_dimension, out_features=sent_embeddings_dimension)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model, dan1, dan2])
# 3. Define our training loss
# CosineSimilarityLoss (https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosinesimilarityloss) needs two text columns and
# one similarity score column (between 0 and 1)
train_loss = losses.CosineSimilarityLoss(model=model)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_train_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="glove-mean-pooling-sts", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = "glove-mean-pooling-sts"
try:
model.push_to_hub(model_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}')`."
)
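# 10. (Optional, illustrative) Load the saved model back and sanity-check it.
# The two sentences below are made up purely for demonstration.
loaded_model = SentenceTransformer(final_output_dir)
embeddings = loaded_model.encode(
    ["A man is eating food.", "A man is eating a piece of bread."]
)
print(loaded_model.similarity(embeddings[0:1], embeddings[1:2]))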
|
"""
This example uses average word embeddings (for example from GloVe). It adds two fully-connected feed-forward layers (dense layers) to create a Deep Averaging Network (DAN).
If 'glove.6B.300d.txt.gz' does not exist, it tries to download it from our server.
See https://public.ukp.informatik.tu-darmstadt.de/reimers/embeddings/
for available word embeddings files
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import models, losses, util
from sentence_transformers import LoggingHandler, SentenceTransformer
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.readers import InputExample
import logging
from datetime import datetime
import os
import csv
import gzip
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Read the dataset
batch_size = 32
model_save_path = "output/training_stsbenchmark_avg_word_embeddings-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
logging.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "dev":
dev_samples.append(inp_example)
elif row["split"] == "test":
test_samples.append(inp_example)
else:
train_samples.append(inp_example)
# Map tokens to traditional word embeddings like GloVe
word_embedding_model = models.WordEmbeddings.from_text_file("glove.6B.300d.txt.gz")
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False,
)
# Add two trainable feed-forward networks (DAN)
sent_embeddings_dimension = pooling_model.get_sentence_embedding_dimension()
dan1 = models.Dense(in_features=sent_embeddings_dimension, out_features=sent_embeddings_dimension)
dan2 = models.Dense(in_features=sent_embeddings_dimension, out_features=sent_embeddings_dimension)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model, dan1, dan2])
# Convert the dataset to a DataLoader ready for training
logging.info("Read STSbenchmark train dataset")
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=batch_size)
train_loss = losses.CosineSimilarityLoss(model=model)
logging.info("Read STSbenchmark dev dataset")
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name="sts-dev")
# Configure the training
num_epochs = 10
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=evaluator,
epochs=num_epochs,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
model = SentenceTransformer(model_save_path)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
model.evaluate(test_evaluator)
|
"""
Sphinx Read the Docs theme.
From https://github.com/ryan-roemer/sphinx-bootstrap-theme.
"""
from os import path
import sphinx
__version__ = "0.5.0"
__version_full__ = __version__
def get_html_theme_path():
"""Return list of HTML theme paths."""
cur_dir = path.abspath(path.dirname(path.dirname(__file__)))
return cur_dir
# See http://www.sphinx-doc.org/en/stable/theming.html#distribute-your-theme-as-a-python-package
def setup(app):
if sphinx.version_info >= (1, 6, 0):
# Register the theme that can be referenced without adding a theme path
app.add_html_theme("sphinx_rtd_theme", path.abspath(path.dirname(__file__)))
if sphinx.version_info >= (1, 8, 0):
# Add Sphinx message catalog for newer versions of Sphinx
# See http://www.sphinx-doc.org/en/master/extdev/appapi.html#sphinx.application.Sphinx.add_message_catalog
rtd_locale_path = path.join(path.abspath(path.dirname(__file__)), "locale")
app.add_message_catalog("sphinx", rtd_locale_path)
return {"parallel_read_safe": True, "parallel_write_safe": True}
|
"""
Sphinx Read the Docs theme.
From https://github.com/ryan-roemer/sphinx-bootstrap-theme.
"""
from os import path
import sphinx
__version__ = "0.5.0"
__version_full__ = __version__
def get_html_theme_path():
"""Return list of HTML theme paths."""
cur_dir = path.abspath(path.dirname(path.dirname(__file__)))
return cur_dir
# See http://www.sphinx-doc.org/en/stable/theming.html#distribute-your-theme-as-a-python-package
def setup(app):
if sphinx.version_info >= (1, 6, 0):
# Register the theme that can be referenced without adding a theme path
app.add_html_theme("sphinx_rtd_theme", path.abspath(path.dirname(__file__)))
if sphinx.version_info >= (1, 8, 0):
# Add Sphinx message catalog for newer versions of Sphinx
# See http://www.sphinx-doc.org/en/master/extdev/appapi.html#sphinx.application.Sphinx.add_message_catalog
rtd_locale_path = path.join(path.abspath(path.dirname(__file__)), "locale")
app.add_message_catalog("sphinx", rtd_locale_path)
return {"parallel_read_safe": True, "parallel_write_safe": True}
|
"""Copyright 2024-2025, XGBoost contributors"""
from functools import partial, update_wrapper
from typing import Any
import pytest
from dask_cuda import LocalCUDACluster
from distributed import Client
import xgboost as xgb
from xgboost import collective as coll
from xgboost import testing as tm
from xgboost.testing.dask import check_external_memory, get_rabit_args
from xgboost.tracker import RabitTracker
@pytest.mark.parametrize("is_qdm", [True, False])
def test_external_memory(is_qdm: bool) -> None:
n_workers = 2
    with LocalCUDACluster(n_workers=n_workers) as cluster:
        with Client(cluster) as client:
            args = get_rabit_args(client, n_workers)
futs = client.map(
check_external_memory,
range(n_workers),
n_workers=n_workers,
device="cuda",
comm_args=args,
is_qdm=is_qdm,
)
client.gather(futs)
@pytest.mark.skipif(**tm.no_loky())
def test_extmem_qdm_distributed() -> None:
from loky import get_reusable_executor
n_samples_per_batch = 2048
n_features = 128
n_batches = 8
def do_train(ordinal: int) -> None:
it = tm.IteratorForTest(
*tm.make_batches(n_samples_per_batch, n_features, n_batches, use_cupy=True),
cache="cache",
on_host=True,
)
Xy = xgb.ExtMemQuantileDMatrix(it)
results: dict[str, Any] = {}
booster = xgb.train(
{"device": f"cuda:{ordinal}"},
num_boost_round=2,
dtrain=Xy,
evals=[(Xy, "Train")],
evals_result=results,
)
assert tm.non_increasing(results["Train"]["rmse"])
tracker = RabitTracker(host_ip="127.0.0.1", n_workers=2)
tracker.start()
args = tracker.worker_args()
def local_test(worker_id: int, rabit_args: dict) -> None:
import cupy as cp
cp.cuda.runtime.setDevice(worker_id)
with coll.CommunicatorContext(**rabit_args, DMLC_TASK_ID=str(worker_id)):
assert coll.get_rank() == worker_id
do_train(coll.get_rank())
n_workers = 2
fn = update_wrapper(partial(local_test, rabit_args=args), local_test)
with get_reusable_executor(max_workers=n_workers) as pool:
results = pool.map(fn, range(n_workers))
|
"""Copyright 2024, XGBoost contributors"""
import pytest
from dask_cuda import LocalCUDACluster
from distributed import Client
from xgboost.testing.dask import check_external_memory, get_rabit_args
@pytest.mark.parametrize("is_qdm", [True, False])
def test_external_memory(is_qdm: bool) -> None:
n_workers = 2
    with LocalCUDACluster(n_workers=n_workers) as cluster:
        with Client(cluster) as client:
            args = get_rabit_args(client, n_workers)
futs = client.map(
check_external_memory,
range(n_workers),
n_workers=n_workers,
device="cuda",
comm_args=args,
is_qdm=is_qdm,
)
client.gather(futs)
|
import os
import urllib.parse
import urllib.request
from contextlib import nullcontext
from ...helper import __windows__
def _uri_to_blob(uri: str) -> bytes:
"""Convert uri to blob
Internally it reads uri into blob.
:param uri: the uri of Document
:return: blob bytes.
"""
if urllib.parse.urlparse(uri).scheme in {'http', 'https', 'data'}:
req = urllib.request.Request(uri, headers={'User-Agent': 'Mozilla/5.0'})
with urllib.request.urlopen(req) as fp:
return fp.read()
elif os.path.exists(uri):
with open(uri, 'rb') as fp:
return fp.read()
else:
raise FileNotFoundError(f'`{uri}` is not a URL or a valid local path')
def _get_file_context(file):
if hasattr(file, 'write'):
file_ctx = nullcontext(file)
else:
if __windows__:
file_ctx = open(file, 'wb', newline='')
else:
file_ctx = open(file, 'wb')
return file_ctx
def _to_datauri(
mimetype, data, charset: str = 'utf-8', base64: bool = False, binary: bool = True
) -> str:
"""
Convert data to data URI.
:param mimetype: MIME types (e.g. 'text/plain','image/png' etc.)
:param data: Data representations.
:param charset: Charset may be any character set registered with IANA
:param base64: Used to encode arbitrary octet sequences into a form that satisfies the rules of 7bit. Designed to be efficient for non-text 8 bit and binary data. Sometimes used for text data that frequently uses non-US-ASCII characters.
:param binary: True if from binary data False for other data (e.g. text)
:return: URI data
"""
parts = ['data:', mimetype]
if charset is not None:
parts.extend([';charset=', charset])
if base64:
parts.append(';base64')
from base64 import encodebytes as encode64
if binary:
encoded_data = encode64(data).decode(charset).replace('\n', '').strip()
else:
encoded_data = encode64(data).strip()
else:
from urllib.parse import quote_from_bytes, quote
if binary:
encoded_data = quote_from_bytes(data)
else:
encoded_data = quote(data)
parts.extend([',', encoded_data])
return ''.join(parts)
def _is_uri(value: str) -> bool:
scheme = urllib.parse.urlparse(value).scheme
return (
(scheme in {'http', 'https'})
or (scheme in {'data'})
or os.path.exists(value)
or os.access(os.path.dirname(value), os.W_OK)
)
def _is_datauri(value: str) -> bool:
scheme = urllib.parse.urlparse(value).scheme
return scheme in {'data'}
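# --- Illustration (not part of the original module) ---
# Quick sanity checks; the expected values follow directly from the helpers above.
if __name__ == '__main__':
    assert (
        _to_datauri('text/plain', b'hello', base64=True)
        == 'data:text/plain;charset=utf-8;base64,aGVsbG8='
    )
    assert _is_datauri('data:text/plain;charset=utf-8;base64,aGVsbG8=')
    assert not _is_datauri('https://example.com')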
|
import os
import urllib.parse
import urllib.request
from contextlib import nullcontext
from ...helper import __windows__
def _uri_to_blob(uri: str) -> bytes:
"""Convert uri to blob
Internally it reads uri into blob.
:param uri: the uri of Document
:return: blob bytes.
"""
if urllib.parse.urlparse(uri).scheme in {'http', 'https', 'data'}:
req = urllib.request.Request(uri, headers={'User-Agent': 'Mozilla/5.0'})
with urllib.request.urlopen(req) as fp:
return fp.read()
elif os.path.exists(uri):
with open(uri, 'rb') as fp:
return fp.read()
else:
raise FileNotFoundError(f'{uri} is not a URL or a valid local path')
def _get_file_context(file):
if hasattr(file, 'write'):
file_ctx = nullcontext(file)
else:
if __windows__:
file_ctx = open(file, 'wb', newline='')
else:
file_ctx = open(file, 'wb')
return file_ctx
def _to_datauri(
mimetype, data, charset: str = 'utf-8', base64: bool = False, binary: bool = True
) -> str:
"""
Convert data to data URI.
:param mimetype: MIME types (e.g. 'text/plain','image/png' etc.)
:param data: Data representations.
:param charset: Charset may be any character set registered with IANA
:param base64: Used to encode arbitrary octet sequences into a form that satisfies the rules of 7bit. Designed to be efficient for non-text 8 bit and binary data. Sometimes used for text data that frequently uses non-US-ASCII characters.
:param binary: True if from binary data False for other data (e.g. text)
:return: URI data
"""
parts = ['data:', mimetype]
if charset is not None:
parts.extend([';charset=', charset])
if base64:
parts.append(';base64')
from base64 import encodebytes as encode64
if binary:
encoded_data = encode64(data).decode(charset).replace('\n', '').strip()
else:
encoded_data = encode64(data).strip()
else:
from urllib.parse import quote_from_bytes, quote
if binary:
encoded_data = quote_from_bytes(data)
else:
encoded_data = quote(data)
parts.extend([',', encoded_data])
return ''.join(parts)
def _is_uri(value: str) -> bool:
scheme = urllib.parse.urlparse(value).scheme
return (
(scheme in {'http', 'https'})
or (scheme in {'data'})
or os.path.exists(value)
or os.access(os.path.dirname(value), os.W_OK)
)
def _is_datauri(value: str) -> bool:
scheme = urllib.parse.urlparse(value).scheme
return scheme in {'data'}
|
from contextlib import asynccontextmanager as asynccontextmanager
from typing import AsyncGenerator, ContextManager, TypeVar
import anyio.to_thread
from anyio import CapacityLimiter
from starlette.concurrency import iterate_in_threadpool as iterate_in_threadpool # noqa
from starlette.concurrency import run_in_threadpool as run_in_threadpool # noqa
from starlette.concurrency import ( # noqa
run_until_first_complete as run_until_first_complete,
)
_T = TypeVar("_T")
@asynccontextmanager
async def contextmanager_in_threadpool(
cm: ContextManager[_T],
) -> AsyncGenerator[_T, None]:
    # blocking __exit__ from running while waiting on a free thread
    # can create race conditions/deadlocks if the context manager itself
# has its own internal pool (e.g. a database connection pool)
# to avoid this we let __exit__ run without a capacity limit
# since we're creating a new limiter for each call, any non-zero limit
# works (1 is arbitrary)
exit_limiter = CapacityLimiter(1)
try:
yield await run_in_threadpool(cm.__enter__)
except Exception as e:
ok = bool(
await anyio.to_thread.run_sync(
cm.__exit__, type(e), e, e.__traceback__, limiter=exit_limiter
)
)
if not ok:
raise e
else:
await anyio.to_thread.run_sync(
cm.__exit__, None, None, None, limiter=exit_limiter
)
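# --- Usage sketch (illustrative only) ---
# Entering a blocking context manager from async code; ``open_session`` is a
# hypothetical stand-in for e.g. a database session factory:
#
#     from contextlib import contextmanager
#
#     @contextmanager
#     def open_session():
#         session = ...  # acquire a blocking resource
#         try:
#             yield session
#         finally:
#             ...  # release it
#
#     async def handler() -> None:
#         async with contextmanager_in_threadpool(open_session()) as session:
#             ...  # use the session without blocking the event loop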
|
from contextlib import asynccontextmanager as asynccontextmanager
from typing import AsyncGenerator, ContextManager, TypeVar
import anyio
from anyio import CapacityLimiter
from starlette.concurrency import iterate_in_threadpool as iterate_in_threadpool # noqa
from starlette.concurrency import run_in_threadpool as run_in_threadpool # noqa
from starlette.concurrency import ( # noqa
run_until_first_complete as run_until_first_complete,
)
_T = TypeVar("_T")
@asynccontextmanager
async def contextmanager_in_threadpool(
cm: ContextManager[_T],
) -> AsyncGenerator[_T, None]:
    # blocking __exit__ from running while waiting on a free thread
    # can create race conditions/deadlocks if the context manager itself
# has its own internal pool (e.g. a database connection pool)
# to avoid this we let __exit__ run without a capacity limit
# since we're creating a new limiter for each call, any non-zero limit
# works (1 is arbitrary)
exit_limiter = CapacityLimiter(1)
try:
yield await run_in_threadpool(cm.__enter__)
except Exception as e:
ok = bool(
await anyio.to_thread.run_sync(
cm.__exit__, type(e), e, None, limiter=exit_limiter
)
)
if not ok:
raise e
else:
await anyio.to_thread.run_sync(
cm.__exit__, None, None, None, limiter=exit_limiter
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api.utils import bounding_boxes
from keras.api.utils import legacy
from keras.src.backend.common.global_state import clear_session
from keras.src.backend.common.keras_tensor import is_keras_tensor
from keras.src.backend.common.variables import standardize_dtype
from keras.src.layers.preprocessing.feature_space import FeatureSpace
from keras.src.ops.operation_utils import get_source_inputs
from keras.src.saving.object_registration import CustomObjectScope
from keras.src.saving.object_registration import (
CustomObjectScope as custom_object_scope,
)
from keras.src.saving.object_registration import get_custom_objects
from keras.src.saving.object_registration import get_registered_name
from keras.src.saving.object_registration import get_registered_object
from keras.src.saving.object_registration import register_keras_serializable
from keras.src.saving.serialization_lib import deserialize_keras_object
from keras.src.saving.serialization_lib import serialize_keras_object
from keras.src.trainers.data_adapters.data_adapter_utils import (
pack_x_y_sample_weight,
)
from keras.src.trainers.data_adapters.data_adapter_utils import (
unpack_x_y_sample_weight,
)
from keras.src.trainers.data_adapters.py_dataset_adapter import PyDataset
from keras.src.trainers.data_adapters.py_dataset_adapter import (
PyDataset as Sequence,
)
from keras.src.utils.audio_dataset_utils import audio_dataset_from_directory
from keras.src.utils.config import Config
from keras.src.utils.dataset_utils import split_dataset
from keras.src.utils.file_utils import get_file
from keras.src.utils.image_dataset_utils import image_dataset_from_directory
from keras.src.utils.image_utils import array_to_img
from keras.src.utils.image_utils import img_to_array
from keras.src.utils.image_utils import load_img
from keras.src.utils.image_utils import save_img
from keras.src.utils.io_utils import disable_interactive_logging
from keras.src.utils.io_utils import enable_interactive_logging
from keras.src.utils.io_utils import is_interactive_logging_enabled
from keras.src.utils.model_visualization import model_to_dot
from keras.src.utils.model_visualization import plot_model
from keras.src.utils.numerical_utils import normalize
from keras.src.utils.numerical_utils import to_categorical
from keras.src.utils.progbar import Progbar
from keras.src.utils.rng_utils import set_random_seed
from keras.src.utils.sequence_utils import pad_sequences
from keras.src.utils.text_dataset_utils import text_dataset_from_directory
from keras.src.utils.timeseries_dataset_utils import (
timeseries_dataset_from_array,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api.utils import legacy
from keras.src.backend.common.global_state import clear_session
from keras.src.backend.common.keras_tensor import is_keras_tensor
from keras.src.backend.common.variables import standardize_dtype
from keras.src.layers.preprocessing.feature_space import FeatureSpace
from keras.src.ops.operation_utils import get_source_inputs
from keras.src.saving.object_registration import CustomObjectScope
from keras.src.saving.object_registration import (
CustomObjectScope as custom_object_scope,
)
from keras.src.saving.object_registration import get_custom_objects
from keras.src.saving.object_registration import get_registered_name
from keras.src.saving.object_registration import get_registered_object
from keras.src.saving.object_registration import register_keras_serializable
from keras.src.saving.serialization_lib import deserialize_keras_object
from keras.src.saving.serialization_lib import serialize_keras_object
from keras.src.trainers.data_adapters.data_adapter_utils import (
pack_x_y_sample_weight,
)
from keras.src.trainers.data_adapters.data_adapter_utils import (
unpack_x_y_sample_weight,
)
from keras.src.trainers.data_adapters.py_dataset_adapter import PyDataset
from keras.src.trainers.data_adapters.py_dataset_adapter import (
PyDataset as Sequence,
)
from keras.src.utils.audio_dataset_utils import audio_dataset_from_directory
from keras.src.utils.config import Config
from keras.src.utils.dataset_utils import split_dataset
from keras.src.utils.file_utils import get_file
from keras.src.utils.image_dataset_utils import image_dataset_from_directory
from keras.src.utils.image_utils import array_to_img
from keras.src.utils.image_utils import img_to_array
from keras.src.utils.image_utils import load_img
from keras.src.utils.image_utils import save_img
from keras.src.utils.io_utils import disable_interactive_logging
from keras.src.utils.io_utils import enable_interactive_logging
from keras.src.utils.io_utils import is_interactive_logging_enabled
from keras.src.utils.model_visualization import model_to_dot
from keras.src.utils.model_visualization import plot_model
from keras.src.utils.numerical_utils import normalize
from keras.src.utils.numerical_utils import to_categorical
from keras.src.utils.progbar import Progbar
from keras.src.utils.rng_utils import set_random_seed
from keras.src.utils.sequence_utils import pad_sequences
from keras.src.utils.text_dataset_utils import text_dataset_from_directory
from keras.src.utils.timeseries_dataset_utils import (
timeseries_dataset_from_array,
)
|
from enum import Enum
# --8<-- [start:ProviderName]
class ProviderName(str, Enum):
ANTHROPIC = "anthropic"
COMPASS = "compass"
DISCORD = "discord"
D_ID = "d_id"
E2B = "e2b"
EXA = "exa"
FAL = "fal"
GITHUB = "github"
GOOGLE = "google"
GOOGLE_MAPS = "google_maps"
GROQ = "groq"
HUBSPOT = "hubspot"
IDEOGRAM = "ideogram"
JINA = "jina"
MEDIUM = "medium"
NOTION = "notion"
NVIDIA = "nvidia"
OLLAMA = "ollama"
OPENAI = "openai"
OPENWEATHERMAP = "openweathermap"
OPEN_ROUTER = "open_router"
PINECONE = "pinecone"
REPLICATE = "replicate"
REVID = "revid"
SLANT3D = "slant3d"
UNREAL_SPEECH = "unreal_speech"
# --8<-- [end:ProviderName]
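# Because ProviderName subclasses ``str``, members compare equal to their raw
# values and round-trip through the constructor (illustrative):
#
#     assert ProviderName.OPENAI == "openai"
#     assert ProviderName("openai") is ProviderName.OPENAI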
|
from enum import Enum
# --8<-- [start:ProviderName]
class ProviderName(str, Enum):
ANTHROPIC = "anthropic"
COMPASS = "compass"
DISCORD = "discord"
D_ID = "d_id"
E2B = "e2b"
EXA = "exa"
FAL = "fal"
GITHUB = "github"
GOOGLE = "google"
GOOGLE_MAPS = "google_maps"
GROQ = "groq"
HUBSPOT = "hubspot"
IDEOGRAM = "ideogram"
JINA = "jina"
MEDIUM = "medium"
NOTION = "notion"
OLLAMA = "ollama"
OPENAI = "openai"
OPENWEATHERMAP = "openweathermap"
OPEN_ROUTER = "open_router"
PINECONE = "pinecone"
REPLICATE = "replicate"
REVID = "revid"
SLANT3D = "slant3d"
UNREAL_SPEECH = "unreal_speech"
# --8<-- [end:ProviderName]
|
# training schedule for 2x
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=24, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=24,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
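# When enabled, the LR is rescaled with the linear scaling rule, roughly:
#     scaled_lr = 0.02 * (num_gpus * samples_per_gpu) / 16
# e.g. 4 GPUs x 2 samples per GPU -> 0.02 * 8 / 16 = 0.01.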
|
# training schedule for 2x
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=24, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=24,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
|
from typing import Any, Dict, List, Optional, Tuple
from llama_index.core.base.base_query_engine import BaseQueryEngine
from llama_index.core.base.response.schema import RESPONSE_TYPE
from llama_index.core.callbacks.schema import CBEventType, EventPayload
from llama_index.core.indices.composability.graph import ComposableGraph
from llama_index.core.schema import IndexNode, NodeWithScore, QueryBundle, TextNode
from llama_index.core.settings import Settings
import llama_index.core.instrumentation as instrument
dispatcher = instrument.get_dispatcher(__name__)
class ComposableGraphQueryEngine(BaseQueryEngine):
"""
Composable graph query engine.
This query engine can operate over a ComposableGraph.
It can take in custom query engines for its sub-indices.
Args:
graph (ComposableGraph): A ComposableGraph object.
custom_query_engines (Optional[Dict[str, BaseQueryEngine]]): A dictionary of
custom query engines.
recursive (bool): Whether to recursively query the graph.
**kwargs: additional arguments to be passed to the underlying index query
engine.
"""
def __init__(
self,
graph: ComposableGraph,
custom_query_engines: Optional[Dict[str, BaseQueryEngine]] = None,
recursive: bool = True,
**kwargs: Any,
) -> None:
"""Init params."""
self._graph = graph
self._custom_query_engines = custom_query_engines or {}
self._kwargs = kwargs
# additional configs
self._recursive = recursive
callback_manager = Settings.callback_manager
super().__init__(callback_manager=callback_manager)
def _get_prompt_modules(self) -> Dict[str, Any]:
"""Get prompt modules."""
return {}
async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
return self._query_index(query_bundle, index_id=None, level=0)
@dispatcher.span
def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
return self._query_index(query_bundle, index_id=None, level=0)
def _query_index(
self,
query_bundle: QueryBundle,
index_id: Optional[str] = None,
level: int = 0,
) -> RESPONSE_TYPE:
"""Query a single index."""
index_id = index_id or self._graph.root_id
with self.callback_manager.event(
CBEventType.QUERY, payload={EventPayload.QUERY_STR: query_bundle.query_str}
) as query_event:
# get query engine
if index_id in self._custom_query_engines:
query_engine = self._custom_query_engines[index_id]
else:
query_engine = self._graph.get_index(index_id).as_query_engine(
**self._kwargs
)
with self.callback_manager.event(
CBEventType.RETRIEVE,
payload={EventPayload.QUERY_STR: query_bundle.query_str},
) as retrieve_event:
nodes = query_engine.retrieve(query_bundle)
retrieve_event.on_end(payload={EventPayload.NODES: nodes})
if self._recursive:
# do recursion here
nodes_for_synthesis = []
additional_source_nodes = []
for node_with_score in nodes:
node_with_score, source_nodes = self._fetch_recursive_nodes(
node_with_score, query_bundle, level
)
nodes_for_synthesis.append(node_with_score)
additional_source_nodes.extend(source_nodes)
response = query_engine.synthesize(
query_bundle, nodes_for_synthesis, additional_source_nodes
)
else:
response = query_engine.synthesize(query_bundle, nodes)
query_event.on_end(payload={EventPayload.RESPONSE: response})
return response
def _fetch_recursive_nodes(
self,
node_with_score: NodeWithScore,
query_bundle: QueryBundle,
level: int,
) -> Tuple[NodeWithScore, List[NodeWithScore]]:
"""
Fetch nodes.
Uses existing node if it's not an index node.
Otherwise fetch response from corresponding index.
"""
if isinstance(node_with_score.node, IndexNode):
index_node = node_with_score.node
# recursive call
response = self._query_index(query_bundle, index_node.index_id, level + 1)
new_node = TextNode(text=str(response))
new_node_with_score = NodeWithScore(
node=new_node, score=node_with_score.score
)
return new_node_with_score, response.source_nodes
else:
return node_with_score, []
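# --- Usage sketch (illustrative only) ---
# ``my_graph`` stands in for an already-built ComposableGraph; the engine then
# behaves like any other BaseQueryEngine:
#
#     engine = ComposableGraphQueryEngine(my_graph, recursive=True)
#     response = engine.query("What does the corpus say about X?")
#     print(response)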
|
from typing import Any, Dict, List, Optional, Tuple
from llama_index.core.base.base_query_engine import BaseQueryEngine
from llama_index.core.base.response.schema import RESPONSE_TYPE
from llama_index.core.callbacks.schema import CBEventType, EventPayload
from llama_index.core.indices.composability.graph import ComposableGraph
from llama_index.core.schema import IndexNode, NodeWithScore, QueryBundle, TextNode
from llama_index.core.settings import Settings
import llama_index.core.instrumentation as instrument
dispatcher = instrument.get_dispatcher(__name__)
class ComposableGraphQueryEngine(BaseQueryEngine):
"""Composable graph query engine.
This query engine can operate over a ComposableGraph.
It can take in custom query engines for its sub-indices.
Args:
graph (ComposableGraph): A ComposableGraph object.
custom_query_engines (Optional[Dict[str, BaseQueryEngine]]): A dictionary of
custom query engines.
recursive (bool): Whether to recursively query the graph.
**kwargs: additional arguments to be passed to the underlying index query
engine.
"""
def __init__(
self,
graph: ComposableGraph,
custom_query_engines: Optional[Dict[str, BaseQueryEngine]] = None,
recursive: bool = True,
**kwargs: Any,
) -> None:
"""Init params."""
self._graph = graph
self._custom_query_engines = custom_query_engines or {}
self._kwargs = kwargs
# additional configs
self._recursive = recursive
callback_manager = Settings.callback_manager
super().__init__(callback_manager=callback_manager)
def _get_prompt_modules(self) -> Dict[str, Any]:
"""Get prompt modules."""
return {}
async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
return self._query_index(query_bundle, index_id=None, level=0)
@dispatcher.span
def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
return self._query_index(query_bundle, index_id=None, level=0)
def _query_index(
self,
query_bundle: QueryBundle,
index_id: Optional[str] = None,
level: int = 0,
) -> RESPONSE_TYPE:
"""Query a single index."""
index_id = index_id or self._graph.root_id
with self.callback_manager.event(
CBEventType.QUERY, payload={EventPayload.QUERY_STR: query_bundle.query_str}
) as query_event:
# get query engine
if index_id in self._custom_query_engines:
query_engine = self._custom_query_engines[index_id]
else:
query_engine = self._graph.get_index(index_id).as_query_engine(
**self._kwargs
)
with self.callback_manager.event(
CBEventType.RETRIEVE,
payload={EventPayload.QUERY_STR: query_bundle.query_str},
) as retrieve_event:
nodes = query_engine.retrieve(query_bundle)
retrieve_event.on_end(payload={EventPayload.NODES: nodes})
if self._recursive:
# do recursion here
nodes_for_synthesis = []
additional_source_nodes = []
for node_with_score in nodes:
node_with_score, source_nodes = self._fetch_recursive_nodes(
node_with_score, query_bundle, level
)
nodes_for_synthesis.append(node_with_score)
additional_source_nodes.extend(source_nodes)
response = query_engine.synthesize(
query_bundle, nodes_for_synthesis, additional_source_nodes
)
else:
response = query_engine.synthesize(query_bundle, nodes)
query_event.on_end(payload={EventPayload.RESPONSE: response})
return response
def _fetch_recursive_nodes(
self,
node_with_score: NodeWithScore,
query_bundle: QueryBundle,
level: int,
) -> Tuple[NodeWithScore, List[NodeWithScore]]:
"""Fetch nodes.
Uses existing node if it's not an index node.
Otherwise fetch response from corresponding index.
"""
if isinstance(node_with_score.node, IndexNode):
index_node = node_with_score.node
# recursive call
response = self._query_index(query_bundle, index_node.index_id, level + 1)
new_node = TextNode(text=str(response))
new_node_with_score = NodeWithScore(
node=new_node, score=node_with_score.score
)
return new_node_with_score, response.source_nodes
else:
return node_with_score, []
|
# model settings
model = dict(
type='RPN',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=3,
strides=(1, 2, 2),
dilations=(1, 1, 1),
out_indices=(2, ),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
neck=None,
rpn_head=dict(
type='RPNHead',
in_channels=1024,
feat_channels=1024,
anchor_generator=dict(
type='AnchorGenerator',
scales=[2, 4, 8, 16, 32],
ratios=[0.5, 1.0, 2.0],
strides=[16]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms_pre=12000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0)))
|
# model settings
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
type='RPN',
preprocess_cfg=preprocess_cfg,
backbone=dict(
type='ResNet',
depth=50,
num_stages=3,
strides=(1, 2, 2),
dilations=(1, 1, 1),
out_indices=(2, ),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
neck=None,
rpn_head=dict(
type='RPNHead',
in_channels=1024,
feat_channels=1024,
anchor_generator=dict(
type='AnchorGenerator',
scales=[2, 4, 8, 16, 32],
ratios=[0.5, 1.0, 2.0],
strides=[16]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms_pre=12000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0)))
|
import csv
import os
from pathlib import Path
from typing import List, Dict, Tuple, Union
import torchaudio
from torch import Tensor
from torch.utils.data import Dataset
def load_commonvoice_item(
line: List[str], header: List[str], path: str, folder_audio: str, ext_audio: str
) -> Tuple[Tensor, int, Dict[str, str]]:
    # Each line has the following data:
# client_id, path, sentence, up_votes, down_votes, age, gender, accent
assert header[1] == "path"
fileid = line[1]
filename = os.path.join(path, folder_audio, fileid)
if not filename.endswith(ext_audio):
filename += ext_audio
waveform, sample_rate = torchaudio.load(filename)
dic = dict(zip(header, line))
return waveform, sample_rate, dic
class COMMONVOICE(Dataset):
"""Create a Dataset for *CommonVoice* [:footcite:`ardila2020common`].
Args:
root (str or Path): Path to the directory where the dataset is located.
(Where the ``tsv`` file is present.)
tsv (str, optional):
The name of the tsv file used to construct the metadata, such as
``"train.tsv"``, ``"test.tsv"``, ``"dev.tsv"``, ``"invalidated.tsv"``,
``"validated.tsv"`` and ``"other.tsv"``. (default: ``"train.tsv"``)
"""
_ext_txt = ".txt"
_ext_audio = ".mp3"
_folder_audio = "clips"
def __init__(self, root: Union[str, Path], tsv: str = "train.tsv") -> None:
# Get string representation of 'root' in case Path object is passed
self._path = os.fspath(root)
self._tsv = os.path.join(self._path, tsv)
with open(self._tsv, "r") as tsv_:
walker = csv.reader(tsv_, delimiter="\t")
self._header = next(walker)
self._walker = list(walker)
def __getitem__(self, n: int) -> Tuple[Tensor, int, Dict[str, str]]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, Dict[str, str]): ``(waveform, sample_rate, dictionary)``, where dictionary
is built from the TSV file with the following keys: ``client_id``, ``path``, ``sentence``,
``up_votes``, ``down_votes``, ``age``, ``gender`` and ``accent``.
"""
line = self._walker[n]
return load_commonvoice_item(line, self._header, self._path, self._folder_audio, self._ext_audio)
def __len__(self) -> int:
return len(self._walker)
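# --- Usage sketch (illustrative only) ---
# ``/path/to/CommonVoice/en`` is a hypothetical dataset root containing the
# tsv files and a ``clips/`` folder:
if __name__ == "__main__":
    dataset = COMMONVOICE("/path/to/CommonVoice/en", tsv="validated.tsv")
    waveform, sample_rate, metadata = dataset[0]
    print(waveform.shape, sample_rate, metadata["sentence"])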
|
import csv
import os
from pathlib import Path
from typing import List, Dict, Tuple, Union
import torchaudio
from torch import Tensor
from torch.utils.data import Dataset
def load_commonvoice_item(
line: List[str], header: List[str], path: str, folder_audio: str, ext_audio: str
) -> Tuple[Tensor, int, Dict[str, str]]:
    # Each line has the following data:
# client_id, path, sentence, up_votes, down_votes, age, gender, accent
assert header[1] == "path"
fileid = line[1]
filename = os.path.join(path, folder_audio, fileid)
if not filename.endswith(ext_audio):
filename += ext_audio
waveform, sample_rate = torchaudio.load(filename)
dic = dict(zip(header, line))
return waveform, sample_rate, dic
class COMMONVOICE(Dataset):
"""Create a Dataset for CommonVoice.
Args:
root (str or Path): Path to the directory where the dataset is located.
(Where the ``tsv`` file is present.)
tsv (str, optional):
The name of the tsv file used to construct the metadata, such as
``"train.tsv"``, ``"test.tsv"``, ``"dev.tsv"``, ``"invalidated.tsv"``,
``"validated.tsv"`` and ``"other.tsv"``. (default: ``"train.tsv"``)
"""
_ext_txt = ".txt"
_ext_audio = ".mp3"
_folder_audio = "clips"
def __init__(self, root: Union[str, Path], tsv: str = "train.tsv") -> None:
# Get string representation of 'root' in case Path object is passed
self._path = os.fspath(root)
self._tsv = os.path.join(self._path, tsv)
with open(self._tsv, "r") as tsv_:
walker = csv.reader(tsv_, delimiter="\t")
self._header = next(walker)
self._walker = list(walker)
def __getitem__(self, n: int) -> Tuple[Tensor, int, Dict[str, str]]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, Dict[str, str]): ``(waveform, sample_rate, dictionary)``, where dictionary
is built from the TSV file with the following keys: ``client_id``, ``path``, ``sentence``,
``up_votes``, ``down_votes``, ``age``, ``gender`` and ``accent``.
"""
line = self._walker[n]
return load_commonvoice_item(line, self._header, self._path, self._folder_audio, self._ext_audio)
def __len__(self) -> int:
return len(self._walker)
|
_base_ = 'deformable-detr_r50_16xb2-50e_coco.py'
model = dict(with_box_refine=True)
|
_base_ = 'deformable-detr_r50_16xb2-50e_coco.py'
model = dict(bbox_head=dict(with_box_refine=True))
|
import subprocess
import pytest
from clip_text import CLIPTextEncoder
from jina import Document, DocumentArray, Flow
_EMBEDDING_DIM = 512
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') for _ in range(50)]
)
with Flow(return_results=True).add(uses=CLIPTextEncoder) as flow:
resp = flow.post(
on='/index',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image}',
],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:cuda',
],
timeout=30,
check=True,
)
|
import subprocess
import pytest
from jina import Document, DocumentArray, Flow
from ...clip_text import CLIPTextEncoder
_EMBEDDING_DIM = 512
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') for _ in range(50)]
)
with Flow(return_results=True).add(uses=CLIPTextEncoder) as flow:
resp = flow.post(
on='/index',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image}',
],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:cuda',
],
timeout=30,
check=True,
)
|
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for DeiT."""
from ...image_processing_utils_fast import BaseImageProcessorFast
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
PILImageResampling,
)
from ...utils import auto_docstring
@auto_docstring
class DeiTImageProcessorFast(BaseImageProcessorFast):
# To be checked against the slow image processor
# None values left after checking can be removed
resample = PILImageResampling.BICUBIC
image_mean = IMAGENET_STANDARD_MEAN
image_std = IMAGENET_STANDARD_STD
size = {"height": 256, "width": 256}
crop_size = {"height": 224, "width": 224}
do_resize = True
do_center_crop = True
do_rescale = True
do_normalize = True
__all__ = ["DeiTImageProcessorFast"]
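# --- Usage sketch (illustrative only) ---
# The processor is callable like any HF image processor; the PIL image below
# is synthetic and purely for demonstration:
#
#     from PIL import Image
#     processor = DeiTImageProcessorFast()
#     image = Image.new("RGB", (320, 320))
#     batch = processor(images=image, return_tensors="pt")
#     print(batch["pixel_values"].shape)  # expected: torch.Size([1, 3, 224, 224])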
|
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for DeiT."""
from ...image_processing_utils_fast import BASE_IMAGE_PROCESSOR_FAST_DOCSTRING, BaseImageProcessorFast
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
PILImageResampling,
)
from ...utils import add_start_docstrings
@add_start_docstrings(
"Constructs a fast DeiT image processor.",
BASE_IMAGE_PROCESSOR_FAST_DOCSTRING,
)
class DeiTImageProcessorFast(BaseImageProcessorFast):
# To be checked against the slow image processor
# None values left after checking can be removed
resample = PILImageResampling.BICUBIC
image_mean = IMAGENET_STANDARD_MEAN
image_std = IMAGENET_STANDARD_STD
size = {"height": 256, "width": 256}
crop_size = {"height": 224, "width": 224}
do_resize = True
do_center_crop = True
do_rescale = True
do_normalize = True
__all__ = ["DeiTImageProcessorFast"]
|
# mypy: allow-untyped-defs
import torch.distributed as dist
from torch._C._distributed_c10d import FakeProcessGroup
class FakeStore(dist.Store):
"""
    A fake store is a fake key-value store used simply for initializing a fake
    process group; one can use either FakeStore or HashStore.
"""
def _create_fake_pg(prefix_store, rank, world_size, timeout):
"""
A fake process group (not related to FakeTensor) is a process group which
doesn't actually do any communication, it just hallucinates some
communication. You can run a single rank with a fake process group
without needing multiple processes (simulates per-rank behavior)
NOTE: This is not a real process group, and it would produce wrong results
    for every collective. It should be used as a convenient tool when
    experimenting with distributed code without caring about the actual data.
"""
return FakeProcessGroup(rank, world_size)
dist.Backend.register_backend("fake", _create_fake_pg, devices=["cpu", "cuda"])
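# --- Usage sketch (illustrative only) ---
# Initializing a single-rank "fake" group; no other processes are required:
#
#     import torch.distributed as dist
#     store = FakeStore()
#     dist.init_process_group(backend="fake", rank=0, world_size=2, store=store)
#     # collectives now "complete" without communicating (results are meaningless)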
|
# mypy: allow-untyped-defs
import torch.distributed as dist
from torch._C._distributed_c10d import FakeProcessGroup
class FakeStore(dist.Store):
"""
    A fake store is a fake key-value store used simply for initializing a fake
    process group; one can use either FakeStore or HashStore.
"""
def _create_fake_pg(prefix_store, rank, world_size, timeout):
"""
A fake process group (not related to FakeTensor) is a process group which
doesn't actually do any communication, it just hallucinates some
communication. You can run a single rank with a fake process group
without needing multiple processes (simulates per-rank behavior)
NOTE: This is not a real process group, and it would produce wrong results
    for every collective. It should be used as a convenient tool when
    experimenting with distributed code without caring about the actual data.
"""
return FakeProcessGroup(rank, world_size)
dist.Backend.register_backend("fake", _create_fake_pg, devices=["cpu", "cuda"])
|
from __future__ import annotations
from typing import Any, Iterable
import torch
from torch import Tensor, nn
from sentence_transformers import util
from sentence_transformers.SentenceTransformer import SentenceTransformer
class CoSENTLoss(nn.Module):
def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.pairwise_cos_sim) -> None:
"""
This class implements CoSENT (Cosine Sentence) loss.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition.
Anecdotal experiments show that this loss function produces a more powerful training signal than :class:`CosineSimilarityLoss`,
resulting in faster convergence and a final model with superior performance. Consequently, CoSENTLoss may be used
as a drop-in replacement for :class:`CosineSimilarityLoss` in any training script.
Args:
model: SentenceTransformerModel
similarity_fct: Function to compute the PAIRWISE similarity
between embeddings. Default is
``util.pairwise_cos_sim``.
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://kexue.fm/archives/8847
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Relations:
- :class:`AnglELoss` is CoSENTLoss with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``.
- :class:`CosineSimilarityLoss` seems to produce a weaker training signal than CoSENTLoss. In our experiments, CoSENTLoss is recommended.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.CoSENTLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.similarity_fct = similarity_fct
self.scale = scale
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
scores = self.similarity_fct(embeddings[0], embeddings[1])
scores = scores * self.scale
scores = scores[:, None] - scores[None, :]
# label matrix indicating which pairs are relevant
labels = labels[:, None] < labels[None, :]
labels = labels.float()
# mask out irrelevant pairs so they are negligible after exp()
scores = scores - (1 - labels) * 1e12
# append a zero as e^0 = 1
scores = torch.cat((torch.zeros(1).to(scores.device), scores.view(-1)), dim=0)
loss = torch.logsumexp(scores, dim=0)
return loss
def get_config_dict(self) -> dict[str, Any]:
return {"scale": self.scale, "similarity_fct": self.similarity_fct.__name__}
@property
def citation(self) -> str:
return """
@online{kexuefm-8847,
title={CoSENT: A more efficient sentence vector scheme than Sentence-BERT},
author={Su Jianlin},
year={2022},
month={Jan},
url={https://kexue.fm/archives/8847},
}
"""
|
from typing import Any, Dict, Iterable
import torch
from torch import Tensor, nn
from sentence_transformers import util
from sentence_transformers.SentenceTransformer import SentenceTransformer
class CoSENTLoss(nn.Module):
def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.pairwise_cos_sim) -> None:
"""
This class implements CoSENT (Cosine Sentence) loss.
It expects that each of the InputExamples consists of a pair of texts and a float-valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition.
Anecdotal experiments show that this loss function produces a more powerful training signal than :class:`CosineSimilarityLoss`,
resulting in faster convergence and a final model with superior performance. Consequently, CoSENTLoss may be used
as a drop-in replacement for :class:`CosineSimilarityLoss` in any training script.
Args:
model: SentenceTransformerModel
similarity_fct: Function to compute the PAIRWISE similarity
between embeddings. Default is
``util.pairwise_cos_sim``.
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://kexue.fm/archives/8847
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Relations:
- :class:`AnglELoss` is CoSENTLoss with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``.
- :class:`CosineSimilarityLoss` seems to produce a weaker training signal than CoSENTLoss. In our experiments, CoSENTLoss is recommended.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.CoSENTLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(CoSENTLoss, self).__init__()
self.model = model
self.similarity_fct = similarity_fct
self.scale = scale
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
scores = self.similarity_fct(embeddings[0], embeddings[1])
scores = scores * self.scale
scores = scores[:, None] - scores[None, :]
# label matrix indicating which pairs are relevant
labels = labels[:, None] < labels[None, :]
labels = labels.float()
# mask out irrelevant pairs so they are negligible after exp()
scores = scores - (1 - labels) * 1e12
# append a zero as e^0 = 1
scores = torch.cat((torch.zeros(1).to(scores.device), scores.view(-1)), dim=0)
loss = torch.logsumexp(scores, dim=0)
return loss
def get_config_dict(self) -> Dict[str, Any]:
return {"scale": self.scale, "similarity_fct": self.similarity_fct.__name__}
@property
def citation(self) -> str:
return """
@online{kexuefm-8847,
title={CoSENT: A more efficient sentence vector scheme than Sentence-BERT},
author={Su Jianlin},
year={2022},
month={Jan},
url={https://kexue.fm/archives/8847},
}
"""
|
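To make the masking-and-logsumexp computation in ``forward`` concrete, here is a minimal standalone sketch with made-up scores and labels that mirrors the same steps outside the class:

import torch

# Hypothetical cosine similarities for three sentence pairs, already in
# the same order as their gold labels.
scores = torch.tensor([0.9, 0.2, 0.6]) * 20.0  # scale = 20
labels = torch.tensor([1.0, 0.3, 0.8])

# Entry (i, j) holds s_i - s_j for every ordered pair of pairs.
diff = scores[:, None] - scores[None, :]

# Keep only entries where pair j is labeled MORE similar than pair i.
relevant = (labels[:, None] < labels[None, :]).float()
diff = diff - (1 - relevant) * 1e12  # push irrelevant entries to ~ -inf

# Prepend a zero so that exp(0) = 1 realizes the "1 +" term of the loss.
flat = torch.cat((torch.zeros(1), diff.view(-1)))
loss = torch.logsumexp(flat, dim=0)
print(loss)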
_base_ = [
'../_base_/models/cascade-mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_20e.py', '../_base_/default_runtime.py'
]
|
_base_ = [
'../_base_/models/cascade_mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_20e.py', '../_base_/default_runtime.py'
]
|
import logging
import os
from typing import Optional
from jina import __default_host__
from jina.importer import ImportExtensions
from jina.serve.gateway import BaseGateway
from jina.serve.runtimes.gateway.websocket.app import get_fastapi_app
class WebSocketGateway(BaseGateway):
"""WebSocket Gateway implementation"""
def __init__(
self,
port: Optional[int] = None,
ssl_keyfile: Optional[str] = None,
ssl_certfile: Optional[str] = None,
uvicorn_kwargs: Optional[dict] = None,
proxy: Optional[bool] = None,
**kwargs
):
"""Initialize the gateway
:param port: The port of the Gateway, which the client should connect to.
:param ssl_keyfile: the path to the key file
:param ssl_certfile: the path to the certificate file
:param uvicorn_kwargs: Dictionary of keyword arguments that will be passed to the Uvicorn server when starting it
:param proxy: If set, respect the http_proxy and https_proxy environment variables; otherwise, unset
these proxy variables before starting. gRPC seems to prefer no proxy.
:param kwargs: keyword args
"""
super().__init__(**kwargs)
self.port = port
self.ssl_keyfile = ssl_keyfile
self.ssl_certfile = ssl_certfile
self.uvicorn_kwargs = uvicorn_kwargs
if not proxy and os.name != 'nt':
os.unsetenv('http_proxy')
os.unsetenv('https_proxy')
async def setup_server(self):
"""
Set up the WebSocket server.
"""
from jina.helper import extend_rest_interface
self.app = extend_rest_interface(
get_fastapi_app(
streamer=self.streamer,
logger=self.logger,
tracing=self.tracing,
tracer_provider=self.tracer_provider,
)
)
with ImportExtensions(required=True):
from uvicorn import Config, Server
class UviServer(Server):
"""The uvicorn server."""
async def setup(self, sockets=None):
"""
Set up the uvicorn server.
:param sockets: sockets of server.
"""
config = self.config
if not config.loaded:
config.load()
self.lifespan = config.lifespan_class(config)
self.install_signal_handlers()
await self.startup(sockets=sockets)
if self.should_exit:
return
async def serve(self, **kwargs):
"""
Start the server.
:param kwargs: keyword arguments
"""
await self.main_loop()
if 'CICD_JINA_DISABLE_HEALTHCHECK_LOGS' in os.environ:
class _EndpointFilter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
# NOTE: space is important after `GET /`, else all logs will be disabled.
return record.getMessage().find("GET / ") == -1
# Filter out healthcheck endpoint `GET /`
logging.getLogger("uvicorn.access").addFilter(_EndpointFilter())
uvicorn_kwargs = self.uvicorn_kwargs or {}
if self.ssl_keyfile and 'ssl_keyfile' not in uvicorn_kwargs.keys():
uvicorn_kwargs['ssl_keyfile'] = self.ssl_keyfile
if self.ssl_certfile and 'ssl_certfile' not in uvicorn_kwargs.keys():
uvicorn_kwargs['ssl_certfile'] = self.ssl_certfile
self.server = UviServer(
config=Config(
app=self.app,
host=__default_host__,
port=self.port,
ws_max_size=1024 * 1024 * 1024,
log_level=os.getenv('JINA_LOG_LEVEL', 'error').lower(),
**uvicorn_kwargs,
)
)
await self.server.setup()
async def teardown(self):
"""Free other resources allocated with the server, e.g, gateway object, ..."""
await super().teardown()
await self.server.shutdown()
async def stop_server(self):
"""
Stop WebSocket server
"""
self.server.should_exit = True
async def run_server(self):
"""Run WebSocket server forever"""
await self.server.serve()
@property
def should_exit(self) -> bool:
"""
Boolean flag that indicates whether the gateway server should exit or not
:return: boolean flag
"""
return self.server.should_exit
|
import logging
import os
from typing import Optional
from jina import __default_host__
from jina.importer import ImportExtensions
from jina.serve.gateway import BaseGateway
from jina.serve.runtimes.gateway.websocket.app import get_fastapi_app
class WebSocketGateway(BaseGateway):
"""WebSocket Gateway implementation"""
def __init__(
self,
port: Optional[int] = None,
ssl_keyfile: Optional[str] = None,
ssl_certfile: Optional[str] = None,
uvicorn_kwargs: Optional[dict] = None,
proxy: Optional[bool] = None,
**kwargs
):
"""Initialize the gateway
:param port: The port of the Gateway, which the client should connect to.
:param ssl_keyfile: the path to the key file
:param ssl_certfile: the path to the certificate file
:param uvicorn_kwargs: Dictionary of keyword arguments that will be passed to the Uvicorn server when starting it
:param proxy: If set, respect the http_proxy and https_proxy environment variables; otherwise, unset
these proxy variables before starting. gRPC seems to prefer no proxy.
:param kwargs: keyword args
"""
super().__init__(**kwargs)
self.port = port
self.ssl_keyfile = ssl_keyfile
self.ssl_certfile = ssl_certfile
self.uvicorn_kwargs = uvicorn_kwargs
if not proxy and os.name != 'nt':
os.unsetenv('http_proxy')
os.unsetenv('https_proxy')
async def setup_server(self):
"""
Set up the WebSocket server.
"""
from jina.helper import extend_rest_interface
self.app = extend_rest_interface(
get_fastapi_app(
streamer=self.streamer,
logger=self.logger,
)
)
with ImportExtensions(required=True):
from uvicorn import Config, Server
class UviServer(Server):
"""The uvicorn server."""
async def setup(self, sockets=None):
"""
Set up the uvicorn server.
:param sockets: sockets of server.
"""
config = self.config
if not config.loaded:
config.load()
self.lifespan = config.lifespan_class(config)
self.install_signal_handlers()
await self.startup(sockets=sockets)
if self.should_exit:
return
async def serve(self, **kwargs):
"""
Start the server.
:param kwargs: keyword arguments
"""
await self.main_loop()
if 'CICD_JINA_DISABLE_HEALTHCHECK_LOGS' in os.environ:
class _EndpointFilter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
# NOTE: space is important after `GET /`, else all logs will be disabled.
return record.getMessage().find("GET / ") == -1
# Filter out healthcheck endpoint `GET /`
logging.getLogger("uvicorn.access").addFilter(_EndpointFilter())
uvicorn_kwargs = self.uvicorn_kwargs or {}
if self.ssl_keyfile and 'ssl_keyfile' not in uvicorn_kwargs.keys():
uvicorn_kwargs['ssl_keyfile'] = self.ssl_keyfile
if self.ssl_certfile and 'ssl_certfile' not in uvicorn_kwargs.keys():
uvicorn_kwargs['ssl_certfile'] = self.ssl_certfile
self.server = UviServer(
config=Config(
app=self.app,
host=__default_host__,
port=self.port,
ws_max_size=1024 * 1024 * 1024,
log_level=os.getenv('JINA_LOG_LEVEL', 'error').lower(),
**uvicorn_kwargs,
)
)
await self.server.setup()
async def teardown(self):
"""Free other resources allocated with the server, e.g, gateway object, ..."""
await super().teardown()
await self.server.shutdown()
async def stop_server(self):
"""
Stop WebSocket server
"""
self.server.should_exit = True
async def run_server(self):
"""Run WebSocket server forever"""
await self.server.serve()
@property
def should_exit(self) -> bool:
"""
Boolean flag that indicates whether the gateway server should exit or not
:return: boolean flag
"""
return self.server.should_exit
|
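The ``_EndpointFilter`` above is plain standard-library logging; the same idea in a self-contained sketch (hypothetical logger name, not Jina API):

import logging


class EndpointFilter(logging.Filter):
    """Drop access-log records for the bare healthcheck endpoint."""

    def filter(self, record: logging.LogRecord) -> bool:
        # The trailing space matters: "GET / " must not match "GET /foo".
        return record.getMessage().find("GET / ") == -1


logging.basicConfig(level=logging.INFO)
log = logging.getLogger("demo.access")  # hypothetical logger name
log.addFilter(EndpointFilter())

log.info('"GET / " 200 OK')        # suppressed by the filter
log.info('"GET /status " 200 OK')  # still logged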
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.legacy import saving as saving
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api.legacy import saving
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.core import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
register_all_modules()
class TestSingleStageDetector(TestCase):
@parameterized.expand([
'retinanet/retinanet_r18_fpn_1x_coco.py',
'centernet/centernet_resnet18_140e_coco.py',
# 'fsaf/fsaf_r50_fpn_1x_coco.py', 'yolox/yolox_tiny_8x8_300e_coco.py',
# 'yolo/yolov3_mobilenetv2_320_300e_coco.py'
])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
assert detector.backbone
assert detector.neck
assert detector.bbox_head
@parameterized.expand([
('retinanet/retinanet_r18_fpn_1x_coco.py', ('cpu', 'cuda')),
('centernet/centernet_resnet18_140e_coco.py', ('cpu', 'cuda')),
# ('fsaf/fsaf_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
# ('yolox/yolox_tiny_8x8_300e_coco.py', ('cpu', 'cuda')),
# ('yolo/yolov3_mobilenetv2_320_300e_coco.py', ('cpu', 'cuda'))
])
def test_single_stage_forward_loss_mode(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
detector.init_weights()
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, True)
losses = detector.forward(batch_inputs, data_samples, mode='loss')
assert isinstance(losses, dict)
@parameterized.expand([
('retinanet/retinanet_r18_fpn_1x_coco.py', ('cpu', 'cuda')),
('centernet/centernet_resnet18_140e_coco.py', ('cpu', 'cuda')),
# ('fsaf/fsaf_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
# ('yolox/yolox_tiny_8x8_300e_coco.py', ('cpu', 'cuda')),
# ('yolo/yolov3_mobilenetv2_320_300e_coco.py', ('cpu', 'cuda'))
])
def test_single_stage_forward_predict_mode(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, False)
# Test forward in predict mode
detector.eval()
with torch.no_grad():
batch_results = detector.forward(
batch_inputs, data_samples, mode='predict')
assert len(batch_results) == 2
assert isinstance(batch_results[0], DetDataSample)
@parameterized.expand([
('retinanet/retinanet_r18_fpn_1x_coco.py', ('cpu', 'cuda')),
('centernet/centernet_resnet18_140e_coco.py', ('cpu', 'cuda')),
# ('fsaf/fsaf_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
# ('yolox/yolox_tiny_8x8_300e_coco.py', ('cpu', 'cuda')),
# ('yolo/yolov3_mobilenetv2_320_300e_coco.py', ('cpu', 'cuda'))
])
def test_single_stage_forward_tensor_mode(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, False)
batch_results = detector.forward(
batch_inputs, data_samples, mode='tensor')
assert isinstance(batch_results, tuple)
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.core import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
register_all_modules()
class TestSingleStageDetector(TestCase):
@parameterized.expand([
'retinanet/retinanet_r18_fpn_1x_coco.py',
# 'centernet/centernet_resnet18_140e_coco.py',
# 'fsaf/fsaf_r50_fpn_1x_coco.py', 'yolox/yolox_tiny_8x8_300e_coco.py',
# 'yolo/yolov3_mobilenetv2_320_300e_coco.py'
])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
assert detector.backbone
assert detector.neck
assert detector.bbox_head
@parameterized.expand([
('retinanet/retinanet_r18_fpn_1x_coco.py', ('cpu', 'cuda')),
# ('centernet/centernet_resnet18_140e_coco.py', ('cpu', 'cuda')),
# ('fsaf/fsaf_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
# ('yolox/yolox_tiny_8x8_300e_coco.py', ('cpu', 'cuda')),
# ('yolo/yolov3_mobilenetv2_320_300e_coco.py', ('cpu', 'cuda'))
])
def test_single_stage_forward_loss_mode(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
detector.init_weights()
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, True)
losses = detector.forward(batch_inputs, data_samples, mode='loss')
assert isinstance(losses, dict)
@parameterized.expand([
('retinanet/retinanet_r18_fpn_1x_coco.py', ('cpu', 'cuda')),
# ('centernet/centernet_resnet18_140e_coco.py', ('cpu', 'cuda')),
# ('fsaf/fsaf_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
# ('yolox/yolox_tiny_8x8_300e_coco.py', ('cpu', 'cuda')),
# ('yolo/yolov3_mobilenetv2_320_300e_coco.py', ('cpu', 'cuda'))
])
def test_single_stage_forward_predict_mode(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, False)
# Test forward in predict mode
detector.eval()
with torch.no_grad():
batch_results = detector.forward(
batch_inputs, data_samples, mode='predict')
assert len(batch_results) == 2
assert isinstance(batch_results[0], DetDataSample)
@parameterized.expand([
('retinanet/retinanet_r18_fpn_1x_coco.py', ('cpu', 'cuda')),
# ('centernet/centernet_resnet18_140e_coco.py', ('cpu', 'cuda')),
# ('fsaf/fsaf_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
# ('yolox/yolox_tiny_8x8_300e_coco.py', ('cpu', 'cuda')),
# ('yolo/yolov3_mobilenetv2_320_300e_coco.py', ('cpu', 'cuda'))
])
def test_single_stage_forward_tensor_mode(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, False)
batch_results = detector.forward(
batch_inputs, data_samples, mode='tensor')
assert isinstance(batch_results, tuple)
|
import pathlib
from typing import Any, Dict, List, Tuple, Union
from torchdata.datapipes.iter import IterDataPipe, Mapper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from torchvision.prototype.tv_tensors import Label
from .._api import register_dataset, register_info
NAME = "eurosat"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(
categories=(
"AnnualCrop",
"Forest",
"HerbaceousVegetation",
"Highway",
"Industrial",
"Pasture",
"PermanentCrop",
"Residential",
"River",
"SeaLake",
)
)
@register_dataset(NAME)
class EuroSAT(Dataset):
"""EuroSAT Dataset.
homepage="https://github.com/phelber/eurosat",
"""
def __init__(self, root: Union[str, pathlib.Path], *, skip_integrity_check: bool = False) -> None:
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> List[OnlineResource]:
return [
HttpResource(
"https://madm.dfki.de/files/sentinel/EuroSAT.zip",
sha256="8ebea626349354c5328b142b96d0430e647051f26efc2dc974c843f25ecf70bd",
)
]
def _prepare_sample(self, data: Tuple[str, Any]) -> Dict[str, Any]:
path, buffer = data
category = pathlib.Path(path).parent.name
return dict(
label=Label.from_category(category, categories=self._categories),
path=path,
image=EncodedImage.from_file(buffer),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 27_000
|
import pathlib
from typing import Any, Dict, List, Tuple, Union
from torchdata.datapipes.iter import IterDataPipe, Mapper
from torchvision.prototype.datapoints import Label
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from .._api import register_dataset, register_info
NAME = "eurosat"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(
categories=(
"AnnualCrop",
"Forest",
"HerbaceousVegetation",
"Highway",
"Industrial",
"Pasture",
"PermanentCrop",
"Residential",
"River",
"SeaLake",
)
)
@register_dataset(NAME)
class EuroSAT(Dataset):
"""EuroSAT Dataset.
homepage="https://github.com/phelber/eurosat",
"""
def __init__(self, root: Union[str, pathlib.Path], *, skip_integrity_check: bool = False) -> None:
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> List[OnlineResource]:
return [
HttpResource(
"https://madm.dfki.de/files/sentinel/EuroSAT.zip",
sha256="8ebea626349354c5328b142b96d0430e647051f26efc2dc974c843f25ecf70bd",
)
]
def _prepare_sample(self, data: Tuple[str, Any]) -> Dict[str, Any]:
path, buffer = data
category = pathlib.Path(path).parent.name
return dict(
label=Label.from_category(category, categories=self._categories),
path=path,
image=EncodedImage.from_file(buffer),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 27_000
|
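As a side note, the ``sha256`` pin on the archive can be verified by hand. A small sketch, assuming the zip has already been downloaded to a hypothetical local path:

import hashlib
import pathlib

EXPECTED = "8ebea626349354c5328b142b96d0430e647051f26efc2dc974c843f25ecf70bd"

def sha256_of(path: pathlib.Path, chunk_size: int = 1 << 20) -> str:
    # Stream the file so a large archive never has to fit in memory.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

archive = pathlib.Path("EuroSAT.zip")  # hypothetical download location
if archive.exists():
    assert sha256_of(archive) == EXPECTED, "checksum mismatch"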
"""
=====================================
How to write your own Datapoint class
=====================================
This guide is intended for advanced users and downstream library maintainers. We explain how to
write your own datapoint class, and how to make it compatible with the built-in
Torchvision v2 transforms. Before continuing, make sure you have read
:ref:`sphx_glr_auto_examples_plot_datapoints.py`.
"""
# %%
import torch
import torchvision
# We are using BETA APIs, so we deactivate the associated warning, thereby acknowledging that
# some APIs may slightly change in the future
torchvision.disable_beta_transforms_warning()
from torchvision import datapoints
from torchvision.transforms import v2
# %%
# We will create a very simple class that just inherits from the base
# :class:`~torchvision.datapoints.Datapoint` class. It will be enough to cover
# what you need to know to implement your more elaborate use cases. If you need
# to create a class that carries meta-data, take a look at how the
# :class:`~torchvision.datapoints.BoundingBoxes` class is `implemented
# <https://github.com/pytorch/vision/blob/main/torchvision/datapoints/_bounding_box.py>`_.
class MyDatapoint(datapoints.Datapoint):
pass
my_dp = MyDatapoint([1, 2, 3])
my_dp
# %%
# Now that we have defined our custom Datapoint class, we want it to be
# compatible with the built-in torchvision transforms, and the functional API.
# For that, we need to implement a kernel which performs the core of the
# transformation, and then "hook" it to the functional that we want to support
# via :func:`~torchvision.transforms.v2.functional.register_kernel`.
#
# We illustrate this process below: we create a kernel for the "horizontal flip"
# operation of our MyDatapoint class, and register it to the functional API.
from torchvision.transforms.v2 import functional as F
@F.register_kernel(functional="hflip", datapoint_cls=MyDatapoint)
def hflip_my_datapoint(my_dp, *args, **kwargs):
print("Flipping!")
out = my_dp.flip(-1)
return datapoints.wrap(out, like=my_dp)
# %%
# To understand why :func:`~torchvision.datapoints.wrap` is used, see
# :ref:`datapoint_unwrapping_behaviour`. Ignore the ``*args, **kwargs`` for now,
# we will explain it below in :ref:`param_forwarding`.
#
# .. note::
#
# In our call to ``register_kernel`` above we used a string
# ``functional="hflip"`` to refer to the functional we want to hook into. We
# could also have used the functional *itself*, i.e.
# ``@register_kernel(functional=F.hflip, ...)``.
#
# Now that we have registered our kernel, we can call the functional API on a
# ``MyDatapoint`` instance:
my_dp = MyDatapoint(torch.rand(3, 256, 256))
_ = F.hflip(my_dp)
# %%
# And we can also use the
# :class:`~torchvision.transforms.v2.RandomHorizontalFlip` transform, since it relies on :func:`~torchvision.transforms.v2.functional.hflip` internally:
t = v2.RandomHorizontalFlip(p=1)
_ = t(my_dp)
# %%
# .. note::
#
# We cannot register a kernel for a transform class, we can only register a
# kernel for a **functional**. The reason we can't register a transform
# class is because one transform may internally rely on more than one
# functional, so in general we can't register a single kernel for a given
# class.
#
# .. _param_forwarding:
#
# Parameter forwarding, and ensuring future compatibility of your kernels
# -----------------------------------------------------------------------
#
# The functional API that you're hooking into is public and therefore
# **backward** compatible: we guarantee that the parameters of these functionals
# won't be removed or renamed without a proper deprecation cycle. However, we
# don't guarantee **forward** compatibility, and we may add new parameters in
# the future.
#
# Imagine that in a future version, Torchvision adds a new ``inplace`` parameter
# to its :func:`~torchvision.transforms.v2.functional.hflip` functional. If you
# already defined and registered your own kernel as
def hflip_my_datapoint(my_dp): # noqa
print("Flipping!")
out = my_dp.flip(-1)
return datapoints.wrap(out, like=my_dp)
# %%
# then calling ``F.hflip(my_dp)`` will **fail**, because ``hflip`` will try to
# pass the new ``inplace`` parameter to your kernel, but your kernel doesn't
# accept it.
#
# For this reason, we recommend to always define your kernels with
# ``*args, **kwargs`` in their signature, as done above. This way, your kernel
# will be able to accept any new parameter that we may add in the future.
# (Technically, adding only `**kwargs` should be enough.)
|
"""
=====================================
How to write your own Datapoint class
=====================================
This guide is intended for advanced users and downstream library maintainers. We explain how to
write your own datapoint class, and how to make it compatible with the built-in
Torchvision v2 transforms. Before continuing, make sure you have read
:ref:`sphx_glr_auto_examples_plot_datapoints.py`.
"""
# %%
import torch
import torchvision
# We are using BETA APIs, so we deactivate the associated warning, thereby acknowledging that
# some APIs may slightly change in the future
torchvision.disable_beta_transforms_warning()
from torchvision import datapoints
from torchvision.transforms import v2
# %%
# We will create a very simple class that just inherits from the base
# :class:`~torchvision.datapoints.Datapoint` class. It will be enough to cover
# what you need to know to implement your more elaborate use cases. If you need
# to create a class that carries meta-data, take a look at how the
# :class:`~torchvision.datapoints.BoundingBoxes` class is `implemented
# <https://github.com/pytorch/vision/blob/main/torchvision/datapoints/_bounding_box.py>`_.
class MyDatapoint(datapoints.Datapoint):
pass
my_dp = MyDatapoint([1, 2, 3])
my_dp
# %%
# Now that we have defined our custom Datapoint class, we want it to be
# compatible with the built-in torchvision transforms, and the functional API.
# For that, we need to implement a kernel which performs the core of the
# transformation, and then "hook" it to the functional that we want to support
# via :func:`~torchvision.transforms.v2.functional.register_kernel`.
#
# We illustrate this process below: we create a kernel for the "horizontal flip"
# operation of our MyDatapoint class, and register it to the functional API.
from torchvision.transforms.v2 import functional as F
@F.register_kernel(functional="hflip", datapoint_cls=MyDatapoint)
def hflip_my_datapoint(my_dp, *args, **kwargs):
print("Flipping!")
out = my_dp.flip(-1)
return MyDatapoint.wrap_like(my_dp, out)
# %%
# To understand why ``wrap_like`` is used, see
# :ref:`datapoint_unwrapping_behaviour`. Ignore the ``*args, **kwargs`` for now,
# we will explain it below in :ref:`param_forwarding`.
#
# .. note::
#
# In our call to ``register_kernel`` above we used a string
# ``functional="hflip"`` to refer to the functional we want to hook into. We
# could also have used the functional *itself*, i.e.
# ``@register_kernel(functional=F.hflip, ...)``.
#
# Now that we have registered our kernel, we can call the functional API on a
# ``MyDatapoint`` instance:
my_dp = MyDatapoint(torch.rand(3, 256, 256))
_ = F.hflip(my_dp)
# %%
# And we can also use the
# :class:`~torchvision.transforms.v2.RandomHorizontalFlip` transform, since it relies on :func:`~torchvision.transforms.v2.functional.hflip` internally:
t = v2.RandomHorizontalFlip(p=1)
_ = t(my_dp)
# %%
# .. note::
#
# We cannot register a kernel for a transform class, we can only register a
# kernel for a **functional**. The reason we can't register a transform
# class is because one transform may internally rely on more than one
# functional, so in general we can't register a single kernel for a given
# class.
#
# .. _param_forwarding:
#
# Parameter forwarding, and ensuring future compatibility of your kernels
# -----------------------------------------------------------------------
#
# The functional API that you're hooking into is public and therefore
# **backward** compatible: we guarantee that the parameters of these functionals
# won't be removed or renamed without a proper deprecation cycle. However, we
# don't guarantee **forward** compatibility, and we may add new parameters in
# the future.
#
# Imagine that in a future version, Torchvision adds a new ``inplace`` parameter
# to its :func:`~torchvision.transforms.v2.functional.hflip` functional. If you
# already defined and registered your own kernel as
def hflip_my_datapoint(my_dp): # noqa
print("Flipping!")
out = my_dp.flip(-1)
return MyDatapoint.wrap_like(my_dp, out)
# %%
# then calling ``F.hflip(my_dp)`` will **fail**, because ``hflip`` will try to
# pass the new ``inplace`` parameter to your kernel, but your kernel doesn't
# accept it.
#
# For this reason, we recommend to always define your kernels with
# ``*args, **kwargs`` in their signature, as done above. This way, your kernel
# will be able to accept any new parameter that we may add in the future.
# (Technically, adding only `**kwargs` should be enough.)
|
"""
================================================================
Using KBinsDiscretizer to discretize continuous features
================================================================
The example compares prediction result of linear regression (linear model)
and decision tree (tree based model) with and without discretization of
real-valued features.
As shown in the result before discretization, the linear model is fast to
build and relatively straightforward to interpret, but can only model
linear relationships, while the decision tree can build a much more complex
model of the data. One way to make a linear model more powerful on
continuous data is to use discretization (also known as binning). In this
example, we discretize the feature and one-hot encode the transformed data.
Note that if the bins are not reasonably wide, there is a substantially
increased risk of overfitting, so the discretizer parameters should usually
be tuned under cross-validation.
After discretization, linear regression and the decision tree make exactly
the same prediction. As features are constant within each bin, any model
must predict the same value for all points within a bin. Compared with the
result before discretization, the linear model becomes much more flexible
while the decision tree gets much less flexible. Note that binning features
generally has no beneficial effect for tree-based models, as these models
can learn to split up the data anywhere.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import KBinsDiscretizer
from sklearn.tree import DecisionTreeRegressor
# construct the dataset
rnd = np.random.RandomState(42)
X = rnd.uniform(-3, 3, size=100)
y = np.sin(X) + rnd.normal(size=len(X)) / 3
X = X.reshape(-1, 1)
# transform the dataset with KBinsDiscretizer
enc = KBinsDiscretizer(
n_bins=10, encode="onehot", quantile_method="averaged_inverted_cdf"
)
X_binned = enc.fit_transform(X)
# predict with original dataset
fig, (ax1, ax2) = plt.subplots(ncols=2, sharey=True, figsize=(10, 4))
line = np.linspace(-3, 3, 1000, endpoint=False).reshape(-1, 1)
reg = LinearRegression().fit(X, y)
ax1.plot(line, reg.predict(line), linewidth=2, color="green", label="linear regression")
reg = DecisionTreeRegressor(min_samples_split=3, random_state=0).fit(X, y)
ax1.plot(line, reg.predict(line), linewidth=2, color="red", label="decision tree")
ax1.plot(X[:, 0], y, "o", c="k")
ax1.legend(loc="best")
ax1.set_ylabel("Regression output")
ax1.set_xlabel("Input feature")
ax1.set_title("Result before discretization")
# predict with transformed dataset
line_binned = enc.transform(line)
reg = LinearRegression().fit(X_binned, y)
ax2.plot(
line,
reg.predict(line_binned),
linewidth=2,
color="green",
linestyle="-",
label="linear regression",
)
reg = DecisionTreeRegressor(min_samples_split=3, random_state=0).fit(X_binned, y)
ax2.plot(
line,
reg.predict(line_binned),
linewidth=2,
color="red",
linestyle=":",
label="decision tree",
)
ax2.plot(X[:, 0], y, "o", c="k")
ax2.vlines(enc.bin_edges_[0], *plt.gca().get_ylim(), linewidth=1, alpha=0.2)
ax2.legend(loc="best")
ax2.set_xlabel("Input feature")
ax2.set_title("Result after discretization")
plt.tight_layout()
plt.show()
|
"""
================================================================
Using KBinsDiscretizer to discretize continuous features
================================================================
The example compares prediction result of linear regression (linear model)
and decision tree (tree based model) with and without discretization of
real-valued features.
As shown in the result before discretization, the linear model is fast to
build and relatively straightforward to interpret, but can only model
linear relationships, while the decision tree can build a much more complex
model of the data. One way to make a linear model more powerful on
continuous data is to use discretization (also known as binning). In this
example, we discretize the feature and one-hot encode the transformed data.
Note that if the bins are not reasonably wide, there is a substantially
increased risk of overfitting, so the discretizer parameters should usually
be tuned under cross-validation.
After discretization, linear regression and the decision tree make exactly
the same prediction. As features are constant within each bin, any model
must predict the same value for all points within a bin. Compared with the
result before discretization, the linear model becomes much more flexible
while the decision tree gets much less flexible. Note that binning features
generally has no beneficial effect for tree-based models, as these models
can learn to split up the data anywhere.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import KBinsDiscretizer
from sklearn.tree import DecisionTreeRegressor
# construct the dataset
rnd = np.random.RandomState(42)
X = rnd.uniform(-3, 3, size=100)
y = np.sin(X) + rnd.normal(size=len(X)) / 3
X = X.reshape(-1, 1)
# transform the dataset with KBinsDiscretizer
enc = KBinsDiscretizer(n_bins=10, encode="onehot")
X_binned = enc.fit_transform(X)
# predict with original dataset
fig, (ax1, ax2) = plt.subplots(ncols=2, sharey=True, figsize=(10, 4))
line = np.linspace(-3, 3, 1000, endpoint=False).reshape(-1, 1)
reg = LinearRegression().fit(X, y)
ax1.plot(line, reg.predict(line), linewidth=2, color="green", label="linear regression")
reg = DecisionTreeRegressor(min_samples_split=3, random_state=0).fit(X, y)
ax1.plot(line, reg.predict(line), linewidth=2, color="red", label="decision tree")
ax1.plot(X[:, 0], y, "o", c="k")
ax1.legend(loc="best")
ax1.set_ylabel("Regression output")
ax1.set_xlabel("Input feature")
ax1.set_title("Result before discretization")
# predict with transformed dataset
line_binned = enc.transform(line)
reg = LinearRegression().fit(X_binned, y)
ax2.plot(
line,
reg.predict(line_binned),
linewidth=2,
color="green",
linestyle="-",
label="linear regression",
)
reg = DecisionTreeRegressor(min_samples_split=3, random_state=0).fit(X_binned, y)
ax2.plot(
line,
reg.predict(line_binned),
linewidth=2,
color="red",
linestyle=":",
label="decision tree",
)
ax2.plot(X[:, 0], y, "o", c="k")
ax2.vlines(enc.bin_edges_[0], *plt.gca().get_ylim(), linewidth=1, alpha=0.2)
ax2.legend(loc="best")
ax2.set_xlabel("Input feature")
ax2.set_title("Result after discretization")
plt.tight_layout()
plt.show()
|
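For a quick sanity check on what the discretizer emits, the toy sketch below (assuming scikit-learn is installed) shows that ``encode="onehot"`` turns each sample into a sparse one-hot row with one column per bin:

import numpy as np
from sklearn.preprocessing import KBinsDiscretizer

X = np.linspace(-3, 3, 8).reshape(-1, 1)  # toy one-dimensional feature
enc = KBinsDiscretizer(n_bins=4, encode="onehot")
X_binned = enc.fit_transform(X)

print(X_binned.shape)      # (8, 4): one column per bin
print(X_binned.toarray())  # each row has a single 1 marking its bin
print(enc.bin_edges_[0])   # learned edges for the (only) feature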
import copy
import pytest
import torch
from common_utils import assert_equal
from torchvision.models.detection import _utils, backbone_utils
from torchvision.models.detection.transform import GeneralizedRCNNTransform
class TestModelsDetectionUtils:
def test_balanced_positive_negative_sampler(self):
sampler = _utils.BalancedPositiveNegativeSampler(4, 0.25)
# keep all 6 negatives first, then add 3 positives, last two are ignore
matched_idxs = [torch.tensor([0, 0, 0, 0, 0, 0, 1, 1, 1, -1, -1])]
pos, neg = sampler(matched_idxs)
# we know the number of elements that should be sampled for the positive (1)
# and the negative (3), and their location. Let's make sure that they are
# there
assert pos[0].sum() == 1
assert pos[0][6:9].sum() == 1
assert neg[0].sum() == 3
assert neg[0][0:6].sum() == 3
def test_box_linear_coder(self):
box_coder = _utils.BoxLinearCoder(normalize_by_size=True)
# Generate a random 10x4 boxes tensor, with coordinates < 50.
boxes = torch.rand(10, 4) * 50
boxes.clamp_(min=1.0) # tiny boxes cause numerical instability in box regression
boxes[:, 2:] += boxes[:, :2]
proposals = torch.tensor([0, 0, 101, 101] * 10).reshape(10, 4).float()
rel_codes = box_coder.encode(boxes, proposals)
pred_boxes = box_coder.decode(rel_codes, boxes)
assert torch.allclose(proposals, pred_boxes)
@pytest.mark.parametrize("train_layers, exp_froz_params", [(0, 53), (1, 43), (2, 24), (3, 11), (4, 1), (5, 0)])
def test_resnet_fpn_backbone_frozen_layers(self, train_layers, exp_froz_params):
# we know how many initial layers and parameters of the network should
# be frozen for each trainable_backbone_layers parameter value
# i.e. all 53 params are frozen if trainable_backbone_layers=0
# and first 24 params are frozen if trainable_backbone_layers=2
model = backbone_utils.resnet_fpn_backbone("resnet50", weights=None, trainable_layers=train_layers)
# boolean list that is true if the param at that index is frozen
is_frozen = [not parameter.requires_grad for _, parameter in model.named_parameters()]
# check that expected initial number of layers are frozen
assert all(is_frozen[:exp_froz_params])
def test_validate_resnet_inputs_detection(self):
# default number of backbone layers to train
ret = backbone_utils._validate_trainable_layers(
is_trained=True, trainable_backbone_layers=None, max_value=5, default_value=3
)
assert ret == 3
# can't go beyond 5
with pytest.raises(ValueError, match=r"Trainable backbone layers should be in the range"):
ret = backbone_utils._validate_trainable_layers(
is_trained=True, trainable_backbone_layers=6, max_value=5, default_value=3
)
# if not trained, should use all trainable layers and warn
with pytest.warns(UserWarning):
ret = backbone_utils._validate_trainable_layers(
is_trained=False, trainable_backbone_layers=0, max_value=5, default_value=3
)
assert ret == 5
def test_transform_copy_targets(self):
transform = GeneralizedRCNNTransform(300, 500, torch.zeros(3), torch.ones(3))
image = [torch.rand(3, 200, 300), torch.rand(3, 200, 200)]
targets = [{"boxes": torch.rand(3, 4)}, {"boxes": torch.rand(2, 4)}]
targets_copy = copy.deepcopy(targets)
out = transform(image, targets) # noqa: F841
assert_equal(targets[0]["boxes"], targets_copy[0]["boxes"])
assert_equal(targets[1]["boxes"], targets_copy[1]["boxes"])
def test_not_float_normalize(self):
transform = GeneralizedRCNNTransform(300, 500, torch.zeros(3), torch.ones(3))
image = [torch.randint(0, 255, (3, 200, 300), dtype=torch.uint8)]
targets = [{"boxes": torch.rand(3, 4)}]
with pytest.raises(TypeError):
out = transform(image, targets) # noqa: F841
if __name__ == "__main__":
pytest.main([__file__])
|
import copy
import pytest
import torch
from common_utils import assert_equal
from torchvision.models.detection import _utils, backbone_utils
from torchvision.models.detection.transform import GeneralizedRCNNTransform
class TestModelsDetectionUtils:
def test_balanced_positive_negative_sampler(self):
sampler = _utils.BalancedPositiveNegativeSampler(4, 0.25)
# keep all 6 negatives first, then add 3 positives, last two are ignore
matched_idxs = [torch.tensor([0, 0, 0, 0, 0, 0, 1, 1, 1, -1, -1])]
pos, neg = sampler(matched_idxs)
# we know the number of elements that should be sampled for the positive (1)
# and the negative (3), and their location. Let's make sure that they are
# there
assert pos[0].sum() == 1
assert pos[0][6:9].sum() == 1
assert neg[0].sum() == 3
assert neg[0][0:6].sum() == 3
def test_box_linear_coder(self):
box_coder = _utils.BoxLinearCoder(normalize_by_size=True)
# Generate a random 10x4 boxes tensor, with coordinates < 50.
boxes = torch.rand(10, 4) * 50
boxes.clamp_(min=1.0) # tiny boxes cause numerical instability in box regression
boxes[:, 2:] += boxes[:, :2]
proposals = torch.tensor([0, 0, 101, 101] * 10).reshape(10, 4).float()
rel_codes = box_coder.encode(boxes, proposals)
pred_boxes = box_coder.decode(rel_codes, boxes)
assert torch.allclose(proposals, pred_boxes)
@pytest.mark.parametrize("train_layers, exp_froz_params", [(0, 53), (1, 43), (2, 24), (3, 11), (4, 1), (5, 0)])
def test_resnet_fpn_backbone_frozen_layers(self, train_layers, exp_froz_params):
# we know how many initial layers and parameters of the network should
# be frozen for each trainable_backbone_layers parameter value
# i.e. all 53 params are frozen if trainable_backbone_layers=0
# and first 24 params are frozen if trainable_backbone_layers=2
model = backbone_utils.resnet_fpn_backbone("resnet50", weights=None, trainable_layers=train_layers)
# boolean list that is true if the param at that index is frozen
is_frozen = [not parameter.requires_grad for _, parameter in model.named_parameters()]
# check that expected initial number of layers are frozen
assert all(is_frozen[:exp_froz_params])
def test_validate_resnet_inputs_detection(self):
# default number of backbone layers to train
ret = backbone_utils._validate_trainable_layers(
is_trained=True, trainable_backbone_layers=None, max_value=5, default_value=3
)
assert ret == 3
# can't go beyond 5
with pytest.raises(ValueError, match=r"Trainable backbone layers should be in the range"):
ret = backbone_utils._validate_trainable_layers(
is_trained=True, trainable_backbone_layers=6, max_value=5, default_value=3
)
# if not trained, should use all trainable layers and warn
with pytest.warns(UserWarning):
ret = backbone_utils._validate_trainable_layers(
is_trained=False, trainable_backbone_layers=0, max_value=5, default_value=3
)
assert ret == 5
def test_transform_copy_targets(self):
transform = GeneralizedRCNNTransform(300, 500, torch.zeros(3), torch.ones(3))
image = [torch.rand(3, 200, 300), torch.rand(3, 200, 200)]
targets = [{"boxes": torch.rand(3, 4)}, {"boxes": torch.rand(2, 4)}]
targets_copy = copy.deepcopy(targets)
out = transform(image, targets) # noqa: F841
assert_equal(targets[0]["boxes"], targets_copy[0]["boxes"])
assert_equal(targets[1]["boxes"], targets_copy[1]["boxes"])
def test_not_float_normalize(self):
transform = GeneralizedRCNNTransform(300, 500, torch.zeros(3), torch.ones(3))
image = [torch.randint(0, 255, (3, 200, 300), dtype=torch.uint8)]
targets = [{"boxes": torch.rand(3, 4)}]
with pytest.raises(TypeError):
out = transform(image, targets) # noqa: F841
if __name__ == "__main__":
pytest.main([__file__])
|
from __future__ import annotations
import os
import sys
from typing import Any, BinaryIO, Optional, Tuple, Type, TypeVar, Union
import PIL.Image
import torch
from torchvision.datapoints._datapoint import Datapoint
from torchvision.prototype.utils._internal import fromfile, ReadOnlyTensorBuffer
D = TypeVar("D", bound="EncodedData")
class EncodedData(Datapoint):
@classmethod
def _wrap(cls: Type[D], tensor: torch.Tensor) -> D:
return tensor.as_subclass(cls)
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: bool = False,
) -> EncodedData:
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
# TODO: warn / bail out if we encounter a tensor with shape other than (N,) or with dtype other than uint8?
return cls._wrap(tensor)
@classmethod
def wrap_like(cls: Type[D], other: D, tensor: torch.Tensor) -> D:
return cls._wrap(tensor)
@classmethod
def from_file(cls: Type[D], file: BinaryIO, **kwargs: Any) -> D:
encoded_data = cls(fromfile(file, dtype=torch.uint8, byte_order=sys.byteorder), **kwargs)
file.close()
return encoded_data
@classmethod
def from_path(cls: Type[D], path: Union[str, os.PathLike], **kwargs: Any) -> D:
with open(path, "rb") as file:
return cls.from_file(file, **kwargs)
class EncodedImage(EncodedData):
# TODO: Use @functools.cached_property if we can depend on Python 3.8
@property
def spatial_size(self) -> Tuple[int, int]:
if not hasattr(self, "_spatial_size"):
with PIL.Image.open(ReadOnlyTensorBuffer(self)) as image:
self._spatial_size = image.height, image.width
return self._spatial_size
|
from __future__ import annotations
import os
import sys
from typing import Any, BinaryIO, Optional, Tuple, Type, TypeVar, Union
import PIL.Image
import torch
from torchvision.prototype.datapoints._datapoint import Datapoint
from torchvision.prototype.utils._internal import fromfile, ReadOnlyTensorBuffer
D = TypeVar("D", bound="EncodedData")
class EncodedData(Datapoint):
@classmethod
def _wrap(cls: Type[D], tensor: torch.Tensor) -> D:
return tensor.as_subclass(cls)
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: bool = False,
) -> EncodedData:
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
# TODO: warn / bail out if we encounter a tensor with shape other than (N,) or with dtype other than uint8?
return cls._wrap(tensor)
@classmethod
def wrap_like(cls: Type[D], other: D, tensor: torch.Tensor) -> D:
return cls._wrap(tensor)
@classmethod
def from_file(cls: Type[D], file: BinaryIO, **kwargs: Any) -> D:
encoded_data = cls(fromfile(file, dtype=torch.uint8, byte_order=sys.byteorder), **kwargs)
file.close()
return encoded_data
@classmethod
def from_path(cls: Type[D], path: Union[str, os.PathLike], **kwargs: Any) -> D:
with open(path, "rb") as file:
return cls.from_file(file, **kwargs)
class EncodedImage(EncodedData):
# TODO: Use @functools.cached_property if we can depend on Python 3.8
@property
def spatial_size(self) -> Tuple[int, int]:
if not hasattr(self, "_spatial_size"):
with PIL.Image.open(ReadOnlyTensorBuffer(self)) as image:
self._spatial_size = image.height, image.width
return self._spatial_size
|
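Typical use of the class above is short; a hedged sketch (hypothetical file path, and prototype APIs that may change between releases):

from torchvision.prototype.datasets.utils import EncodedImage

# Hypothetical local image file; the raw bytes are kept un-decoded.
img = EncodedImage.from_path("sample.jpg")
print(img.dtype, img.shape)  # torch.uint8, (num_bytes,)
# Height/width are decoded lazily via PIL on first access, then cached.
print(img.spatial_size)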
import asyncio
import os
from typing import Dict, List
import pytest
import requests
from jina import Flow
from jina.logging.logger import JinaLogger
from tests.k8s_otel.kind_wrapper import KindClusterWrapperV2
from tests.k8s_otel.util import get_last_health_check_data, parse_string_jaeger_tags
@pytest.mark.asyncio
@pytest.mark.timeout(1800)
async def test_flow_resource_labeling(
tmpdir, otel_test_namespace: str, k8s_cluster_v2: KindClusterWrapperV2
):
NAMESPACE = 'test-flow-resource-labeling'
dump_path = os.path.join(tmpdir, NAMESPACE)
logger = JinaLogger(NAMESPACE)
# Create k8s flow artifacts
flow = Flow(
name='test-flow-metrics',
port=8080,
metrics=True,
metrics_exporter_host=f'http://otel-collector.{otel_test_namespace}.svc.cluster.local',
metrics_exporter_port=4317,
tracing=True,
traces_exporter_host=f'http://jaeger.{otel_test_namespace}.svc.cluster.local',
traces_exporter_port=4317,
).add(
name='instrumentation',
uses='docker://test-instrumentation:test-pip',
)
flow.to_kubernetes_yaml(dump_path, k8s_namespace=NAMESPACE)
# Deploy flow
k8s_cluster_v2.deploy_from_dir(dir=dump_path, namespace=NAMESPACE)
# Make client requests
with k8s_cluster_v2.port_forward(
'svc/gateway', NAMESPACE, svc_port=8080
) as gateway_port:
from jina import Client
res = []
async for docs in Client(port=gateway_port, asyncio=True).post("/"):
res.extend(docs)
# Give grace period for metrics and traces to be exported
await asyncio.sleep(60)
# Check Jaeger API
with k8s_cluster_v2.port_forward(
'svc/jaeger', otel_test_namespace, svc_port=16686
) as jaeger_port:
try:
# Gateway
trace_data = get_last_health_check_data(
jaeger_port=jaeger_port, service_name='gateway'
)
assert trace_data['processes']['p1']['serviceName'] == 'gateway'
tags: Dict[str, str] = parse_string_jaeger_tags(
trace_data['processes']['p1']['tags']
)
assert tags['k8s.deployment.name'] == 'gateway'
assert tags['k8s.namespace.name'] == NAMESPACE
assert tags['k8s.pod.name'].startswith('gateway-')
# Instrumentation Executor
trace_data = get_last_health_check_data(
jaeger_port=jaeger_port, service_name='instrumentation'
)
assert trace_data['processes']['p1']['serviceName'] == 'instrumentation'
tags: Dict[str, str] = parse_string_jaeger_tags(
trace_data['processes']['p1']['tags']
)
assert tags['k8s.deployment.name'] == 'instrumentation'
assert tags['k8s.namespace.name'] == NAMESPACE
assert tags['k8s.pod.name'].startswith('instrumentation-')
except AssertionError as e:
logger.error(trace_data)
raise e
with k8s_cluster_v2.port_forward(
'svc/prometheus', otel_test_namespace, svc_port=9090
) as prometheus_port:
try:
# Check Prometheus Labels
prometheus_labels: List[str] = requests.get(
f'http://localhost:{prometheus_port}/api/v1/labels'
).json()['data']
assert 'k8s_deployment_name' in prometheus_labels
assert 'k8s_namespace_name' in prometheus_labels
assert 'k8s_pod_name' in prometheus_labels
except AssertionError as e:
logger.error(prometheus_labels)
raise e
try:
depl_values: List[str] = requests.get(
f'http://localhost:{prometheus_port}/api/v1/label/k8s_deployment_name/values'
).json()['data']
assert 'gateway' in depl_values
assert 'instrumentation' in depl_values
except AssertionError as e:
logger.error(depl_values)
raise e
try:
ns_values: List[str] = requests.get(
f'http://localhost:{prometheus_port}/api/v1/label/k8s_namespace_name/values'
).json()['data']
assert NAMESPACE in ns_values
except AssertionError as e:
logger.error(ns_values)
raise e
try:
pod_values: List[str] = requests.get(
f'http://localhost:{prometheus_port}/api/v1/label/k8s_pod_name/values'
).json()['data']
assert any(i.startswith('gateway-') for i in pod_values)
assert any(i.startswith('instrumentation-') for i in pod_values)
except AssertionError as e:
logger.error(pod_values)
raise e
|
import asyncio
import os
from typing import Dict, List
import pytest
import requests
from jina import Flow
from jina.logging.logger import JinaLogger
from tests.k8s_otel.kind_wrapper import KindClusterWrapperV2
from tests.k8s_otel.util import get_last_health_check_data, parse_string_jaeger_tags
@pytest.mark.asyncio
@pytest.mark.timeout(1800)
async def test_flow_resource_labeling(
tmpdir, otel_test_namespace: str, k8s_cluster_v2: KindClusterWrapperV2
):
NAMESPACE = 'test-flow-resource-labeling'
dump_path = os.path.join(tmpdir, NAMESPACE)
logger = JinaLogger(NAMESPACE)
# Create k8s flow artifacts
flow = Flow(
name='test-flow-metrics',
port=8080,
metrics=True,
metrics_exporter_host=f'http://otel-collector.{otel_test_namespace}.svc.cluster.local',
metrics_exporter_port=4317,
tracing=True,
traces_exporter_host=f'http://jaeger.{otel_test_namespace}.svc.cluster.local',
traces_exporter_port=4317,
).add(
name='instrumentation',
uses='docker://test-instrumentation:test-pip',
)
flow.to_kubernetes_yaml(dump_path, k8s_namespace=NAMESPACE)
# Deploy flow
k8s_cluster_v2.deploy_from_dir(dir=dump_path, namespace=NAMESPACE)
# Make client requests
with k8s_cluster_v2.port_forward(
'svc/gateway', NAMESPACE, svc_port=8080
) as gateway_port:
from jina import Client
[docs async for docs in Client(port=gateway_port, asyncio=True).post("/")]
# Give grace period for metrics and traces to be exported
await asyncio.sleep(60)
# Check Jaeger API
with k8s_cluster_v2.port_forward(
'svc/jaeger', otel_test_namespace, svc_port=16686
) as jaeger_port:
try:
# Gateway
trace_data = get_last_health_check_data(
jaeger_port=jaeger_port, service_name='gateway'
)
assert trace_data['processes']['p1']['serviceName'] == 'gateway'
tags: Dict[str, str] = parse_string_jaeger_tags(
trace_data['processes']['p1']['tags']
)
assert tags['k8s.deployment.name'] == 'gateway'
assert tags['k8s.namespace.name'] == NAMESPACE
assert tags['k8s.pod.name'].startswith('gateway-')
# Instrumentation Executor
trace_data = get_last_health_check_data(
jaeger_port=jaeger_port, service_name='instrumentation'
)
assert trace_data['processes']['p1']['serviceName'] == 'instrumentation'
tags: Dict[str, str] = parse_string_jaeger_tags(
trace_data['processes']['p1']['tags']
)
assert tags['k8s.deployment.name'] == 'instrumentation'
assert tags['k8s.namespace.name'] == NAMESPACE
assert tags['k8s.pod.name'].startswith('instrumentation-')
except AssertionError as e:
logger.error(trace_data)
raise e
with k8s_cluster_v2.port_forward(
'svc/prometheus', otel_test_namespace, svc_port=9090
) as prometheus_port:
try:
# Check Prometheus Labels
prometheus_labels: List[str] = requests.get(
f'http://localhost:{prometheus_port}/api/v1/labels'
).json()['data']
assert 'k8s_deployment_name' in prometheus_labels
assert 'k8s_namespace_name' in prometheus_labels
assert 'k8s_pod_name' in prometheus_labels
except AssertionError as e:
logger.error(prometheus_labels)
raise e
try:
depl_values: List[str] = requests.get(
f'http://localhost:{prometheus_port}/api/v1/label/k8s_deployment_name/values'
).json()['data']
assert 'gateway' in depl_values
assert 'instrumentation' in depl_values
except AssertionError as e:
logger.error(depl_values)
raise e
try:
ns_values: List[str] = requests.get(
f'http://localhost:{prometheus_port}/api/v1/label/k8s_namespace_name/values'
).json()['data']
assert NAMESPACE in ns_values
except AssertionError as e:
logger.error(ns_values)
raise e
try:
pod_values: List[str] = requests.get(
f'http://localhost:{prometheus_port}/api/v1/label/k8s_pod_name/values'
).json()['data']
assert any(i.startswith('gateway-') for i in pod_values)
assert any(i.startswith('instrumentation-') for i in pod_values)
except AssertionError as e:
logger.error(pod_values)
raise e
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
from mmdet.core.mask import BitmapMasks
def create_random_bboxes(num_bboxes, img_w, img_h):
bboxes_left_top = np.random.uniform(0, 0.5, size=(num_bboxes, 2))
bboxes_right_bottom = np.random.uniform(0.5, 1, size=(num_bboxes, 2))
bboxes = np.concatenate((bboxes_left_top, bboxes_right_bottom), 1)
bboxes = (bboxes * np.array([img_w, img_h, img_w, img_h])).astype(
np.float32)
return bboxes
def create_full_masks(gt_bboxes, img_w, img_h):
xmin, ymin = gt_bboxes[:, 0:1], gt_bboxes[:, 1:2]
xmax, ymax = gt_bboxes[:, 2:3], gt_bboxes[:, 3:4]
gt_masks = np.zeros((len(gt_bboxes), img_h, img_w), dtype=np.uint8)
for i in range(len(gt_bboxes)):
gt_masks[i, int(ymin[i]):int(ymax[i]), int(xmin[i]):int(xmax[i])] = 1
gt_masks = BitmapMasks(gt_masks, img_h, img_w)
return gt_masks
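# --- Illustrative usage sketch (an added example, not part of the original
# helpers; it relies only on numpy and the two functions above).
if __name__ == '__main__':
    demo_bboxes = create_random_bboxes(num_bboxes=3, img_w=400, img_h=300)
    demo_masks = create_full_masks(demo_bboxes, img_w=400, img_h=300)
    # Each mask fills exactly its bounding box with ones.
    print(demo_bboxes.shape, demo_masks.masks.shape)  # (3, 4) and (3, 300, 400)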
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
def create_random_bboxes(num_bboxes, img_w, img_h):
bboxes_left_top = np.random.uniform(0, 0.5, size=(num_bboxes, 2))
bboxes_right_bottom = np.random.uniform(0.5, 1, size=(num_bboxes, 2))
bboxes = np.concatenate((bboxes_left_top, bboxes_right_bottom), 1)
bboxes = (bboxes * np.array([img_w, img_h, img_w, img_h])).astype(
np.float32)
return bboxes
|
"""Base schema for callback managers."""
import uuid
from dataclasses import dataclass
from datetime import datetime
from enum import Enum
from typing import Any, Dict, Optional
# timestamp for callback events
TIMESTAMP_FORMAT = "%m/%d/%Y, %H:%M:%S.%f"
# base trace_id for the tracemap in callback_manager
BASE_TRACE_EVENT = "root"
class CBEventType(str, Enum):
"""
Callback manager event types.
Attributes:
CHUNKING: Logs for the before and after of text splitting.
NODE_PARSING: Logs for the documents and the nodes that they are parsed into.
EMBEDDING: Logs for the number of texts embedded.
LLM: Logs for the template and response of LLM calls.
QUERY: Keeps track of the start and end of each query.
RETRIEVE: Logs for the nodes retrieved for a query.
SYNTHESIZE: Logs for the result for synthesize calls.
TREE: Logs for the summary and level of summaries generated.
SUB_QUESTION: Logs for a generated sub question and answer.
"""
CHUNKING = "chunking"
NODE_PARSING = "node_parsing"
EMBEDDING = "embedding"
LLM = "llm"
QUERY = "query"
RETRIEVE = "retrieve"
SYNTHESIZE = "synthesize"
TREE = "tree"
SUB_QUESTION = "sub_question"
TEMPLATING = "templating"
FUNCTION_CALL = "function_call"
RERANKING = "reranking"
EXCEPTION = "exception"
AGENT_STEP = "agent_step"
class EventPayload(str, Enum):
DOCUMENTS = "documents" # list of documents before parsing
CHUNKS = "chunks" # list of text chunks
NODES = "nodes" # list of nodes
PROMPT = "formatted_prompt" # formatted prompt sent to LLM
MESSAGES = "messages" # list of messages sent to LLM
COMPLETION = "completion" # completion from LLM
RESPONSE = "response" # message response from LLM
QUERY_STR = "query_str" # query used for query engine
SUB_QUESTION = "sub_question" # a sub question & answer + sources
EMBEDDINGS = "embeddings" # list of embeddings
TOP_K = "top_k" # top k nodes retrieved
ADDITIONAL_KWARGS = "additional_kwargs" # additional kwargs for event call
SERIALIZED = "serialized" # serialized object for event caller
FUNCTION_CALL = "function_call" # function call for the LLM
FUNCTION_OUTPUT = "function_call_response" # function call output
TOOL = "tool" # tool used in LLM call
MODEL_NAME = "model_name" # model name used in an event
TEMPLATE = "template" # template used in LLM call
TEMPLATE_VARS = "template_vars" # template variables used in LLM call
SYSTEM_PROMPT = "system_prompt" # system prompt used in LLM call
QUERY_WRAPPER_PROMPT = "query_wrapper_prompt" # query wrapper prompt used in LLM
EXCEPTION = "exception" # exception raised in an event
# events that will never have children events
LEAF_EVENTS = (CBEventType.CHUNKING, CBEventType.LLM, CBEventType.EMBEDDING)
@dataclass
class CBEvent:
"""Generic class to store event information."""
event_type: CBEventType
payload: Optional[Dict[str, Any]] = None
time: str = ""
id_: str = ""
def __post_init__(self) -> None:
"""Init time and id if needed."""
if not self.time:
self.time = datetime.now().strftime(TIMESTAMP_FORMAT)
if not self.id_:
            self.id_ = str(uuid.uuid4())
@dataclass
class EventStats:
"""Time-based Statistics for events."""
total_secs: float
average_secs: float
total_count: int
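# --- Minimal usage sketch (an illustrative addition, not part of the original
# schema): construct an event and let __post_init__ fill in the defaults.
if __name__ == "__main__":
    demo_event = CBEvent(
        event_type=CBEventType.LLM,
        payload={EventPayload.PROMPT: "What is a callback?"},
    )
    # `time` and `id_` are auto-populated when omitted.
    print(demo_event.event_type, demo_event.time, demo_event.id_)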
|
"""Base schema for callback managers."""
import uuid
from dataclasses import dataclass
from datetime import datetime
from enum import Enum
from typing import Any, Dict, Optional
# timestamp for callback events
TIMESTAMP_FORMAT = "%m/%d/%Y, %H:%M:%S.%f"
# base trace_id for the tracemap in callback_manager
BASE_TRACE_EVENT = "root"
class CBEventType(str, Enum):
"""Callback manager event types.
Attributes:
CHUNKING: Logs for the before and after of text splitting.
NODE_PARSING: Logs for the documents and the nodes that they are parsed into.
EMBEDDING: Logs for the number of texts embedded.
LLM: Logs for the template and response of LLM calls.
QUERY: Keeps track of the start and end of each query.
RETRIEVE: Logs for the nodes retrieved for a query.
SYNTHESIZE: Logs for the result for synthesize calls.
TREE: Logs for the summary and level of summaries generated.
SUB_QUESTION: Logs for a generated sub question and answer.
"""
CHUNKING = "chunking"
NODE_PARSING = "node_parsing"
EMBEDDING = "embedding"
LLM = "llm"
QUERY = "query"
RETRIEVE = "retrieve"
SYNTHESIZE = "synthesize"
TREE = "tree"
SUB_QUESTION = "sub_question"
TEMPLATING = "templating"
FUNCTION_CALL = "function_call"
RERANKING = "reranking"
EXCEPTION = "exception"
AGENT_STEP = "agent_step"
class EventPayload(str, Enum):
DOCUMENTS = "documents" # list of documents before parsing
CHUNKS = "chunks" # list of text chunks
NODES = "nodes" # list of nodes
PROMPT = "formatted_prompt" # formatted prompt sent to LLM
MESSAGES = "messages" # list of messages sent to LLM
COMPLETION = "completion" # completion from LLM
RESPONSE = "response" # message response from LLM
QUERY_STR = "query_str" # query used for query engine
SUB_QUESTION = "sub_question" # a sub question & answer + sources
EMBEDDINGS = "embeddings" # list of embeddings
TOP_K = "top_k" # top k nodes retrieved
ADDITIONAL_KWARGS = "additional_kwargs" # additional kwargs for event call
SERIALIZED = "serialized" # serialized object for event caller
FUNCTION_CALL = "function_call" # function call for the LLM
FUNCTION_OUTPUT = "function_call_response" # function call output
TOOL = "tool" # tool used in LLM call
MODEL_NAME = "model_name" # model name used in an event
TEMPLATE = "template" # template used in LLM call
TEMPLATE_VARS = "template_vars" # template variables used in LLM call
SYSTEM_PROMPT = "system_prompt" # system prompt used in LLM call
QUERY_WRAPPER_PROMPT = "query_wrapper_prompt" # query wrapper prompt used in LLM
EXCEPTION = "exception" # exception raised in an event
# events that will never have children events
LEAF_EVENTS = (CBEventType.CHUNKING, CBEventType.LLM, CBEventType.EMBEDDING)
@dataclass
class CBEvent:
"""Generic class to store event information."""
event_type: CBEventType
payload: Optional[Dict[str, Any]] = None
time: str = ""
id_: str = ""
def __post_init__(self) -> None:
"""Init time and id if needed."""
if not self.time:
self.time = datetime.now().strftime(TIMESTAMP_FORMAT)
if not self.id_:
            self.id_ = str(uuid.uuid4())
@dataclass
class EventStats:
"""Time-based Statistics for events."""
total_secs: float
average_secs: float
total_count: int
|
from collections.abc import Sequence
from inspect import signature
from typing import Optional, Union
from langchain_core.callbacks import Callbacks
from langchain_core.documents import (
BaseDocumentCompressor,
BaseDocumentTransformer,
Document,
)
from pydantic import ConfigDict
class DocumentCompressorPipeline(BaseDocumentCompressor):
"""Document compressor that uses a pipeline of Transformers."""
transformers: list[Union[BaseDocumentTransformer, BaseDocumentCompressor]]
"""List of document filters that are chained together and run in sequence."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Transform a list of documents."""
for _transformer in self.transformers:
if isinstance(_transformer, BaseDocumentCompressor):
accepts_callbacks = (
signature(_transformer.compress_documents).parameters.get(
"callbacks",
)
is not None
)
if accepts_callbacks:
documents = _transformer.compress_documents(
documents,
query,
callbacks=callbacks,
)
else:
documents = _transformer.compress_documents(documents, query)
elif isinstance(_transformer, BaseDocumentTransformer):
documents = _transformer.transform_documents(documents)
else:
msg = f"Got unexpected transformer type: {_transformer}"
raise ValueError(msg)
return documents
async def acompress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Compress retrieved documents given the query context."""
for _transformer in self.transformers:
if isinstance(_transformer, BaseDocumentCompressor):
accepts_callbacks = (
signature(_transformer.acompress_documents).parameters.get(
"callbacks",
)
is not None
)
if accepts_callbacks:
documents = await _transformer.acompress_documents(
documents,
query,
callbacks=callbacks,
)
else:
documents = await _transformer.acompress_documents(documents, query)
elif isinstance(_transformer, BaseDocumentTransformer):
documents = await _transformer.atransform_documents(documents)
else:
msg = f"Got unexpected transformer type: {_transformer}"
raise ValueError(msg)
return documents
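# --- Hypothetical usage sketch (an added example; `DropShortDocuments` is a
# made-up toy transformer, not a real LangChain component).
if __name__ == "__main__":
    class DropShortDocuments(BaseDocumentTransformer):
        """Keep only documents whose content is longer than 10 characters."""
        def transform_documents(self, documents, **kwargs):
            return [doc for doc in documents if len(doc.page_content) > 10]
        async def atransform_documents(self, documents, **kwargs):
            return self.transform_documents(documents, **kwargs)
    pipeline = DocumentCompressorPipeline(transformers=[DropShortDocuments()])
    docs = [Document(page_content="hi"), Document(page_content="a longer document")]
    # Only the longer document survives the pipeline.
    print(pipeline.compress_documents(docs, query="anything"))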
|
from collections.abc import Sequence
from inspect import signature
from typing import Optional, Union
from langchain_core.callbacks import Callbacks
from langchain_core.documents import (
BaseDocumentCompressor,
BaseDocumentTransformer,
Document,
)
from pydantic import ConfigDict
class DocumentCompressorPipeline(BaseDocumentCompressor):
"""Document compressor that uses a pipeline of Transformers."""
transformers: list[Union[BaseDocumentTransformer, BaseDocumentCompressor]]
"""List of document filters that are chained together and run in sequence."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Transform a list of documents."""
for _transformer in self.transformers:
if isinstance(_transformer, BaseDocumentCompressor):
accepts_callbacks = (
signature(_transformer.compress_documents).parameters.get(
"callbacks"
)
is not None
)
if accepts_callbacks:
documents = _transformer.compress_documents(
documents, query, callbacks=callbacks
)
else:
documents = _transformer.compress_documents(documents, query)
elif isinstance(_transformer, BaseDocumentTransformer):
documents = _transformer.transform_documents(documents)
else:
msg = f"Got unexpected transformer type: {_transformer}"
raise ValueError(msg)
return documents
async def acompress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Compress retrieved documents given the query context."""
for _transformer in self.transformers:
if isinstance(_transformer, BaseDocumentCompressor):
accepts_callbacks = (
signature(_transformer.acompress_documents).parameters.get(
"callbacks"
)
is not None
)
if accepts_callbacks:
documents = await _transformer.acompress_documents(
documents, query, callbacks=callbacks
)
else:
documents = await _transformer.acompress_documents(documents, query)
elif isinstance(_transformer, BaseDocumentTransformer):
documents = await _transformer.atransform_documents(documents)
else:
msg = f"Got unexpected transformer type: {_transformer}"
raise ValueError(msg)
return documents
|
from docarray.predefined_document.image import Image
from docarray.predefined_document.text import Text
__all__ = ['Text', 'Image']
|
from .image import Image
from .text import Text
__all__ = ['Text', 'Image']
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../common/lsj_100e_coco_instance.py'
]
image_size = (1024, 1024)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN that handles empty tensor in head. It can be changed to
# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed
# Requires MMCV-full after https://github.com/open-mmlab/mmcv/pull/1205.
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True)
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32,
batch_augments=batch_augments),
backbone=dict(
frozen_stages=-1,
norm_eval=False,
norm_cfg=norm_cfg,
init_cfg=None,
style='caffe'),
neck=dict(norm_cfg=norm_cfg),
rpn_head=dict(num_convs=2),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=head_norm_cfg),
mask_head=dict(norm_cfg=head_norm_cfg)))
file_client_args = dict(backend='disk')
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomResize', scale=image_size, ratio_range=(0.1, 2.0)),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
# Use RepeatDataset to speed up training
train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../common/lsj_100e_coco_instance.py'
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN that handles empty tensor in head. It can be changed to
# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed
# Requires MMCV-full after https://github.com/open-mmlab/mmcv/pull/1205.
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True)
model = dict(
backbone=dict(
frozen_stages=-1,
norm_eval=False,
norm_cfg=norm_cfg,
init_cfg=None,
style='caffe'),
neck=dict(norm_cfg=norm_cfg),
rpn_head=dict(num_convs=2),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=head_norm_cfg),
mask_head=dict(norm_cfg=head_norm_cfg)))
file_client_args = dict(backend='disk')
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
image_size = (1024, 1024)
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='Resize',
img_scale=image_size,
ratio_range=(0.1, 2.0),
multiscale_mode='range',
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=image_size),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
# Use RepeatDataset to speed up training
data = dict(
train=dict(dataset=dict(pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
from .tfidf_text_executor import TFIDFTextEncoder
|
from .tfidf_text_executor import TFIDFTextEncoder
|
from backend.blocks.jina._auth import (
JinaCredentials,
JinaCredentialsField,
JinaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
class JinaChunkingBlock(Block):
class Input(BlockSchema):
texts: list = SchemaField(description="List of texts to chunk")
credentials: JinaCredentialsInput = JinaCredentialsField()
max_chunk_length: int = SchemaField(
description="Maximum length of each chunk", default=1000
)
return_tokens: bool = SchemaField(
description="Whether to return token information", default=False
)
class Output(BlockSchema):
chunks: list = SchemaField(description="List of chunked texts")
tokens: list = SchemaField(
description="List of token information for each chunk",
)
def __init__(self):
super().__init__(
id="806fb15e-830f-4796-8692-557d300ff43c",
description="Chunks texts using Jina AI's segmentation service",
categories={BlockCategory.AI, BlockCategory.TEXT},
input_schema=JinaChunkingBlock.Input,
output_schema=JinaChunkingBlock.Output,
)
def run(
self, input_data: Input, *, credentials: JinaCredentials, **kwargs
) -> BlockOutput:
url = "https://segment.jina.ai/"
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
}
all_chunks = []
all_tokens = []
for text in input_data.texts:
data = {
"content": text,
"return_tokens": str(input_data.return_tokens).lower(),
"return_chunks": "true",
"max_chunk_length": str(input_data.max_chunk_length),
}
response = requests.post(url, headers=headers, json=data)
result = response.json()
all_chunks.extend(result.get("chunks", []))
if input_data.return_tokens:
all_tokens.extend(result.get("tokens", []))
yield "chunks", all_chunks
if input_data.return_tokens:
yield "tokens", all_tokens
|
from backend.blocks.jina._auth import (
JinaCredentials,
JinaCredentialsField,
JinaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
class JinaChunkingBlock(Block):
class Input(BlockSchema):
texts: list = SchemaField(description="List of texts to chunk")
credentials: JinaCredentialsInput = JinaCredentialsField()
max_chunk_length: int = SchemaField(
description="Maximum length of each chunk", default=1000
)
return_tokens: bool = SchemaField(
description="Whether to return token information", default=False
)
class Output(BlockSchema):
chunks: list = SchemaField(description="List of chunked texts")
tokens: list = SchemaField(
description="List of token information for each chunk", optional=True
)
def __init__(self):
super().__init__(
id="806fb15e-830f-4796-8692-557d300ff43c",
description="Chunks texts using Jina AI's segmentation service",
categories={BlockCategory.AI, BlockCategory.TEXT},
input_schema=JinaChunkingBlock.Input,
output_schema=JinaChunkingBlock.Output,
)
def run(
self, input_data: Input, *, credentials: JinaCredentials, **kwargs
) -> BlockOutput:
url = "https://segment.jina.ai/"
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
}
all_chunks = []
all_tokens = []
for text in input_data.texts:
data = {
"content": text,
"return_tokens": str(input_data.return_tokens).lower(),
"return_chunks": "true",
"max_chunk_length": str(input_data.max_chunk_length),
}
response = requests.post(url, headers=headers, json=data)
result = response.json()
all_chunks.extend(result.get("chunks", []))
if input_data.return_tokens:
all_tokens.extend(result.get("tokens", []))
yield "chunks", all_chunks
if input_data.return_tokens:
yield "tokens", all_tokens
|
_base_ = './cascade-mask-rcnn_r50_fpn_ms-3x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
|
_base_ = './cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_shrink
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_silu as hard_swish
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_sigmoid
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import silu as swish
from keras.src.activations.activations import soft_shrink
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import sparse_plus
from keras.src.activations.activations import sparsemax
from keras.src.activations.activations import squareplus
from keras.src.activations.activations import tanh
from keras.src.activations.activations import tanh_shrink
from keras.src.activations.activations import threshold
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_shrink
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_silu as hard_swish
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_sigmoid
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import silu as swish
from keras.src.activations.activations import soft_shrink
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import sparse_plus
from keras.src.activations.activations import sparsemax
from keras.src.activations.activations import squareplus
from keras.src.activations.activations import tanh
from keras.src.activations.activations import tanh_shrink
|
import json
from collections.abc import Sequence
from langchain_core.agents import AgentAction, AgentActionMessageLog
from langchain_core.messages import AIMessage, BaseMessage, FunctionMessage
def _convert_agent_action_to_messages(
agent_action: AgentAction, observation: str
) -> list[BaseMessage]:
"""Convert an agent action to a message.
This code is used to reconstruct the original AI message from the agent action.
Args:
agent_action: Agent action to convert.
Returns:
AIMessage or the previous messages plus a FunctionMessage that corresponds to
the original tool invocation
"""
if isinstance(agent_action, AgentActionMessageLog):
return list(agent_action.message_log) + [
_create_function_message(agent_action, observation)
]
else:
return [AIMessage(content=agent_action.log)]
def _create_function_message(
agent_action: AgentAction, observation: str
) -> FunctionMessage:
"""Convert agent action and observation into a function message.
Args:
agent_action: the tool invocation request from the agent.
observation: the result of the tool invocation.
Returns:
FunctionMessage that corresponds to the original tool invocation.
Raises:
ValueError: if the observation cannot be converted to a string.
"""
if not isinstance(observation, str):
try:
content = json.dumps(observation, ensure_ascii=False)
except Exception:
content = str(observation)
else:
content = observation
return FunctionMessage(
name=agent_action.tool,
content=content,
)
def format_to_openai_function_messages(
intermediate_steps: Sequence[tuple[AgentAction, str]],
) -> list[BaseMessage]:
"""Convert (AgentAction, tool output) tuples into FunctionMessages.
Args:
intermediate_steps: Steps the LLM has taken to date, along with observations
Returns:
list of messages to send to the LLM for the next prediction
Raises:
ValueError: if the observation cannot be converted to a string.
"""
messages = []
for agent_action, observation in intermediate_steps:
messages.extend(_convert_agent_action_to_messages(agent_action, observation))
return messages
# Backwards compatibility
format_to_openai_functions = format_to_openai_function_messages
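# --- Illustrative demo (an added sketch; the tool name and observation are
# invented). An AgentActionMessageLog carries the original AI message, so the
# observation is appended as a FunctionMessage.
if __name__ == "__main__":
    demo_action = AgentActionMessageLog(
        tool="search",
        tool_input="weather in Paris",
        log="calling search",
        message_log=[AIMessage(content="", additional_kwargs={"function_call": {"name": "search"}})],
    )
    for message in format_to_openai_function_messages([(demo_action, "It is sunny.")]):
        print(type(message).__name__, message.content)
    # -> AIMessage '' followed by FunctionMessage 'It is sunny.'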
|
import json
from typing import List, Sequence, Tuple
from langchain_core.agents import AgentAction, AgentActionMessageLog
from langchain_core.messages import AIMessage, BaseMessage, FunctionMessage
def _convert_agent_action_to_messages(
agent_action: AgentAction, observation: str
) -> List[BaseMessage]:
"""Convert an agent action to a message.
This code is used to reconstruct the original AI message from the agent action.
Args:
agent_action: Agent action to convert.
Returns:
AIMessage or the previous messages plus a FunctionMessage that corresponds to
the original tool invocation
"""
if isinstance(agent_action, AgentActionMessageLog):
return list(agent_action.message_log) + [
_create_function_message(agent_action, observation)
]
else:
return [AIMessage(content=agent_action.log)]
def _create_function_message(
agent_action: AgentAction, observation: str
) -> FunctionMessage:
"""Convert agent action and observation into a function message.
Args:
agent_action: the tool invocation request from the agent.
observation: the result of the tool invocation.
Returns:
FunctionMessage that corresponds to the original tool invocation.
Raises:
ValueError: if the observation cannot be converted to a string.
"""
if not isinstance(observation, str):
try:
content = json.dumps(observation, ensure_ascii=False)
except Exception:
content = str(observation)
else:
content = observation
return FunctionMessage(
name=agent_action.tool,
content=content,
)
def format_to_openai_function_messages(
intermediate_steps: Sequence[Tuple[AgentAction, str]],
) -> List[BaseMessage]:
"""Convert (AgentAction, tool output) tuples into FunctionMessages.
Args:
intermediate_steps: Steps the LLM has taken to date, along with observations
Returns:
list of messages to send to the LLM for the next prediction
Raises:
ValueError: if the observation cannot be converted to a string.
"""
messages = []
for agent_action, observation in intermediate_steps:
messages.extend(_convert_agent_action_to_messages(agent_action, observation))
return messages
# Backwards compatibility
format_to_openai_functions = format_to_openai_function_messages
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='RepPointsDetector',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_input',
num_outs=5),
bbox_head=dict(
type='RepPointsHead',
num_classes=80,
in_channels=256,
feat_channels=256,
point_feat_channels=256,
stacked_convs=3,
num_points=9,
gradient_mul=0.1,
point_strides=[8, 16, 32, 64, 128],
point_base_scale=4,
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox_init=dict(type='SmoothL1Loss', beta=0.11, loss_weight=0.5),
loss_bbox_refine=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0),
transform_method='moment'),
# training and testing settings
train_cfg=dict(
init=dict(
assigner=dict(type='PointAssigner', scale=4, pos_num=1),
allowed_border=-1,
pos_weight=-1,
debug=False),
refine=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False)),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
optim_wrapper = dict(optimizer=dict(lr=0.01))
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='RepPointsDetector',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_input',
num_outs=5),
bbox_head=dict(
type='RepPointsHead',
num_classes=80,
in_channels=256,
feat_channels=256,
point_feat_channels=256,
stacked_convs=3,
num_points=9,
gradient_mul=0.1,
point_strides=[8, 16, 32, 64, 128],
point_base_scale=4,
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox_init=dict(type='SmoothL1Loss', beta=0.11, loss_weight=0.5),
loss_bbox_refine=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0),
transform_method='moment'),
# training and testing settings
train_cfg=dict(
init=dict(
assigner=dict(type='PointAssigner', scale=4, pos_num=1),
allowed_border=-1,
pos_weight=-1,
debug=False),
refine=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False)),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
optimizer = dict(lr=0.01)
|
import json
from typing import Dict
import pytest
from jina.orchestrate.deployments.config.k8slib.kubernetes_tools import get_yaml
@pytest.mark.parametrize(
['template', 'params'],
[
('namespace', {'name': 'test-ns'}),
('service', {'name': 'test-svc'}),
('deployment-executor', {'name': 'test-dep', 'protocol': 'grpc'}),
(
'configmap',
{
'name': 'test-configmap-executor',
'namespace': 'test-configmap',
'data': {'k1': 'v1', 'k2': 'v2'},
},
),
],
)
def test_get(template: str, params: Dict):
config = get_yaml(template=template, params=params)
for v in params.values():
if isinstance(v, str):
assert v in json.dumps(config)
elif isinstance(v, dict):
for sub_key, sub_v in v.items():
assert config['data'][sub_key] == sub_v
@pytest.mark.parametrize('template', ['deployment-executor'])
def test_get_deployment_with_device_plugin(template, monkeypatch):
params = {
'name': 'test-name',
'namespace': 'test-namespace',
'image': 'test-image',
'replicas': 1,
'command': 'test-command',
'args': 'test-args',
'protocol': 'grpc',
'port': 1234,
'port_out': 1234,
'port_ctrl': 1234,
'pull_policy': 1234,
'device_plugins': {'hardware-vendor.example/foo': 2, 'nvidia.com/gpu:': 3},
}
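    # The deployment template is expected to surface `device_plugins` as
    # container resource limits, which the assertion below verifies.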
config = get_yaml(template, params)
assert config['spec']['template']['spec']['containers'][0]['resources'] == {
'limits': {'hardware-vendor.example/foo': 2, 'nvidia.com/gpu:': 3}
}
|
import json
from typing import Dict
import pytest
from jina.orchestrate.deployments.config.k8slib.kubernetes_tools import get_yaml
@pytest.mark.parametrize(
['template', 'params'],
[
('namespace', {'name': 'test-ns'}),
('service', {'name': 'test-svc'}),
('deployment-executor', {'name': 'test-dep'}),
(
'configmap',
{
'name': 'test-configmap-executor',
'namespace': 'test-configmap',
'data': {'k1': 'v1', 'k2': 'v2'},
},
),
],
)
def test_get(template: str, params: Dict):
config = get_yaml(template=template, params=params)
for v in params.values():
if isinstance(v, str):
assert v in json.dumps(config)
elif isinstance(v, dict):
for sub_key, sub_v in v.items():
assert config['data'][sub_key] == sub_v
@pytest.mark.parametrize('template', ['deployment-executor'])
def test_get_deployment_with_device_plugin(template, monkeypatch):
params = {
'name': 'test-name',
'namespace': 'test-namespace',
'image': 'test-image',
'replicas': 1,
'command': 'test-command',
'args': 'test-args',
'port': 1234,
'port_out': 1234,
'port_ctrl': 1234,
'pull_policy': 1234,
'device_plugins': {'hardware-vendor.example/foo': 2, 'nvidia.com/gpu:': 3},
}
config = get_yaml(template, params)
assert config['spec']['template']['spec']['containers'][0]['resources'] == {
'limits': {'hardware-vendor.example/foo': 2, 'nvidia.com/gpu:': 3}
}
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Callable, List
import pytest
from jina import Flow, DocumentArray
from ...sentence_encoder import TransformerSentenceEncoder
@pytest.mark.parametrize(
'request_size', [1, 10, 50, 100]
)
def test_integration(
data_generator: Callable,
request_size: int
):
with Flow().add(uses=TransformerSentenceEncoder) as flow:
resp = flow.post(on='/index', inputs=data_generator(), request_size=request_size, return_results=True)
assert min(len(resp) * request_size, 50) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding is not None
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Callable, List
import pytest
from jina import Flow, DocumentArray
from jinahub.text.encoders.sentence_encoder import TransformerSentenceEncoder
@pytest.mark.parametrize(
'request_size', [1, 10, 50, 100]
)
def test_integration(
data_generator: Callable,
request_size: int
):
with Flow().add(uses=TransformerSentenceEncoder) as flow:
resp = flow.post(on='/index', inputs=data_generator(), request_size=request_size, return_results=True)
assert min(len(resp) * request_size, 50) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding is not None
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import unittest
import numpy as np
from mmengine.data import BaseDataElement as PixelData
from mmengine.data import InstanceData
from mmdet.core import DetDataSample
from mmdet.core.mask import BitmapMasks
from mmdet.datasets.pipelines import PackDetInputs
class TestPackDetInputs(unittest.TestCase):
def setUp(self):
"""Setup the model and optimizer which are used in every test method.
TestCase calls functions in this order: setUp() -> testMethod() ->
tearDown() -> cleanUp()
"""
data_prefix = osp.join(osp.dirname(__file__), '../../data')
img_path = osp.join(data_prefix, 'color.jpg')
rng = np.random.RandomState(0)
self.results1 = {
'img_id': 1,
'img_path': img_path,
'ori_shape': (300, 400),
'img_shape': (600, 800),
'scale_factor': 2.0,
'flip': False,
'img': rng.rand(300, 400),
'gt_seg_map': rng.rand(300, 400),
'gt_masks':
BitmapMasks(rng.rand(3, 300, 400), height=300, width=400),
'gt_bboxes_labels': rng.rand(3, ),
            'gt_ignore_flags': np.array([0, 0, 1], dtype=bool),
'proposals': rng.rand(2, 4)
}
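        # `gt_ignore_flags` marks the last instance as ignored, so the tests
        # below expect 2 kept instances and 1 ignored instance.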
self.results2 = {
'img_id': 1,
'img_path': img_path,
'ori_shape': (300, 400),
'img_shape': (600, 800),
'scale_factor': 2.0,
'flip': False,
'img': rng.rand(300, 400),
'gt_seg_map': rng.rand(300, 400),
'gt_masks':
BitmapMasks(rng.rand(3, 300, 400), height=300, width=400),
'gt_bboxes_labels': rng.rand(3, ),
'proposals': rng.rand(2, 4)
}
self.meta_keys = ('img_id', 'img_path', 'ori_shape', 'scale_factor',
'flip')
def test_transform(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
results = transform(copy.deepcopy(self.results1))
self.assertIn('data_sample', results)
self.assertIsInstance(results['data_sample'], DetDataSample)
self.assertIsInstance(results['data_sample'].gt_instances,
InstanceData)
self.assertIsInstance(results['data_sample'].ignored_instances,
InstanceData)
self.assertEqual(len(results['data_sample'].gt_instances), 2)
self.assertEqual(len(results['data_sample'].ignored_instances), 1)
self.assertIsInstance(results['data_sample'].gt_sem_seg, PixelData)
self.assertIsInstance(results['data_sample'].proposals, InstanceData)
self.assertEqual(len(results['data_sample'].proposals), 2)
self.assertIsInstance(results['data_sample'].proposals.bboxes,
np.ndarray)
def test_transform_without_ignore(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
results = transform(copy.deepcopy(self.results2))
self.assertIn('data_sample', results)
self.assertIsInstance(results['data_sample'], DetDataSample)
self.assertIsInstance(results['data_sample'].gt_instances,
InstanceData)
self.assertIsInstance(results['data_sample'].ignored_instances,
InstanceData)
self.assertEqual(len(results['data_sample'].gt_instances), 3)
self.assertEqual(len(results['data_sample'].ignored_instances), 0)
self.assertIsInstance(results['data_sample'].gt_sem_seg, PixelData)
self.assertIsInstance(results['data_sample'].proposals, InstanceData)
self.assertEqual(len(results['data_sample'].proposals), 2)
self.assertIsInstance(results['data_sample'].proposals.bboxes,
np.ndarray)
def test_repr(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
self.assertEqual(
repr(transform), f'PackDetInputs(meta_keys={self.meta_keys})')
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import unittest
import numpy as np
from mmengine.data import BaseDataElement as PixelData
from mmengine.data import InstanceData
from mmdet.core import DetDataSample
from mmdet.core.mask import BitmapMasks
from mmdet.datasets.pipelines import PackDetInputs
class TestPackDetInputs(unittest.TestCase):
def setUp(self):
"""Setup the model and optimizer which are used in every test method.
TestCase calls functions in this order: setUp() -> testMethod() ->
tearDown() -> cleanUp()
"""
data_prefix = osp.join(osp.dirname(__file__), '../../data')
img_path = osp.join(data_prefix, 'color.jpg')
rng = np.random.RandomState(0)
self.results1 = {
'img_id': 1,
'img_path': img_path,
'ori_height': 300,
'ori_width': 400,
'height': 600,
'width': 800,
'scale_factor': 2.0,
'flip': False,
'img': rng.rand(300, 400),
'gt_seg_map': rng.rand(300, 400),
'gt_masks':
BitmapMasks(rng.rand(3, 300, 400), height=300, width=400),
'gt_bboxes_labels': rng.rand(3, ),
            'gt_ignore_flags': np.array([0, 0, 1], dtype=bool),
'proposals': rng.rand(2, 4)
}
self.results2 = {
'img_id': 1,
'img_path': img_path,
'ori_height': 300,
'ori_width': 400,
'height': 600,
'width': 800,
'scale_factor': 2.0,
'flip': False,
'img': rng.rand(300, 400),
'gt_seg_map': rng.rand(300, 400),
'gt_masks':
BitmapMasks(rng.rand(3, 300, 400), height=300, width=400),
'gt_bboxes_labels': rng.rand(3, ),
'proposals': rng.rand(2, 4)
}
self.meta_keys = ('img_id', 'img_path', 'ori_shape', 'scale_factor',
'flip')
def test_transform(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
results = transform(copy.deepcopy(self.results1))
self.assertIn('data_sample', results)
self.assertIsInstance(results['data_sample'], DetDataSample)
self.assertIsInstance(results['data_sample'].gt_instances,
InstanceData)
self.assertIsInstance(results['data_sample'].ignored_instances,
InstanceData)
self.assertEqual(len(results['data_sample'].gt_instances), 2)
self.assertEqual(len(results['data_sample'].ignored_instances), 1)
self.assertIsInstance(results['data_sample'].gt_sem_seg, PixelData)
self.assertIsInstance(results['data_sample'].proposals, InstanceData)
self.assertEqual(len(results['data_sample'].proposals), 2)
self.assertIsInstance(results['data_sample'].proposals.bboxes,
np.ndarray)
def test_transform_without_ignore(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
results = transform(copy.deepcopy(self.results2))
self.assertIn('data_sample', results)
self.assertIsInstance(results['data_sample'], DetDataSample)
self.assertIsInstance(results['data_sample'].gt_instances,
InstanceData)
self.assertIsInstance(results['data_sample'].ignored_instances,
InstanceData)
self.assertEqual(len(results['data_sample'].gt_instances), 3)
self.assertEqual(len(results['data_sample'].ignored_instances), 0)
self.assertIsInstance(results['data_sample'].gt_sem_seg, PixelData)
self.assertIsInstance(results['data_sample'].proposals, InstanceData)
self.assertEqual(len(results['data_sample'].proposals), 2)
self.assertIsInstance(results['data_sample'].proposals.bboxes,
np.ndarray)
def test_repr(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
self.assertEqual(
repr(transform), f'PackDetInputs(meta_keys={self.meta_keys})')
|
import torch
import torchaudio.prototype.functional as F
from torchaudio_unittest.common_utils import nested_params, TorchaudioTestCase
class BatchConsistencyTest(TorchaudioTestCase):
@nested_params(
[F.convolve, F.fftconvolve],
)
def test_convolve(self, fn):
leading_dims = (2, 3)
L_x, L_y = 89, 43
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
actual = fn(x, y)
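        # Recompute each example individually and stack the results to compare
        # against the single batched call.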
expected = torch.stack(
[
torch.stack([fn(x[i, j].unsqueeze(0), y[i, j].unsqueeze(0)).squeeze(0) for j in range(leading_dims[1])])
for i in range(leading_dims[0])
]
)
self.assertEqual(expected, actual)
def test_add_noise(self):
leading_dims = (5, 2, 3)
L = 51
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device)
noise = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device)
lengths = torch.rand(*leading_dims, dtype=self.dtype, device=self.device)
snr = torch.rand(*leading_dims, dtype=self.dtype, device=self.device) * 10
actual = F.add_noise(waveform, noise, lengths, snr)
expected = []
for i in range(leading_dims[0]):
for j in range(leading_dims[1]):
for k in range(leading_dims[2]):
expected.append(F.add_noise(waveform[i][j][k], noise[i][j][k], lengths[i][j][k], snr[i][j][k]))
self.assertEqual(torch.stack(expected), actual.reshape(-1, L))
|
import torch
import torchaudio.prototype.functional as F
from torchaudio_unittest.common_utils import nested_params, TorchaudioTestCase
class BatchConsistencyTest(TorchaudioTestCase):
@nested_params(
[F.convolve, F.fftconvolve],
)
def test_convolve(self, fn):
leading_dims = (2, 3)
L_x, L_y = 89, 43
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
actual = fn(x, y)
expected = torch.stack(
[
torch.stack([fn(x[i, j].unsqueeze(0), y[i, j].unsqueeze(0)).squeeze(0) for j in range(leading_dims[1])])
for i in range(leading_dims[0])
]
)
self.assertEqual(expected, actual)
|
from sentence_transformers.similarity_functions import SimilarityFunction
__all__ = ["SimilarityFunction"]
|
from enum import Enum
class SimilarityFunction(Enum):
COSINE = 0
EUCLIDEAN = 1
MANHATTAN = 2
DOT_PRODUCT = 3
|
"""
This script contains an example of how to perform semantic search with Elasticsearch.
As dataset, we use the Quora Duplicate Questions dataset, which contains about 500k questions:
https://www.quora.com/q/quoradata/First-Quora-Dataset-Release-Question-Pairs
Questions are indexed to Elasticsearch together with their respective sentence
embeddings.
The script shows results from BM25 as well as from semantic search with
cosine similarity.
You need Elasticsearch up and running, for example using Docker
(https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html).
Further, you need the Python Elasticsearch Client installed: https://elasticsearch-py.readthedocs.io/
As the embedding model, we use the SBERT model 'quora-distilbert-multilingual',
which is aligned across 100 languages. I.e., you can type in a question in various languages and it will
return the closest questions in the corpus (questions in the corpus are mainly in English).
"""
import csv
import os
import time
from ssl import create_default_context
import tqdm.autonotebook
from elasticsearch import Elasticsearch, helpers
from sentence_transformers import SentenceTransformer, util
es = Elasticsearch(
hosts=["https://localhost:9200"],
basic_auth=("elastic", os.environ["ELASTIC_PASSWORD"]), # displayed at ES server startup
ssl_context=create_default_context(cafile="http_ca.crt"), # copied from inside ES container
)
model = SentenceTransformer("quora-distilbert-multilingual")
url = "http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv"
dataset_path = "quora_duplicate_questions.tsv"
max_corpus_size = 100000
# Download dataset if needed
if not os.path.exists(dataset_path):
print("Download dataset")
util.http_get(url, dataset_path)
# Get all unique sentences from the file
all_questions = {}
with open(dataset_path, encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_MINIMAL)
for row in reader:
all_questions[row["qid1"]] = row["question1"]
if len(all_questions) >= max_corpus_size:
break
all_questions[row["qid2"]] = row["question2"]
if len(all_questions) >= max_corpus_size:
break
qids = list(all_questions.keys())
questions = [all_questions[qid] for qid in qids]
# Index data, if the index does not exist
if not es.indices.exists(index="quora"):
try:
es_index = {
"mappings": {
"properties": {
"question": {"type": "text"},
"question_vector": {"type": "dense_vector", "dims": 768, "index": True, "similarity": "cosine"},
}
}
}
es.indices.create(index="quora", body=es_index)
chunk_size = 500
print("Index data (you can stop it by pressing Ctrl+C once):")
with tqdm.tqdm(total=len(qids)) as pbar:
for start_idx in range(0, len(qids), chunk_size):
end_idx = start_idx + chunk_size
embeddings = model.encode(questions[start_idx:end_idx], show_progress_bar=False)
bulk_data = []
for qid, question, embedding in zip(qids[start_idx:end_idx], questions[start_idx:end_idx], embeddings):
bulk_data.append(
{
"_index": "quora",
"_id": qid,
"_source": {"question": question, "question_vector": embedding},
}
)
helpers.bulk(es, bulk_data)
pbar.update(chunk_size)
except Exception:
print("During index an exception occurred. Continue\n\n")
# Interactive search queries
while True:
inp_question = input("Please enter a question: ")
encode_start_time = time.time()
question_embedding = model.encode(inp_question)
encode_end_time = time.time()
# Lexical search
bm25 = es.search(index="quora", body={"query": {"match": {"question": inp_question}}})
# Semantic search
sem_search = es.search(
index="quora",
knn={"field": "question_vector", "query_vector": question_embedding, "k": 10, "num_candidates": 100},
)
print("Input question:", inp_question)
print(
"Computing the embedding took {:.3f} seconds, BM25 search took {:.3f} seconds, semantic search with ES took {:.3f} seconds".format(
encode_end_time - encode_start_time, bm25["took"] / 1000, sem_search["took"] / 1000
)
)
print("BM25 results:")
for hit in bm25["hits"]["hits"][0:5]:
print("\t{}".format(hit["_source"]["question"]))
print("\nSemantic Search results:")
for hit in sem_search["hits"]["hits"][0:5]:
print("\t{}".format(hit["_source"]["question"]))
print("\n\n========\n")
|
"""
This script contains an example of how to perform semantic search with Elasticsearch.
As dataset, we use the Quora Duplicate Questions dataset, which contains about 500k questions:
https://www.quora.com/q/quoradata/First-Quora-Dataset-Release-Question-Pairs
Questions are indexed to Elasticsearch together with their respective sentence
embeddings.
The script shows results from BM25 as well as from semantic search with
cosine similarity.
You need Elasticsearch up and running, for example using Docker
(https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html).
Further, you need the Python Elasticsearch Client installed: https://elasticsearch-py.readthedocs.io/
As the embedding model, we use the SBERT model 'quora-distilbert-multilingual',
which is aligned across 100 languages. I.e., you can type in a question in various languages and it will
return the closest questions in the corpus (questions in the corpus are mainly in English).
"""
from sentence_transformers import SentenceTransformer, util
import os
from elasticsearch import Elasticsearch, helpers
from ssl import create_default_context
import csv
import time
import tqdm.autonotebook
es = Elasticsearch(
hosts=["https://localhost:9200"],
basic_auth=("elastic", os.environ["ELASTIC_PASSWORD"]), # displayed at ES server startup
ssl_context=create_default_context(cafile="http_ca.crt"), # copied from inside ES container
)
model = SentenceTransformer("quora-distilbert-multilingual")
url = "http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv"
dataset_path = "quora_duplicate_questions.tsv"
max_corpus_size = 100000
# Download dataset if needed
if not os.path.exists(dataset_path):
print("Download dataset")
util.http_get(url, dataset_path)
# Get all unique sentences from the file
all_questions = {}
with open(dataset_path, encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_MINIMAL)
for row in reader:
all_questions[row["qid1"]] = row["question1"]
if len(all_questions) >= max_corpus_size:
break
all_questions[row["qid2"]] = row["question2"]
if len(all_questions) >= max_corpus_size:
break
qids = list(all_questions.keys())
questions = [all_questions[qid] for qid in qids]
# Index data, if the index does not exist
if not es.indices.exists(index="quora"):
try:
es_index = {
"mappings": {
"properties": {
"question": {"type": "text"},
"question_vector": {"type": "dense_vector", "dims": 768, "index": True, "similarity": "cosine"},
}
}
}
es.indices.create(index="quora", body=es_index)
chunk_size = 500
print("Index data (you can stop it by pressing Ctrl+C once):")
with tqdm.tqdm(total=len(qids)) as pbar:
for start_idx in range(0, len(qids), chunk_size):
end_idx = start_idx + chunk_size
embeddings = model.encode(questions[start_idx:end_idx], show_progress_bar=False)
bulk_data = []
for qid, question, embedding in zip(qids[start_idx:end_idx], questions[start_idx:end_idx], embeddings):
bulk_data.append(
{
"_index": "quora",
"_id": qid,
"_source": {"question": question, "question_vector": embedding},
}
)
helpers.bulk(es, bulk_data)
pbar.update(chunk_size)
except Exception:
print("During index an exception occurred. Continue\n\n")
# Interactive search queries
while True:
inp_question = input("Please enter a question: ")
encode_start_time = time.time()
question_embedding = model.encode(inp_question)
encode_end_time = time.time()
# Lexical search
bm25 = es.search(index="quora", body={"query": {"match": {"question": inp_question}}})
# Semantic search
sem_search = es.search(
index="quora",
knn={"field": "question_vector", "query_vector": question_embedding, "k": 10, "num_candidates": 100},
)
print("Input question:", inp_question)
print(
"Computing the embedding took {:.3f} seconds, BM25 search took {:.3f} seconds, semantic search with ES took {:.3f} seconds".format(
encode_end_time - encode_start_time, bm25["took"] / 1000, sem_search["took"] / 1000
)
)
print("BM25 results:")
for hit in bm25["hits"]["hits"][0:5]:
print("\t{}".format(hit["_source"]["question"]))
print("\nSemantic Search results:")
for hit in sem_search["hits"]["hits"][0:5]:
print("\t{}".format(hit["_source"]["question"]))
print("\n\n========\n")
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTripletEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load triplets from the AllNLI dataset
# The dataset contains triplets of (anchor, positive, negative) sentences
dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="dev[:1000]")
# Initialize the SparseTripletEvaluator
evaluator = SparseTripletEvaluator(
anchors=dataset[:1000]["anchor"],
positives=dataset[:1000]["positive"],
negatives=dataset[:1000]["negative"],
name="all_nli_dev",
batch_size=32,
show_progress_bar=True,
)
# Run the evaluation
results = evaluator(model)
"""
TripletEvaluator: Evaluating the model on the all_nli_dev dataset:
Accuracy Dot Similarity: 85.10%
Model Anchor Sparsity: Active Dimensions: 105.5, Sparsity Ratio: 0.9965
Model Positive Sparsity: Active Dimensions: 69.8, Sparsity Ratio: 0.9977
Model Negative Sparsity: Active Dimensions: 68.6, Sparsity Ratio: 0.9978
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: all_nli_dev_dot_accuracy
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8510
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTripletEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load triplets from the AllNLI dataset
# The dataset contains triplets of (anchor, positive, negative) sentences
dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="dev[:1000]")
# Initialize the SparseTripletEvaluator
evaluator = SparseTripletEvaluator(
anchors=dataset[:1000]["anchor"],
positives=dataset[:1000]["positive"],
negatives=dataset[:1000]["negative"],
name="all_nli_dev",
batch_size=32,
show_progress_bar=True,
)
# Run the evaluation
results = evaluator(model, ".")
"""
TripletEvaluator: Evaluating the model on the all_nli_dev dataset:
Accuracy Dot Similarity: 85.10%
Model Sparsity Stats Query : Row Non-Zero Mean: 105.4530029296875, Row Sparsity Mean: 0.9965449571609497
Model Sparsity Stats Corpus : Row Non-Zero Mean: 69.18349838256836, Row Sparsity Mean: 0.9977333247661591
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: all_nli_dev_dot_accuracy
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8510
|
"""Retriever OpenAI agent."""
import deprecated
from typing import Any, cast
from llama_index.agent.openai_legacy.openai_agent import (
OpenAIAgent,
)
from llama_index.core.objects.base import ObjectRetriever
from llama_index.core.tools.types import BaseTool
@deprecated.deprecated(
reason=(
"FnRetrieverOpenAIAgent has been deprecated and is not maintained.\n\n"
"`FunctionAgent` is the recommended replacement.\n\n"
"See the docs for more information on updated agent usage: https://docs.llamaindex.ai/en/stable/understanding/agent/"
),
)
class FnRetrieverOpenAIAgent(OpenAIAgent):
"""
Function Retriever OpenAI Agent.
Uses our object retriever module to retrieve tools for the OpenAI agent.
NOTE: This is deprecated, you can just use the base `OpenAIAgent` class by
specifying the following:
```
agent = OpenAIAgent.from_tools(tool_retriever=retriever, ...)
```
"""
@classmethod
def from_retriever(
cls, retriever: ObjectRetriever[BaseTool], **kwargs: Any
) -> "FnRetrieverOpenAIAgent":
return cast(
FnRetrieverOpenAIAgent, cls.from_tools(tool_retriever=retriever, **kwargs)
)
|
"""Retriever OpenAI agent."""
from typing import Any, cast
from llama_index.agent.openai_legacy.openai_agent import (
OpenAIAgent,
)
from llama_index.core.objects.base import ObjectRetriever
from llama_index.core.tools.types import BaseTool
class FnRetrieverOpenAIAgent(OpenAIAgent):
"""
Function Retriever OpenAI Agent.
Uses our object retriever module to retrieve tools for the OpenAI agent.
NOTE: This is deprecated, you can just use the base `OpenAIAgent` class by
specifying the following:
```
agent = OpenAIAgent.from_tools(tool_retriever=retriever, ...)
```
"""
@classmethod
def from_retriever(
cls, retriever: ObjectRetriever[BaseTool], **kwargs: Any
) -> "FnRetrieverOpenAIAgent":
return cast(
FnRetrieverOpenAIAgent, cls.from_tools(tool_retriever=retriever, **kwargs)
)
|
import csv
import os
import random
import string
from torchaudio.datasets import fluentcommands
from torchaudio_unittest.common_utils import get_whitenoise, save_wav, TempDirMixin, TorchaudioTestCase
HEADER = ["", "path", "speakerId", "transcription", "action", "object", "location"]
SLOTS = ["action", "object", "location"]
ACTIONS = ["activate", "deactivate"]
OBJECTS = ["lights", "volume"]
LOCATIONS = ["none", "kitchen", "bedroom"]
NUM_SPEAKERS = 5
SAMPLES_PER_SPEAKER = 10
SAMPLE_RATE = 16000
def _gen_rand_str(n: int, seed: int):
random.seed(seed)
return "".join(random.choices(string.ascii_letters + string.digits, k=n))
def _gen_csv(dataset_dir: str, subset: str, init_seed: int):
data = []
data.append(HEADER)
idx = 0
seed = init_seed
for _ in range(NUM_SPEAKERS):
speaker_id = _gen_rand_str(5, seed=seed)
speaker_dir = os.path.join(dataset_dir, "wavs", "speakers", speaker_id)
os.makedirs(speaker_dir, exist_ok=True)
for _ in range(SAMPLES_PER_SPEAKER):
seed += 1
filename = _gen_rand_str(10, seed=seed)
path = f"wavs/speakers/{speaker_id}/{filename}.wav"
random.seed(seed)
transcription = ""
act = random.choice(ACTIONS)
obj = random.choice(OBJECTS)
loc = random.choice(LOCATIONS)
data.append([idx, path, speaker_id, transcription, act, obj, loc])
idx += 1
csv_path = os.path.join(dataset_dir, "data", f"{subset}_data.csv")
with open(csv_path, "w", newline="") as csv_file:
file_writer = csv.writer(csv_file)
file_writer.writerows(data)
return data
def _save_samples(dataset_dir: str, subset: str, seed: int):
# generate csv file
data = _gen_csv(dataset_dir, subset, seed)
# iterate through csv file, save wavs to corresponding files
header = data[0]
data = data[1:] # remove header
path_idx = header.index("path")
samples = []
for row in data:
wav = get_whitenoise(
sample_rate=SAMPLE_RATE,
duration=0.01,
n_channels=1,
seed=seed,
)
path = row[path_idx]
filename = path.split("/")[-1]
filename = filename.split(".")[0]
speaker_id, transcription, act, obj, loc = row[2:]
wav_file = os.path.join(dataset_dir, "wavs", "speakers", speaker_id, f"{filename}.wav")
save_wav(wav_file, wav, SAMPLE_RATE)
sample = wav, SAMPLE_RATE, filename, speaker_id, transcription, act, obj, loc
samples.append(sample)
seed += 1
return samples
def get_mock_dataset(dataset_dir: str):
data_folder = os.path.join(dataset_dir, "data")
wav_folder = os.path.join(dataset_dir, "wavs", "speakers")
os.makedirs(data_folder, exist_ok=True)
os.makedirs(wav_folder, exist_ok=True)
mocked_train_samples = _save_samples(dataset_dir, "train", 1)
mocked_valid_samples = _save_samples(dataset_dir, "valid", 111)
mocked_test_samples = _save_samples(dataset_dir, "test", 1111)
return mocked_train_samples, mocked_valid_samples, mocked_test_samples
class TestFluentSpeechCommands(TempDirMixin, TorchaudioTestCase):
root_dir = None
mocked_train_samples = []
mocked_valid_samples = []
mocked_test_samples = []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
dataset_dir = os.path.join(cls.root_dir, "fluent_speech_commands_dataset")
(
cls.mocked_train_samples,
cls.mocked_valid_samples,
cls.mocked_test_samples,
) = get_mock_dataset(dataset_dir)
def _testFluentCommands(self, dataset, samples):
num_samples = 0
for i, data in enumerate(dataset):
self.assertEqual(data, samples[i])
num_samples += 1
assert num_samples == len(samples)
def testFluentCommandsTrain(self):
dataset = fluentcommands.FluentSpeechCommands(self.root_dir, subset="train")
self._testFluentCommands(dataset, self.mocked_train_samples)
def testFluentCommandsValid(self):
dataset = fluentcommands.FluentSpeechCommands(self.root_dir, subset="valid")
self._testFluentCommands(dataset, self.mocked_valid_samples)
def testFluentCommandsTest(self):
dataset = fluentcommands.FluentSpeechCommands(self.root_dir, subset="test")
self._testFluentCommands(dataset, self.mocked_test_samples)
|
import csv
import os
import random
import string
from torchaudio.datasets import fluentcommands
from torchaudio_unittest.common_utils import get_whitenoise, save_wav, TempDirMixin, TorchaudioTestCase
HEADER = ["", "path", "speakerId", "transcription", "action", "object", "location"]
SLOTS = ["action", "object", "location"]
ACTIONS = ["activate", "deactivate"]
OBJECTS = ["lights", "volume"]
LOCATIONS = ["none", "kitchen", "bedroom"]
NUM_SPEAKERS = 5
SAMPLES_PER_SPEAKER = 10
SAMPLE_RATE = 16000
def _gen_rand_str(n: int, seed: int):
random.seed(seed)
return "".join(random.choices(string.ascii_letters + string.digits, k=n))
def _gen_csv(dataset_dir: str, subset: str, init_seed: int):
data = []
data.append(HEADER)
idx = 0
seed = init_seed
for _ in range(NUM_SPEAKERS):
speaker_id = _gen_rand_str(5, seed=seed)
speaker_dir = os.path.join(dataset_dir, "wavs", "speakers", speaker_id)
os.makedirs(speaker_dir, exist_ok=True)
for _ in range(SAMPLES_PER_SPEAKER):
seed += 1
filename = _gen_rand_str(10, seed=seed)
path = f"wavs/speakers/{speaker_id}/{filename}.wav"
random.seed(seed)
transcription = ""
act = random.choice(ACTIONS)
obj = random.choice(OBJECTS)
loc = random.choice(LOCATIONS)
data.append([idx, path, speaker_id, transcription, act, obj, loc])
idx += 1
csv_path = os.path.join(dataset_dir, "data", f"{subset}_data.csv")
with open(csv_path, "w", newline="") as csv_file:
file_writer = csv.writer(csv_file)
file_writer.writerows(data)
return data
def _save_samples(dataset_dir: str, subset: str, seed: int):
# generate csv file
data = _gen_csv(dataset_dir, subset, seed)
# iterate through csv file, save wavs to corresponding files
header = data[0]
data = data[1:] # remove header
path_idx = header.index("path")
samples = []
for row in data:
wav = get_whitenoise(
sample_rate=SAMPLE_RATE,
duration=0.01,
n_channels=1,
seed=seed,
)
path = row[path_idx]
filename = path.split("/")[-1]
filename = filename.split(".")[0]
speaker_id, transcription, act, obj, loc = row[2:]
wav_file = os.path.join(dataset_dir, "wavs", "speakers", speaker_id, f"{filename}.wav")
save_wav(wav_file, wav, SAMPLE_RATE)
sample = wav, SAMPLE_RATE, filename, speaker_id, transcription, act, obj, loc
samples.append(sample)
seed += 1
return samples
def get_mock_dataset(dataset_dir: str):
data_folder = os.path.join(dataset_dir, "data")
wav_folder = os.path.join(dataset_dir, "wavs", "speakers")
os.makedirs(data_folder, exist_ok=True)
os.makedirs(wav_folder, exist_ok=True)
mocked_train_samples = _save_samples(dataset_dir, "train", 1)
mocked_valid_samples = _save_samples(dataset_dir, "valid", 111)
mocked_test_samples = _save_samples(dataset_dir, "test", 1111)
return mocked_train_samples, mocked_valid_samples, mocked_test_samples
class TestFluentSpeechCommands(TempDirMixin, TorchaudioTestCase):
root_dir = None
backend = "default"
mocked_train_samples = []
mocked_valid_samples = []
mocked_test_samples = []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
dataset_dir = os.path.join(cls.root_dir, "fluent_speech_commands_dataset")
(
cls.mocked_train_samples,
cls.mocked_valid_samples,
cls.mocked_test_samples,
) = get_mock_dataset(dataset_dir)
def _testFluentCommands(self, dataset, samples):
num_samples = 0
for i, data in enumerate(dataset):
self.assertEqual(data, samples[i])
num_samples += 1
assert num_samples == len(samples)
def testFluentCommandsTrain(self):
dataset = fluentcommands.FluentSpeechCommands(self.root_dir, subset="train")
self._testFluentCommands(dataset, self.mocked_train_samples)
def testFluentCommandsValid(self):
dataset = fluentcommands.FluentSpeechCommands(self.root_dir, subset="valid")
self._testFluentCommands(dataset, self.mocked_valid_samples)
def testFluentCommandsTest(self):
dataset = fluentcommands.FluentSpeechCommands(self.root_dir, subset="test")
self._testFluentCommands(dataset, self.mocked_test_samples)
|
"""
This is a simple application for sentence embeddings: semantic search
We have a corpus with various sentences. Then, for a given query sentence,
we want to find the most similar sentence in this corpus.
This script outputs for various queries the top 5 most similar sentences in the corpus.
"""
from sentence_transformers import SentenceTransformer
import torch
embedder = SentenceTransformer("all-MiniLM-L6-v2")
# Corpus with example sentences
corpus = [
"A man is eating food.",
"A man is eating a piece of bread.",
"The girl is carrying a baby.",
"A man is riding a horse.",
"A woman is playing violin.",
"Two men pushed carts through the woods.",
"A man is riding a white horse on an enclosed ground.",
"A monkey is playing drums.",
"A cheetah is running behind its prey.",
]
# Use "convert_to_tensor=True" to keep the tensors on GPU (if available)
corpus_embeddings = embedder.encode(corpus, convert_to_tensor=True)
# Query sentences:
queries = [
"A man is eating pasta.",
"Someone in a gorilla costume is playing a set of drums.",
"A cheetah chases prey on across a field.",
]
# Find the closest 5 sentences of the corpus for each query sentence based on cosine similarity
top_k = min(5, len(corpus))
for query in queries:
query_embedding = embedder.encode(query, convert_to_tensor=True)
# We use cosine-similarity and torch.topk to find the highest 5 scores
similarity_scores = embedder.similarity(query_embedding, corpus_embeddings)[0]
scores, indices = torch.topk(similarity_scores, k=top_k)
print("\nQuery:", query)
print("Top 5 most similar sentences in corpus:")
for score, idx in zip(scores, indices):
print(corpus[idx], "(Score: {:.4f})".format(score))
"""
# Alternatively, we can also use util.semantic_search to perform cosine similarity + topk
# (requires `from sentence_transformers import util`)
hits = util.semantic_search(query_embedding, corpus_embeddings, top_k=5)
hits = hits[0] #Get the hits for the first query
for hit in hits:
print(corpus[hit['corpus_id']], "(Score: {:.4f})".format(hit['score']))
"""
|
"""
This is a simple application for sentence embeddings: semantic search
We have a corpus with various sentences. Then, for a given query sentence,
we want to find the most similar sentence in this corpus.
This script outputs for various queries the top 5 most similar sentences in the corpus.
"""
from sentence_transformers import SentenceTransformer
import torch
embedder = SentenceTransformer("all-MiniLM-L6-v2")
# Corpus with example sentences
corpus = [
"A man is eating food.",
"A man is eating a piece of bread.",
"The girl is carrying a baby.",
"A man is riding a horse.",
"A woman is playing violin.",
"Two men pushed carts through the woods.",
"A man is riding a white horse on an enclosed ground.",
"A monkey is playing drums.",
"A cheetah is running behind its prey.",
]
corpus_embeddings = embedder.encode(corpus, convert_to_tensor=True)
# Query sentences:
queries = [
"A man is eating pasta.",
"Someone in a gorilla costume is playing a set of drums.",
"A cheetah chases prey on across a field.",
]
# Find the closest 5 sentences of the corpus for each query sentence based on cosine similarity
top_k = min(5, len(corpus))
for query in queries:
query_embedding = embedder.encode(query, convert_to_tensor=True)
# We use cosine-similarity and torch.topk to find the highest 5 scores
similarity_scores = embedder.similarity(query_embedding, corpus_embeddings)[0]
top_results = torch.topk(similarity_scores, k=top_k)
print("\n\n======================\n\n")
print("Query:", query)
print("\nTop 5 most similar sentences in corpus:")
for score, idx in zip(top_results[0], top_results[1]):
print(corpus[idx], "(Score: {:.4f})".format(score))
"""
# Alternatively, we can also use util.semantic_search to perform cosine similarity + topk
# (requires `from sentence_transformers import util`)
hits = util.semantic_search(query_embedding, corpus_embeddings, top_k=5)
hits = hits[0] #Get the hits for the first query
for hit in hits:
print(corpus[hit['corpus_id']], "(Score: {:.4f})".format(hit['score']))
"""
|
from typing import List, TYPE_CHECKING
if TYPE_CHECKING:
from docarray.typing import T, Document
def _reduce_doc_props(doc1: 'Document', doc2: 'Document'):
doc1_fields = set(doc1.non_empty_fields)
doc2_fields = set(doc2.non_empty_fields)
# update only fields that are set in doc2 and not set in doc1
fields = doc2_fields - doc1_fields
fields = fields - {'matches', 'chunks', 'id', 'parent_id'}
for field in fields:
setattr(doc1, field, getattr(doc2, field))
class ReduceMixin:
"""
A mixin that provides reducing logic for :class:`DocumentArray`
Reducing 2 or more DocumentArrays consists in merging all Documents into the same DocumentArray.
If a Document belongs to 2 or more DocumentArrays, it is added once and data attributes are merged with priority to
the Document belonging to the left-most DocumentArray. Matches and chunks are also reduced in the same way.
Reduction is applied to all levels of DocumentArrays, that is, from root Documents to all their chunk and match
children.
"""
def reduce(self: 'T', other: 'T') -> 'T':
"""
Reduces other and the current DocumentArray into one DocumentArray in-place. Changes are applied to the current
DocumentArray.
Reducing 2 DocumentArrays consists in adding Documents in the second DocumentArray to the first DocumentArray
if they do not exist. If a Document exists in both DocumentArrays, the data properties are merged with priority
to the first Document (that is, to the current DocumentArray's Document). The matches and chunks are also
reduced in the same way.
:param other: DocumentArray
:return: DocumentArray
"""
for doc in other:
if doc.id in self:
self._reduce_doc(self[doc.id], doc)
else:
self.append(doc)
return self
@staticmethod
def _reduce_doc(doc1: 'Document', doc2: 'Document'):
"""
Reduces doc1 and doc2 into one Document in-place. Changes are applied to doc1.
Reducing 2 Documents consists in setting data properties of the second Document to the first Document if they
are empty (that is, priority to the left-most Document) and reducing the matches and the chunks of both
documents.
Non-data properties are ignored.
Reduction of matches and chunks relies on :meth:`DocumentArray.reduce`.
:param doc1: first Document
:param doc2: second Document
"""
_reduce_doc_props(doc1, doc2)
if len(doc2.matches) > 0:
doc1.matches.reduce(doc2.matches)
if len(doc2.chunks) > 0:
doc1.chunks.reduce(doc2.chunks)
def reduce_all(self: 'T', others: List['T']) -> 'T':
"""
Reduces a list of DocumentArrays and this DocumentArray into one DocumentArray. Changes are applied to this
DocumentArray in-place.
Reduction consists in reducing this DocumentArray with every DocumentArray in `others` sequentially using
:meth:`DocumentArray.reduce`.
The resulting DocumentArray contains Documents of all DocumentArrays.
If a Document exists in many DocumentArrays, data properties are merged with priority to the left-most
DocumentArrays (that is, if a data attribute is set in a Document belonging to many DocumentArrays, the
attribute value of the left-most DocumentArray is kept).
Matches and chunks of a Document belonging to many DocumentArrays are also reduced in the same way.
Other non-data properties are ignored.
.. note::
- Matches are not kept in a sorted order when they are reduced. You might want to re-sort them in a later
step.
- The final result depends on the order of DocumentArrays when applying reduction.
:param others: List of DocumentArrays to be reduced
:return: the resulting DocumentArray
"""
for da in others:
self.reduce(da)
return self
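# --- Illustrative usage sketch (added; not part of the original module), assuming
# the docarray `Document`/`DocumentArray` API:
#
# from docarray import Document, DocumentArray
# da1 = DocumentArray([Document(id='a', text='left'), Document(id='b')])
# da2 = DocumentArray([Document(id='a', tags={'x': 1}), Document(id='c')])
# da1.reduce(da2)  # 'a' keeps text='left' and gains tags={'x': 1}; 'c' is appended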
|
from typing import List, TYPE_CHECKING
if TYPE_CHECKING:
from ...typing import T, Document
def _reduce_doc_props(doc1: 'Document', doc2: 'Document'):
doc1_fields = set(doc1.non_empty_fields)
doc2_fields = set(doc2.non_empty_fields)
# update only fields that are set in doc2 and not set in doc1
fields = doc2_fields - doc1_fields
fields = fields - {'matches', 'chunks', 'id', 'parent_id'}
for field in fields:
setattr(doc1, field, getattr(doc2, field))
class ReduceMixin:
"""
A mixin that provides reducing logic for :class:`DocumentArray`
Reducing 2 or more DocumentArrays consists in merging all Documents into the same DocumentArray.
If a Document belongs to 2 or more DocumentArrays, it is added once and data attributes are merged with priority to
the Document belonging to the left-most DocumentArray. Matches and chunks are also reduced in the same way.
Reduction is applied to all levels of DocumentArrays, that is, from root Documents to all their chunk and match
children.
"""
def reduce(self: 'T', other: 'T') -> 'T':
"""
Reduces other and the current DocumentArray into one DocumentArray in-place. Changes are applied to the current
DocumentArray.
Reducing 2 DocumentArrays consists in adding Documents in the second DocumentArray to the first DocumentArray
if they do not exist. If a Document exists in both DocumentArrays, the data properties are merged with priority
to the first Document (that is, to the current DocumentArray's Document). The matches and chunks are also
reduced in the same way.
:param other: DocumentArray
:return: DocumentArray
"""
for doc in other:
if doc.id in self:
self._reduce_doc(self[doc.id], doc)
else:
self.append(doc)
return self
@staticmethod
def _reduce_doc(doc1: 'Document', doc2: 'Document'):
"""
Reduces doc1 and doc2 into one Document in-place. Changes are applied to doc1.
Reducing 2 Documents consists in setting data properties of the second Document to the first Document if they
are empty (that is, priority to the left-most Document) and reducing the matches and the chunks of both
documents.
Non-data properties are ignored.
Reduction of matches and chunks relies on :meth:`DocumentArray.reduce`.
:param doc1: first Document
:param doc2: second Document
"""
_reduce_doc_props(doc1, doc2)
if len(doc2.matches) > 0:
doc1.matches.reduce(doc2.matches)
if len(doc2.chunks) > 0:
doc1.chunks.reduce(doc2.chunks)
def reduce_all(self: 'T', others: List['T']) -> 'T':
"""
Reduces a list of DocumentArrays and this DocumentArray into one DocumentArray. Changes are applied to this
DocumentArray in-place.
Reduction consists in reducing this DocumentArray with every DocumentArray in `others` sequentially using
:meth:`DocumentArray.reduce`.
The resulting DocumentArray contains Documents of all DocumentArrays.
If a Document exists in many DocumentArrays, data properties are merged with priority to the left-most
DocumentArrays (that is, if a data attribute is set in a Document belonging to many DocumentArrays, the
attribute value of the left-most DocumentArray is kept).
Matches and chunks of a Document belonging to many DocumentArrays are also reduced in the same way.
Other non-data properties are ignored.
.. note::
- Matches are not kept in a sorted order when they are reduced. You might want to re-sort them in a later
step.
- The final result depends on the order of DocumentArrays when applying reduction.
:param others: List of DocumentArrays to be reduced
:return: the resulting DocumentArray
"""
for da in others:
self.reduce(da)
return self
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/cityscapes_instance.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(init_cfg=None),
roi_head=dict(
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=8,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
mask_head=dict(
type='FCNMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=8,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))))
# optimizer
# lr is set for a batch size of 8
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
# [7] yields higher performance than [6]
milestones=[7],
gamma=0.1)
]
train_cfg = dict(by_epoch=True, max_epochs=8)  # actual epoch = 8 * 8 = 64 (the cityscapes base config repeats the dataset 8 times)
val_cfg = dict(interval=1)
test_cfg = dict()
# For better, more stable performance initialize from COCO
load_from = 'https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth' # noqa
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (1 samples per GPU)
# TODO: support auto scaling lr
# auto_scale_lr = dict(base_batch_size=8)
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/cityscapes_instance.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(init_cfg=None),
roi_head=dict(
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=8,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
mask_head=dict(
type='FCNMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=8,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))))
# optimizer
# lr is set for a batch size of 8
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
# [7] yields higher performance than [6]
step=[7])
runner = dict(
type='EpochBasedRunner', max_epochs=8)  # actual epoch = 8 * 8 = 64 (the cityscapes base config repeats the dataset 8 times)
log_config = dict(interval=100)
# For better, more stable performance initialize from COCO
load_from = 'https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth' # noqa
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (1 samples per GPU)
auto_scale_lr = dict(base_batch_size=8)
|
_base_ = './vfnet_r50_fpn_ms-2x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)),
bbox_head=dict(dcn_on_last_conv=True))
|
_base_ = './vfnet_r50_fpn_mstrain_2x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)),
bbox_head=dict(dcn_on_last_conv=True))
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import mmcv
from mmcv.transforms import Compose
from mmengine.utils import track_iter_progress
from mmdet.apis import inference_detector, init_detector
from mmdet.registry import VISUALIZERS
def parse_args():
parser = argparse.ArgumentParser(description='MMDetection video demo')
parser.add_argument('video', help='Video file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='Bbox score threshold')
parser.add_argument('--out', type=str, help='Output video file')
parser.add_argument('--show', action='store_true', help='Show video')
parser.add_argument(
'--wait-time',
type=float,
default=1,
help='The interval of show (s), 0 is block')
args = parser.parse_args()
return args
def main():
args = parse_args()
assert args.out or args.show, \
('Please specify at least one operation (save/show the '
'video) with the argument "--out" or "--show"')
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# build test pipeline
model.cfg.test_dataloader.dataset.pipeline[
0].type = 'mmdet.LoadImageFromNDArray'
test_pipeline = Compose(model.cfg.test_dataloader.dataset.pipeline)
# init visualizer
visualizer = VISUALIZERS.build(model.cfg.visualizer)
# the dataset_meta is loaded from the checkpoint and
# then pass to the model in init_detector
visualizer.dataset_meta = model.dataset_meta
video_reader = mmcv.VideoReader(args.video)
video_writer = None
if args.out:
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
video_writer = cv2.VideoWriter(
args.out, fourcc, video_reader.fps,
(video_reader.width, video_reader.height))
for frame in track_iter_progress((video_reader, len(video_reader))):
result = inference_detector(model, frame, test_pipeline=test_pipeline)
visualizer.add_datasample(
name='video',
image=frame,
data_sample=result,
draw_gt=False,
show=False,
pred_score_thr=args.score_thr)
frame = visualizer.get_image()
if args.show:
cv2.namedWindow('video', 0)
mmcv.imshow(frame, 'video', args.wait_time)
if args.out:
video_writer.write(frame)
if video_writer:
video_writer.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
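# Example invocation (added for illustration; file paths are placeholders):
#   python video_demo.py demo.mp4 configs/<model_config>.py <checkpoint>.pth --out result.mp4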
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import mmcv
from mmcv.transforms import Compose
from mmengine.utils import track_iter_progress
from mmdet.apis import inference_detector, init_detector
from mmdet.registry import VISUALIZERS
def parse_args():
parser = argparse.ArgumentParser(description='MMDetection video demo')
parser.add_argument('video', help='Video file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='Bbox score threshold')
parser.add_argument('--out', type=str, help='Output video file')
parser.add_argument('--show', action='store_true', help='Show video')
parser.add_argument(
'--wait-time',
type=float,
default=1,
help='The interval of show (s), 0 is block')
args = parser.parse_args()
return args
def main():
args = parse_args()
assert args.out or args.show, \
('Please specify at least one operation (save/show the '
'video) with the argument "--out" or "--show"')
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# build test pipeline
model.cfg.test_dataloader.dataset.pipeline[
0].type = 'mmdet.LoadImageFromNDArray'
test_pipeline = Compose(model.cfg.test_dataloader.dataset.pipeline)
# init visualizer
visualizer = VISUALIZERS.build(model.cfg.visualizer)
# the dataset_meta is loaded from the checkpoint and
# then pass to the model in init_detector
visualizer.dataset_meta = model.dataset_meta
video_reader = mmcv.VideoReader(args.video)
video_writer = None
if args.out:
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
video_writer = cv2.VideoWriter(
args.out, fourcc, video_reader.fps,
(video_reader.width, video_reader.height))
for frame in track_iter_progress(video_reader):
result = inference_detector(model, frame, test_pipeline=test_pipeline)
visualizer.add_datasample(
name='video',
image=frame,
data_sample=result,
draw_gt=False,
show=False,
pred_score_thr=args.score_thr)
frame = visualizer.get_image()
if args.show:
cv2.namedWindow('video', 0)
mmcv.imshow(frame, 'video', args.wait_time)
if args.out:
video_writer.write(frame)
if video_writer:
video_writer.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
|
from typing import TYPE_CHECKING, Optional, Dict
if TYPE_CHECKING:
from ... import DocumentArray
class PostMixin:
"""Helper functions for posting DocumentArray to Jina Flow."""
def post(
self,
host: str,
show_progress: bool = False,
batch_size: Optional[int] = None,
parameters: Optional[Dict] = None,
) -> 'DocumentArray':
"""Posting itself to a remote Flow/Sandbox and get the modified DocumentArray back
:param host: a host string. Can be one of the following:
- `grpc://192.168.0.123:8080/endpoint`
- `ws://192.168.0.123:8080/endpoint`
- `http://192.168.0.123:8080/endpoint`
- `jinahub://Hello/endpoint`
- `jinahub+docker://Hello/endpoint`
- `jinahub+sandbox://Hello/endpoint`
:param show_progress: whether to show a progress bar
:param batch_size: number of Documents in each request
:param parameters: parameters to send in the request
:return: the new DocumentArray returned from remote
"""
if not self:
return
from urllib.parse import urlparse
r = urlparse(host)
_on = r.path or '/'
_port = r.port or None
standardized_host = (
r._replace(netloc=r.netloc.replace(f':{r.port}', ''))
._replace(path='')
.geturl()
)
batch_size = batch_size or len(self)
_scheme = r.scheme
_tls = False
if _scheme in ('grpcs', 'https', 'wss'):
_scheme = _scheme[:-1]
_tls = True
if _scheme == 'ws':
_scheme = 'websocket' # temp fix for the core
if _scheme.startswith('jinahub'):
from jina import Flow
f = Flow(quiet=True, prefetch=1).add(uses=standardized_host)
with f:
return f.post(
_on,
inputs=self,
show_progress=show_progress,
request_size=batch_size,
parameters=parameters,
)
elif _scheme in ('grpc', 'http', 'ws', 'websocket'):
from jina import Client
if _port:
standardized_host += f':{_port}'
c = Client(host=standardized_host)
return c.post(
_on,
inputs=self,
show_progress=show_progress,
request_size=batch_size,
parameters=parameters,
)
else:
raise ValueError(f'unsupported scheme: {r.scheme}')
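# --- Illustrative usage sketch (added; not part of the original module), assuming
# the docarray `DocumentArray` API and a Flow serving at the given address:
#
# from docarray import DocumentArray
# da = DocumentArray.empty(10)
# da = da.post('grpc://192.168.0.123:8080/endpoint', batch_size=5, show_progress=True)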
|
from typing import TYPE_CHECKING, Optional, Dict
if TYPE_CHECKING:
from ... import DocumentArray
class PostMixin:
"""Helper functions for posting DocumentArray to Jina Flow."""
def post(
self,
host: str,
show_progress: bool = False,
batch_size: Optional[int] = None,
parameters: Optional[Dict] = None,
) -> 'DocumentArray':
"""Posting itself to a remote Flow/Sandbox and get the modified DocumentArray back
:param host: a host string. Can be one of the following:
- `grpc://192.168.0.123:8080/endpoint`
- `ws://192.168.0.123:8080/endpoint`
- `http://192.168.0.123:8080/endpoint`
- `jinahub://Hello/endpoint`
- `jinahub+docker://Hello/endpoint`
- `jinahub+sandbox://Hello/endpoint`
:param show_progress: whether to show a progress bar
:param batch_size: number of Documents in each request
:param parameters: parameters to send in the request
:return: the new DocumentArray returned from remote
"""
if not self:
return
from urllib.parse import urlparse
r = urlparse(host)
_on = r.path or '/'
_port = r.port or None
standardized_host = (
r._replace(netloc=r.netloc.replace(f':{r.port}', ''))
._replace(path='')
.geturl()
)
batch_size = batch_size or len(self)
_scheme = r.scheme
_tls = False
if _scheme in ('grpcs', 'https', 'wss'):
_scheme = _scheme[:-1]
_tls = True
if _scheme == 'ws':
_scheme = 'websocket' # temp fix for the core
if _scheme.startswith('jinahub'):
from jina import Flow
f = Flow(quiet=True, prefetch=1).add(uses=standardized_host)
with f:
return f.post(
_on,
inputs=self,
show_progress=show_progress,
request_size=batch_size,
parameters=parameters,
)
elif _scheme in ('grpc', 'http', 'ws', 'websocket'):
from jina import Client
standardized_host = standardized_host + (f':{_port}' if _port else '')
c = Client(host=standardized_host)
return c.post(
_on,
inputs=self,
show_progress=show_progress,
request_size=batch_size,
parameters=parameters,
)
else:
raise ValueError(f'unsupported scheme: {r.scheme}')
|
"""Init file of LlamaIndex."""
__version__ = "0.12.21"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"VellumPredictor",
"VellumPromptRegistry",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
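# --- Illustrative usage sketch (added; not part of this module), assuming an LLM and
# embedding model are configured via `Settings` and that ./data contains documents:
#
# from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
# documents = SimpleDirectoryReader("data").load_data()
# index = VectorStoreIndex.from_documents(documents)
# response = index.as_query_engine().query("What are these documents about?")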
|
"""Init file of LlamaIndex."""
__version__ = "0.12.20"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"VellumPredictor",
"VellumPromptRegistry",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../common/lsj-200e_coco-detection.py'
]
image_size = (1024, 1024)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
model = dict(data_preprocessor=dict(batch_augments=batch_augments))
train_dataloader = dict(batch_size=8, num_workers=4)
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(
type='AmpOptimWrapper',
optimizer=dict(
type='SGD', lr=0.01 * 4, momentum=0.9, weight_decay=0.00004))  # lr is 4x the 0.01 default for the 4x larger total batch (64 vs 16)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../common/lsj_200e_coco_detection.py'
]
image_size = (1024, 1024)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
model = dict(data_preprocessor=dict(batch_augments=batch_augments))
train_dataloader = dict(batch_size=8, num_workers=4)
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(
type='AmpOptimWrapper',
optimizer=dict(
type='SGD', lr=0.01 * 4, momentum=0.9, weight_decay=0.00004))  # lr is 4x the 0.01 default for the 4x larger total batch (64 vs 16)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.data_collator import SparseEncoderDataCollator
from sentence_transformers.sparse_encoder.evaluation import (
SparseBinaryClassificationEvaluator,
SparseEmbeddingSimilarityEvaluator,
SparseInformationRetrievalEvaluator,
SparseMSEEvaluator,
SparseNanoBEIREvaluator,
SparseRerankingEvaluator,
SparseTranslationEvaluator,
SparseTripletEvaluator,
)
from sentence_transformers.sparse_encoder.losses import (
CSRLoss,
CSRReconstructionLoss,
FlopsLoss,
SparseAnglELoss,
SparseCachedGISTEmbedLoss,
SparseCachedMultipleNegativesRankingLoss,
SparseCoSENTLoss,
SparseCosineSimilarityLoss,
SparseDistillKLDivLoss,
SparseGISTEmbedLoss,
SparseMarginMSELoss,
SparseMSELoss,
SparseMultipleNegativesRankingLoss,
SparseTripletLoss,
SpladeLoss,
)
from sentence_transformers.sparse_encoder.model_card import SparseEncoderModelCardData
from sentence_transformers.sparse_encoder.models import CSRSparsity, MLMTransformer, SpladePooling
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
from sentence_transformers.sparse_encoder.trainer import SparseEncoderTrainer
from sentence_transformers.sparse_encoder.training_args import (
SparseEncoderTrainingArguments,
)
__all__ = [
# Core components
"SparseEncoder",
"SparseEncoderDataCollator",
"SparseEncoderTrainer",
"SparseEncoderTrainingArguments",
# Models
"CSRSparsity",
"MLMTransformer",
"SpladePooling",
# Losses
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
"SparseCoSENTLoss",
"SparseTripletLoss",
"SparseCachedMultipleNegativesRankingLoss",
"SparseMarginMSELoss",
"SparseGISTEmbedLoss",
"SparseCachedGISTEmbedLoss",
"SparseCosineSimilarityLoss",
"SparseMSELoss",
"SparseAnglELoss",
"SparseDistillKLDivLoss",
"FlopsLoss",
"SpladeLoss",
# Evaluators
"SparseBinaryClassificationEvaluator",
"SparseEmbeddingSimilarityEvaluator",
"SparseInformationRetrievalEvaluator",
"SparseMSEEvaluator",
"SparseNanoBEIREvaluator",
"SparseTranslationEvaluator",
"SparseRerankingEvaluator",
"SparseTripletEvaluator",
# Model card
"SparseEncoderModelCardData",
]
# TODO : Add tests for all the components
# TODO : Add the equivalent of the quantization file for the sparse encoder
# TODO : Watch the similarity default value; right now it is cosine, but dot product might be better for sparse
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.data_collator import SparseEncoderDataCollator
from sentence_transformers.sparse_encoder.evaluation import (
SparseBinaryClassificationEvaluator,
SparseEmbeddingSimilarityEvaluator,
SparseInformationRetrievalEvaluator,
SparseMSEEvaluator,
SparseNanoBEIREvaluator,
SparseRerankingEvaluator,
SparseTranslationEvaluator,
SparseTripletEvaluator,
)
from sentence_transformers.sparse_encoder.losses import (
CSRLoss,
CSRReconstructionLoss,
FlopsLoss,
SparseAnglELoss,
SparseCachedGISTEmbedLoss,
SparseCachedMultipleNegativesRankingLoss,
SparseCoSENTLoss,
SparseCosineSimilarityLoss,
SparseDistillKLDivLoss,
SparseGISTEmbedLoss,
SparseMarginMSELoss,
SparseMSELoss,
SparseMultipleNegativesRankingLoss,
SparseTripletLoss,
SpladeLoss,
)
from sentence_transformers.sparse_encoder.model_card import SparseEncoderModelCardData
from sentence_transformers.sparse_encoder.models import CSRSparsity, MLMTransformer, SpladePooling
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
from sentence_transformers.sparse_encoder.trainer import SparseEncoderTrainer
from sentence_transformers.sparse_encoder.training_args import (
SparseEncoderTrainingArguments,
)
__all__ = [
# Core components
"SparseEncoder",
"SparseEncoderDataCollator",
"SparseEncoderTrainer",
"SparseEncoderTrainingArguments",
# Models
"CSRSparsity",
"MLMTransformer",
"SpladePooling",
# Losses
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
"SparseCoSENTLoss",
"SparseTripletLoss",
"SparseCachedMultipleNegativesRankingLoss",
"SparseMarginMSELoss",
"SparseGISTEmbedLoss",
"SparseCachedGISTEmbedLoss",
"SparseCosineSimilarityLoss",
"SparseMSELoss",
"SparseAnglELoss",
"SparseDistillKLDivLoss",
"FlopsLoss",
"SpladeLoss",
# Evaluators
"SparseBinaryClassificationEvaluator",
"SparseEmbeddingSimilarityEvaluator",
"SparseInformationRetrievalEvaluator",
"SparseMSEEvaluator",
"SparseNanoBEIREvaluator",
"SparseTranslationEvaluator",
"SparseRerankingEvaluator",
"SparseTripletEvaluator",
# Model card
"SparseEncoderModelCardData",
]
# TODO : Add tests for all the components
# TODO : Add the equivalent of the quantization file for the sparse encoder
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Dict, Iterable, List, Optional, Tuple
import numpy as np
import torch
import torchvision.transforms as T
from jina import DocumentArray, Executor, requests
from jina.logging.logger import JinaLogger
from jina_commons.batching import get_docs_batch_generator
from .models import EmbeddingModelWrapper
class ImageTorchEncoder(Executor):
"""
:class:`ImageTorchEncoder` encodes ``Document`` blobs of type `ndarray` (`float32`) and shape
`H x W x C` into an `ndarray` of shape `D`, where `D` is the dimension of the embedding.
If `use_default_preprocessing=False`, the expected input shape is `C x H x W` with `float32` dtype.
:class:`ImageTorchEncoder` fills the `embedding` fields of `Documents` with an `ndarray` of shape `embedding_dim`
(size depends on the model) with `dtype=float32`.
Internally, :class:`ImageTorchEncoder` wraps the models from
`torchvision.models`.
https://pytorch.org/vision/stable/models.html
"""
def __init__(
self,
model_name: str = 'resnet18',
device: str = 'cpu',
default_traversal_path: Tuple = ('r',),
default_batch_size: Optional[int] = 32,
use_default_preprocessing: bool = True,
*args,
**kwargs,
):
"""
:param model_name: the name of the model. Some of the models:
``alexnet``, ``squeezenet1_0``, ``vgg16``,
``densenet161``, ``inception_v3``, ``googlenet``,
``shufflenet_v2_x1_0``, ``mobilenet_v2``,
``mnasnet1_0``, ``resnet18``. See full list above.
:param device: Which device the model runs on. Can be 'cpu' or 'cuda'
:param default_traversal_path: Used in the encode method and defines the traversal of the received `DocumentArray`
:param default_batch_size: Defines the batch size for inference on the loaded PyTorch model.
"""
super().__init__(*args, **kwargs)
self.logger = JinaLogger(self.__class__.__name__)
self.device = device
self.default_batch_size = default_batch_size
self.use_default_preprocessing = use_default_preprocessing
self.default_traversal_path = default_traversal_path
# axis 0 is the batch
self._default_channel_axis = 1
self.model_wrapper = EmbeddingModelWrapper(model_name, device=self.device)
self._preprocess = T.Compose(
[
T.ToPILImage('RGB'),
T.Resize(256),
T.CenterCrop(224),
T.ToTensor(),
T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)
@requests
def encode(self, docs: Optional[DocumentArray], parameters: Dict, **kwargs):
"""
Encode image data into a ndarray of `D` as dimension, and fill the embedding of each Document.
:param docs: DocumentArray containing images
:param parameters: dictionary to define the `traversal_paths` and the `batch_size`. For example,
`parameters={'traversal_paths': ['r'], 'batch_size': 10}`.
:param kwargs: Additional key value arguments.
"""
if docs:
docs_batch_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get(
'traversal_paths', self.default_traversal_path
),
batch_size=parameters.get('batch_size', self.default_batch_size),
needs_attr='blob',
)
self._compute_embeddings(docs_batch_generator)
def _compute_embeddings(self, docs_batch_generator: Iterable) -> None:
with torch.no_grad():
for document_batch in docs_batch_generator:
blob_batch = [d.blob for d in document_batch]
if self.use_default_preprocessing:
images = np.stack(self._preprocess_image(blob_batch))
else:
images = np.stack(blob_batch)
features = self.model_wrapper.compute_embeddings(images)
for doc, embed in zip(document_batch, features):
doc.embedding = embed
def _preprocess_image(self, images: List[np.array]) -> List[np.ndarray]:
return [self._preprocess(img) for img in images]
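# --- Illustrative usage sketch (added; not part of the original module), assuming
# a local Jina Flow and float32 RGB blobs of shape H x W x C:
#
# import numpy as np
# from jina import Document, DocumentArray, Flow
#
# docs = DocumentArray([Document(blob=np.random.rand(224, 224, 3).astype('float32'))])
# with Flow().add(uses=ImageTorchEncoder) as f:
#     f.post(on='/encode', inputs=docs)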
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Dict, Iterable, List, Optional, Tuple
import numpy as np
import torch
import torchvision.transforms as T
from jina import DocumentArray, Executor, requests
from jina.logging.logger import JinaLogger
from jina_commons.batching import get_docs_batch_generator
from .models import EmbeddingModelWrapper
class ImageTorchEncoder(Executor):
"""
:class:`ImageTorchEncoder` encodes ``Document`` blobs of type `ndarray` (`float32`) and shape
`H x W x C` into an `ndarray` of shape `D`, where `D` is the dimension of the embedding.
If `use_default_preprocessing=False`, the expected input shape is `C x H x W` with `float32` dtype.
:class:`ImageTorchEncoder` fills the `embedding` fields of `Documents` with an `ndarray` of shape `embedding_dim`
(size depends on the model) with `dtype=float32`.
Internally, :class:`ImageTorchEncoder` wraps the models from
`torchvision.models`.
https://pytorch.org/vision/stable/models.html
:param model_name: the name of the model. Some of the models:
``alexnet``, ``squeezenet1_0``, ``vgg16``,
``densenet161``, ``inception_v3``, ``googlenet``,
``shufflenet_v2_x1_0``, ``mobilenet_v2``,
``mnasnet1_0``, ``resnet18``. See full list above.
:param device: Which device the model runs on. Can be 'cpu' or 'cuda'
:param default_traversal_path: Used in the encode method and defines the traversal of the received `DocumentArray`
:param default_batch_size: Defines the batch size for inference on the loaded PyTorch model.
:param args: Additional positional arguments
:param kwargs: Additional keyword arguments
"""
def __init__(
self,
model_name: str = 'resnet18',
device: str = 'cpu',
default_traversal_path: Tuple = ('r',),
default_batch_size: Optional[int] = 32,
use_default_preprocessing: bool = True,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.logger = JinaLogger(self.__class__.__name__)
self.device = device
self.default_batch_size = default_batch_size
self.use_default_preprocessing = use_default_preprocessing
self.default_traversal_path = default_traversal_path
# axis 0 is the batch
self._default_channel_axis = 1
self.model_wrapper = EmbeddingModelWrapper(model_name, device=self.device)
self._preprocess = T.Compose(
[
T.ToPILImage('RGB'),
T.Resize(256),
T.CenterCrop(224),
T.ToTensor(),
T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)
@requests
def encode(self, docs: Optional[DocumentArray], parameters: Dict, **kwargs):
"""
Encode image data into a ndarray of `D` as dimension, and fill the embedding of each Document.
:param docs: DocumentArray containing images
:param parameters: dictionary to define the `traversal_paths` and the `batch_size`. For example,
`parameters={'traversal_paths': ['r'], 'batch_size': 10}`.
:param kwargs: Additional key value arguments.
"""
if docs:
docs_batch_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get(
'traversal_paths', self.default_traversal_path
),
batch_size=parameters.get('batch_size', self.default_batch_size),
needs_attr='blob',
)
self._compute_embeddings(docs_batch_generator)
def _compute_embeddings(self, docs_batch_generator: Iterable) -> None:
with torch.no_grad():
for document_batch in docs_batch_generator:
blob_batch = [d.blob for d in document_batch]
if self.use_default_preprocessing:
images = np.stack(self._preprocess_image(blob_batch))
else:
images = np.stack(blob_batch)
features = self.model_wrapper.compute_embeddings(images)
for doc, embed in zip(document_batch, features):
doc.embedding = embed
def _preprocess_image(self, images: List[np.array]) -> List[np.ndarray]:
return [self._preprocess(img) for img in images]
|
from pathlib import Path
from typing import Any, Callable, Optional, Tuple, Union
from .folder import default_loader, make_dataset
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class RenderedSST2(VisionDataset):
"""`The Rendered SST2 Dataset <https://github.com/openai/CLIP/blob/main/data/rendered-sst2.md>`_.
Rendered SST2 is an image classification dataset used to evaluate the models capability on optical
character recognition. This dataset was generated by rendering sentences in the Standford Sentiment
Treebank v2 dataset.
This dataset contains two classes (positive and negative) and is divided in three splits: a train
split containing 6920 images (3610 positive and 3310 negative), a validation split containing 872 images
(444 positive and 428 negative), and a test split containing 1821 images (909 positive and 912 negative).
Args:
root (str or ``pathlib.Path``): Root directory of the dataset.
split (string, optional): The dataset split, supports ``"train"`` (default), ``"val"`` and ``"test"``.
transform (callable, optional): A function/transform that takes in a PIL image or torch.Tensor, depends on the given loader,
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again. Default is False.
loader (callable, optional): A function to load an image given its path.
By default, it uses PIL as its image loader, but users could also pass in
``torchvision.io.decode_image`` for decoding image data into tensors directly.
"""
_URL = "https://openaipublic.azureedge.net/clip/data/rendered-sst2.tgz"
_MD5 = "2384d08e9dcfa4bd55b324e610496ee5"
def __init__(
self,
root: Union[str, Path],
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
loader: Callable[[str], Any] = default_loader,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self._split = verify_str_arg(split, "split", ("train", "val", "test"))
self._split_to_folder = {"train": "train", "val": "valid", "test": "test"}
self._base_folder = Path(self.root) / "rendered-sst2"
self.classes = ["negative", "positive"]
self.class_to_idx = {"negative": 0, "positive": 1}
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
self._samples = make_dataset(str(self._base_folder / self._split_to_folder[self._split]), extensions=("png",))
self.loader = loader
def __len__(self) -> int:
return len(self._samples)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
image_file, label = self._samples[idx]
image = self.loader(image_file)
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
def extra_repr(self) -> str:
return f"split={self._split}"
def _check_exists(self) -> bool:
for class_label in set(self.classes):
if not (self._base_folder / self._split_to_folder[self._split] / class_label).is_dir():
return False
return True
def _download(self) -> None:
if self._check_exists():
return
download_and_extract_archive(self._URL, download_root=self.root, md5=self._MD5)
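# --- Usage sketch (hedged; hypothetical root path, assumes this class ships
# as torchvision.datasets.RenderedSST2). The loader argument lets callers
# decode straight to tensors instead of PIL images; left commented out because
# it would download the archive.
# from torchvision.io import decode_image
# dataset = RenderedSST2(root="data", split="val", download=True, loader=decode_image)
# image, label = dataset[0]  # image is a uint8 tensor, label is 0 or 1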
|
from pathlib import Path
from typing import Any, Callable, Optional, Tuple, Union
import PIL.Image
from .folder import make_dataset
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class RenderedSST2(VisionDataset):
"""`The Rendered SST2 Dataset <https://github.com/openai/CLIP/blob/main/data/rendered-sst2.md>`_.
    Rendered SST2 is an image classification dataset used to evaluate a model's capability for optical
    character recognition. It was generated by rendering sentences from the Stanford Sentiment
    Treebank v2 dataset.
    The dataset contains two classes (positive and negative) and is divided into three splits: a train
    split containing 6920 images (3610 positive and 3310 negative), a validation split containing 872 images
    (444 positive and 428 negative), and a test split containing 1821 images (909 positive and 912 negative).
Args:
root (str or ``pathlib.Path``): Root directory of the dataset.
        split (string, optional): The dataset split, supports ``"train"`` (default), ``"val"`` and ``"test"``.
        transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
            version. E.g., ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again. Default is False.
"""
_URL = "https://openaipublic.azureedge.net/clip/data/rendered-sst2.tgz"
_MD5 = "2384d08e9dcfa4bd55b324e610496ee5"
def __init__(
self,
root: Union[str, Path],
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self._split = verify_str_arg(split, "split", ("train", "val", "test"))
self._split_to_folder = {"train": "train", "val": "valid", "test": "test"}
self._base_folder = Path(self.root) / "rendered-sst2"
self.classes = ["negative", "positive"]
self.class_to_idx = {"negative": 0, "positive": 1}
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
self._samples = make_dataset(str(self._base_folder / self._split_to_folder[self._split]), extensions=("png",))
def __len__(self) -> int:
return len(self._samples)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
image_file, label = self._samples[idx]
image = PIL.Image.open(image_file).convert("RGB")
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
def extra_repr(self) -> str:
return f"split={self._split}"
def _check_exists(self) -> bool:
for class_label in set(self.classes):
if not (self._base_folder / self._split_to_folder[self._split] / class_label).is_dir():
return False
return True
def _download(self) -> None:
if self._check_exists():
return
download_and_extract_archive(self._URL, download_root=self.root, md5=self._MD5)
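# --- Usage sketch for this PIL-only variant (hedged; hypothetical root path,
# commented out because it would download the archive). Samples come back as
# RGB PIL images, so standard torchvision transforms compose directly.
# import torchvision.transforms as T
# dataset = RenderedSST2(
#     root="data", split="test", download=True,
#     transform=T.Compose([T.Resize(224), T.ToTensor()]),
# )
# tensor, label = dataset[0]  # float tensor in [0, 1], label in {0, 1}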
|
_base_ = [
'./sort_faster-rcnn_r50_fpn_8xb2-4e_mot17halftrain'
'_test-mot17halfval.py'
]
# dataloader
val_dataloader = dict(
dataset=dict(ann_file='annotations/train_cocoformat.json'))
test_dataloader = dict(
dataset=dict(
ann_file='annotations/test_cocoformat.json',
data_prefix=dict(img_path='test')))
# evaluator
test_evaluator = dict(format_only=True, outfile_prefix='./mot_17_test_res')
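# --- Hedged sketch (simplified): _base_ inheritance boils down to a recursive
# dict merge, with child keys overriding base keys. The real MMEngine Config
# implementation also handles deletion markers and lists; the base values
# below are hypothetical stand-ins for the inherited config.
def _merge(base: dict, override: dict) -> dict:
    out = dict(base)
    for k, v in override.items():
        if isinstance(v, dict) and isinstance(out.get(k), dict):
            out[k] = _merge(out[k], v)
        else:
            out[k] = v
    return out

_base_cfg = dict(test_dataloader=dict(dataset=dict(
    ann_file='annotations/half-val_cocoformat.json',
    data_prefix=dict(img_path='train'))))
_child_cfg = dict(test_dataloader=dict(dataset=dict(
    ann_file='annotations/test_cocoformat.json',
    data_prefix=dict(img_path='test'))))
assert _merge(_base_cfg, _child_cfg)['test_dataloader']['dataset'][
    'ann_file'] == 'annotations/test_cocoformat.json'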
|
_base_ = [
'./sort_faster-rcnn_r50_fpn_8xb2-4e_mot17halftrain'
'_test-mot17halfval.py'
]
model = dict(
detector=dict(
init_cfg=dict(
type='Pretrained',
checkpoint= # noqa: E251
'https://download.openmmlab.com/mmtracking/mot/faster_rcnn/faster-rcnn_r50_fpn_4e_mot17-ffa52ae7.pth' # noqa: E501
)))
# dataloader
val_dataloader = dict(
dataset=dict(ann_file='annotations/train_cocoformat.json'))
test_dataloader = dict(
dataset=dict(
ann_file='annotations/test_cocoformat.json',
data_prefix=dict(img_path='test')))
# evaluator
test_evaluator = dict(format_only=True, outfile_prefix='./mot_17_test_res')
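# --- Hedged sketch (assumption: MMEngine resolves init_cfg type='Pretrained'
# roughly like this; the real logic lives in mmengine's weight-init utilities).
import torch

def _load_pretrained(module: torch.nn.Module, checkpoint_url: str) -> None:
    # Download (and cache) the checkpoint, then load matching keys only.
    state = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')
    module.load_state_dict(state.get('state_dict', state), strict=False)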
|
# Copyright (c) OpenMMLab. All rights reserved.
# This file adds snake case aliases for the COCO API
import warnings
from collections import defaultdict
from typing import List, Optional, Union
import pycocotools
from pycocotools.coco import COCO as _COCO
from pycocotools.cocoeval import COCOeval as _COCOeval
class COCO(_COCO):
"""This class is almost the same as official pycocotools package.
It implements some snake case function aliases. So that the COCO class has
the same interface as LVIS class.
"""
def __init__(self, annotation_file=None):
        # NOTE: lexicographic string comparison, intended to detect the
        # deprecated mmpycocotools fork (which reports versions >= 12.0.2)
        if getattr(pycocotools, '__version__', '0') >= '12.0.2':
warnings.warn(
'mmpycocotools is deprecated. Please install official pycocotools by "pip install pycocotools"', # noqa: E501
UserWarning)
super().__init__(annotation_file=annotation_file)
self.img_ann_map = self.imgToAnns
self.cat_img_map = self.catToImgs
def get_ann_ids(self, img_ids=[], cat_ids=[], area_rng=[], iscrowd=None):
return self.getAnnIds(img_ids, cat_ids, area_rng, iscrowd)
def get_cat_ids(self, cat_names=[], sup_names=[], cat_ids=[]):
return self.getCatIds(cat_names, sup_names, cat_ids)
def get_img_ids(self, img_ids=[], cat_ids=[]):
return self.getImgIds(img_ids, cat_ids)
def load_anns(self, ids):
return self.loadAnns(ids)
def load_cats(self, ids):
return self.loadCats(ids)
def load_imgs(self, ids):
return self.loadImgs(ids)
# just for the ease of import
COCOeval = _COCOeval
class COCOPanoptic(COCO):
"""This wrapper is for loading the panoptic style annotation file.
The format is shown in the CocoPanopticDataset class.
Args:
annotation_file (str, optional): Path of annotation file.
Defaults to None.
"""
def __init__(self, annotation_file: Optional[str] = None) -> None:
super(COCOPanoptic, self).__init__(annotation_file)
def createIndex(self) -> None:
"""Create index."""
# create index
print('creating index...')
# anns stores 'segment_id -> annotation'
anns, cats, imgs = {}, {}, {}
img_to_anns, cat_to_imgs = defaultdict(list), defaultdict(list)
if 'annotations' in self.dataset:
for ann, img_info in zip(self.dataset['annotations'],
self.dataset['images']):
img_info['segm_file'] = ann['file_name']
for seg_ann in ann['segments_info']:
# to match with instance.json
seg_ann['image_id'] = ann['image_id']
seg_ann['height'] = img_info['height']
seg_ann['width'] = img_info['width']
img_to_anns[ann['image_id']].append(seg_ann)
                    # segment_id is not unique across the COCO dataset,
                    # so collect duplicates into a list
if seg_ann['id'] in anns.keys():
anns[seg_ann['id']].append(seg_ann)
else:
anns[seg_ann['id']] = [seg_ann]
if 'images' in self.dataset:
for img in self.dataset['images']:
imgs[img['id']] = img
if 'categories' in self.dataset:
for cat in self.dataset['categories']:
cats[cat['id']] = cat
if 'annotations' in self.dataset and 'categories' in self.dataset:
for ann in self.dataset['annotations']:
for seg_ann in ann['segments_info']:
cat_to_imgs[seg_ann['category_id']].append(ann['image_id'])
print('index created!')
self.anns = anns
self.imgToAnns = img_to_anns
self.catToImgs = cat_to_imgs
self.imgs = imgs
self.cats = cats
def load_anns(self,
ids: Union[List[int], int] = []) -> Optional[List[dict]]:
"""Load anns with the specified ids.
``self.anns`` is a list of annotation lists instead of a
list of annotations.
Args:
ids (Union[List[int], int]): Integer ids specifying anns.
Returns:
anns (List[dict], optional): Loaded ann objects.
"""
anns = []
if hasattr(ids, '__iter__') and hasattr(ids, '__len__'):
            # self.anns maps each id to a list of annotations,
            # so concatenate the lists for all requested ids
for id in ids:
anns += self.anns[id]
return anns
        elif isinstance(ids, int):
return self.anns[ids]
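# --- Hedged sketch: exercising COCOPanoptic on a tiny in-memory panoptic-style
# dataset (one annotation per image, each holding segments_info). The data
# below is made up purely for illustration.
_pan = COCOPanoptic()
_pan.dataset = {
    'images': [{'id': 1, 'height': 4, 'width': 4}],
    'annotations': [{
        'image_id': 1,
        'file_name': '000001.png',
        'segments_info': [{'id': 7, 'category_id': 2}],
    }],
    'categories': [{'id': 2, 'name': 'thing'}],
}
_pan.createIndex()
assert _pan.load_anns(7)[0]['height'] == 4      # int id -> list of segments
assert _pan.load_anns([7])[0]['image_id'] == 1  # iterable of ids works too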
|
# Copyright (c) OpenMMLab. All rights reserved.
# This file adds snake case aliases for the COCO API
import warnings
import pycocotools
from pycocotools.coco import COCO as _COCO
from pycocotools.cocoeval import COCOeval as _COCOeval
class COCO(_COCO):
"""This class is almost the same as official pycocotools package.
It implements some snake case function aliases. So that the COCO class has
the same interface as LVIS class.
"""
def __init__(self, annotation_file=None):
        # NOTE: lexicographic string comparison, intended to detect the
        # deprecated mmpycocotools fork (which reports versions >= 12.0.2)
        if getattr(pycocotools, '__version__', '0') >= '12.0.2':
warnings.warn(
'mmpycocotools is deprecated. Please install official pycocotools by "pip install pycocotools"', # noqa: E501
UserWarning)
super().__init__(annotation_file=annotation_file)
self.img_ann_map = self.imgToAnns
self.cat_img_map = self.catToImgs
def get_ann_ids(self, img_ids=[], cat_ids=[], area_rng=[], iscrowd=None):
return self.getAnnIds(img_ids, cat_ids, area_rng, iscrowd)
def get_cat_ids(self, cat_names=[], sup_names=[], cat_ids=[]):
return self.getCatIds(cat_names, sup_names, cat_ids)
def get_img_ids(self, img_ids=[], cat_ids=[]):
return self.getImgIds(img_ids, cat_ids)
def load_anns(self, ids):
return self.loadAnns(ids)
def load_cats(self, ids):
return self.loadCats(ids)
def load_imgs(self, ids):
return self.loadImgs(ids)
# just for the ease of import
COCOeval = _COCOeval
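# --- Hedged sketch: the snake_case aliases mirror pycocotools' camelCase API
# one-to-one, shown here on a tiny in-memory instances-style dataset. The data
# below is made up purely for illustration.
_coco = COCO()
_coco.dataset = {
    'images': [{'id': 1}],
    'annotations': [{'id': 10, 'image_id': 1, 'category_id': 2,
                     'area': 4.0, 'iscrowd': 0}],
    'categories': [{'id': 2, 'name': 'person'}],
}
_coco.createIndex()
assert _coco.get_cat_ids(cat_names=['person']) == [2]
assert _coco.get_ann_ids(img_ids=[1]) == [10]
assert _coco.load_anns([10])[0]['category_id'] == 2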
|
from typing import Union
import PIL.Image
import torch
from torchvision.prototype import datapoints
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
from torchvision.utils import _log_api_usage_once
def erase_image_tensor(
image: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
if not inplace:
image = image.clone()
image[..., i : i + h, j : j + w] = v
return image
@torch.jit.unused
def erase_image_pil(
image: PIL.Image.Image, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> PIL.Image.Image:
t_img = pil_to_tensor(image)
output = erase_image_tensor(t_img, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return to_pil_image(output, mode=image.mode)
def erase_video(
video: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
return erase_image_tensor(video, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
def erase(
inpt: Union[datapoints.ImageTypeJIT, datapoints.VideoTypeJIT],
i: int,
j: int,
h: int,
w: int,
v: torch.Tensor,
inplace: bool = False,
) -> Union[datapoints.ImageTypeJIT, datapoints.VideoTypeJIT]:
if not torch.jit.is_scripting():
_log_api_usage_once(erase)
if isinstance(inpt, torch.Tensor) and (
torch.jit.is_scripting() or not isinstance(inpt, (datapoints.Image, datapoints.Video))
):
return erase_image_tensor(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
elif isinstance(inpt, datapoints.Image):
output = erase_image_tensor(inpt.as_subclass(torch.Tensor), i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return datapoints.Image.wrap_like(inpt, output)
elif isinstance(inpt, datapoints.Video):
output = erase_video(inpt.as_subclass(torch.Tensor), i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return datapoints.Video.wrap_like(inpt, output)
elif isinstance(inpt, PIL.Image.Image):
return erase_image_pil(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
else:
raise TypeError(
f"Input can either be a plain tensor, an `Image` or `Video` datapoint, or a PIL image, "
f"but got {type(inpt)} instead."
)
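# --- Hedged sketch: erase fills the h x w window starting at (i, j) with v,
# out-of-place by default, so the input tensor is left untouched.
_img = torch.ones(3, 8, 8)
_out = erase_image_tensor(_img, i=2, j=2, h=4, w=4, v=torch.zeros(3, 4, 4))
assert _out[:, 2:6, 2:6].eq(0).all() and _img.eq(1).all()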
|
from typing import Union
import PIL.Image
import torch
from torchvision.prototype import datapoints
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
def erase_image_tensor(
image: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
if not inplace:
image = image.clone()
image[..., i : i + h, j : j + w] = v
return image
@torch.jit.unused
def erase_image_pil(
image: PIL.Image.Image, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> PIL.Image.Image:
t_img = pil_to_tensor(image)
output = erase_image_tensor(t_img, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return to_pil_image(output, mode=image.mode)
def erase_video(
video: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
return erase_image_tensor(video, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
def erase(
inpt: Union[datapoints.ImageTypeJIT, datapoints.VideoTypeJIT],
i: int,
j: int,
h: int,
w: int,
v: torch.Tensor,
inplace: bool = False,
) -> Union[datapoints.ImageTypeJIT, datapoints.VideoTypeJIT]:
if isinstance(inpt, torch.Tensor) and (
torch.jit.is_scripting() or not isinstance(inpt, (datapoints.Image, datapoints.Video))
):
return erase_image_tensor(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
elif isinstance(inpt, datapoints.Image):
output = erase_image_tensor(inpt.as_subclass(torch.Tensor), i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return datapoints.Image.wrap_like(inpt, output)
elif isinstance(inpt, datapoints.Video):
output = erase_video(inpt.as_subclass(torch.Tensor), i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return datapoints.Video.wrap_like(inpt, output)
elif isinstance(inpt, PIL.Image.Image):
return erase_image_pil(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
else:
raise TypeError(
f"Input can either be a plain tensor, an `Image` or `Video` datapoint, or a PIL image, "
f"but got {type(inpt)} instead."
)
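# --- Hedged sketch: with inplace=True the input tensor itself is mutated and
# returned, which avoids the clone at the cost of clobbering the caller's data.
_buf = torch.full((3, 4, 4), 5.0)
_ret = erase_image_tensor(_buf, i=0, j=0, h=1, w=1, v=torch.tensor(0.0), inplace=True)
assert _ret is _buf and _buf[0, 0, 0] == 0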
|