input (string, lengths 33–5k) | output (string, lengths 32–5k) |
---|---|
import json
import re
from typing import TypeVar
import yaml
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers import BaseOutputParser
from pydantic import BaseModel, ValidationError
from langchain.output_parsers.format_instructions import YAML_FORMAT_INSTRUCTIONS
T = TypeVar("T", bound=BaseModel)
class YamlOutputParser(BaseOutputParser[T]):
"""Parse YAML output using a pydantic model."""
pydantic_object: type[T]
"""The pydantic model to parse."""
pattern: re.Pattern = re.compile(
r"^```(?:ya?ml)?(?P<yaml>[^`]*)",
re.MULTILINE | re.DOTALL,
)
"""Regex pattern to match yaml code blocks
within triple backticks with optional yaml or yml prefix."""
def parse(self, text: str) -> T:
try:
# Greedy search for 1st yaml candidate.
match = re.search(self.pattern, text.strip())
# If no backticks were present, try to parse the entire output as yaml.
yaml_str = match.group("yaml") if match else text
json_object = yaml.safe_load(yaml_str)
if hasattr(self.pydantic_object, "model_validate"):
return self.pydantic_object.model_validate(json_object)
return self.pydantic_object.parse_obj(json_object)
except (yaml.YAMLError, ValidationError) as e:
name = self.pydantic_object.__name__
msg = f"Failed to parse {name} from completion {text}. Got: {e}"
raise OutputParserException(msg, llm_output=text) from e
def get_format_instructions(self) -> str:
# Copy schema to avoid altering original Pydantic schema.
schema = dict(self.pydantic_object.schema().items())
# Remove extraneous fields.
reduced_schema = schema
if "title" in reduced_schema:
del reduced_schema["title"]
if "type" in reduced_schema:
del reduced_schema["type"]
# Ensure yaml in context is well-formed with double quotes.
schema_str = json.dumps(reduced_schema)
return YAML_FORMAT_INSTRUCTIONS.format(schema=schema_str)
@property
def _type(self) -> str:
return "yaml"
@property
def OutputType(self) -> type[T]:
return self.pydantic_object
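# A minimal usage sketch for the parser above; the Actor model and the
# fenced completion below are illustrative assumptions, not part of the
# original module.
class Actor(BaseModel):
    name: str
    film_names: list[str]

parser = YamlOutputParser(pydantic_object=Actor)
completion = '```yaml\nname: Jane Doe\nfilm_names:\n  - First Film\n```'
actor = parser.parse(completion)
assert actor.name == "Jane Doe" and len(actor.film_names) == 1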
|
import json
import re
from typing import TypeVar
import yaml
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers import BaseOutputParser
from pydantic import BaseModel, ValidationError
from langchain.output_parsers.format_instructions import YAML_FORMAT_INSTRUCTIONS
T = TypeVar("T", bound=BaseModel)
class YamlOutputParser(BaseOutputParser[T]):
"""Parse YAML output using a pydantic model."""
pydantic_object: type[T]
"""The pydantic model to parse."""
pattern: re.Pattern = re.compile(
r"^```(?:ya?ml)?(?P<yaml>[^`]*)", re.MULTILINE | re.DOTALL
)
"""Regex pattern to match yaml code blocks
within triple backticks with optional yaml or yml prefix."""
def parse(self, text: str) -> T:
try:
# Greedy search for 1st yaml candidate.
match = re.search(self.pattern, text.strip())
# If no backticks were present, try to parse the entire output as yaml.
yaml_str = match.group("yaml") if match else text
json_object = yaml.safe_load(yaml_str)
if hasattr(self.pydantic_object, "model_validate"):
return self.pydantic_object.model_validate(json_object)
return self.pydantic_object.parse_obj(json_object)
except (yaml.YAMLError, ValidationError) as e:
name = self.pydantic_object.__name__
msg = f"Failed to parse {name} from completion {text}. Got: {e}"
raise OutputParserException(msg, llm_output=text) from e
def get_format_instructions(self) -> str:
# Copy schema to avoid altering original Pydantic schema.
schema = dict(self.pydantic_object.schema().items())
# Remove extraneous fields.
reduced_schema = schema
if "title" in reduced_schema:
del reduced_schema["title"]
if "type" in reduced_schema:
del reduced_schema["type"]
# Ensure yaml in context is well-formed with double quotes.
schema_str = json.dumps(reduced_schema)
return YAML_FORMAT_INSTRUCTIONS.format(schema=schema_str)
@property
def _type(self) -> str:
return "yaml"
@property
def OutputType(self) -> type[T]:
return self.pydantic_object
|
# Copyright (c) Meta Platforms, Inc. and affiliates
from llama_index.llms.meta.base import LlamaLLM
__all__ = ["LlamaLLM"]
|
from llama_index.llms.meta.base import LlamaLLM
__all__ = ["LlamaLLM"]
|
from jina import Client, Document, Executor, Flow, requests
def validate_results(results):
req = results[0]
assert len(req.docs) == 1
assert len(req.docs[0].matches) == 5
assert len(req.docs[0].matches[0].matches) == 5
assert len(req.docs[0].matches[-1].matches) == 5
assert len(req.docs[0].matches[0].matches[0].matches) == 0
class MatchAdder(Executor):
def __init__(self, traversal_paths='r', **kwargs):
super().__init__(**kwargs)
self._traversal_paths = traversal_paths
@requests(on='index')
def index(self, docs, **kwargs):
for path_docs in docs.traverse(self._traversal_paths):
for doc in path_docs:
for i in range(5):
doc.matches.append(Document())
def test_single_executor(port_generator):
exposed_port = port_generator()
f = Flow(port=exposed_port).add(
uses={'jtype': 'MatchAdder', 'with': {'traversal_paths': 'r,m'}}
)
with f:
results = Client(port=exposed_port).post(
on='index', inputs=Document(), return_responses=True
)
validate_results(results)
def test_multi_executor(port_generator):
exposed_port = port_generator()
f = (
Flow(port=exposed_port)
.add(uses={'jtype': 'MatchAdder', 'with': {'traversal_paths': 'r'}})
.add(uses={'jtype': 'MatchAdder', 'with': {'traversal_paths': 'm'}})
)
with f:
results = Client(port=exposed_port).post(
on='index', inputs=Document(), return_responses=True
)
validate_results(results)
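# The tests above assume a `port_generator` fixture that hands out free
# ports; a minimal conftest.py sketch (illustrative, not from the source):
import socket
from contextlib import closing

import pytest


@pytest.fixture
def port_generator():
    def _random_port():
        with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
            s.bind(('', 0))
            return s.getsockname()[1]

    return _random_port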
|
from jina import Client, Document, Executor, Flow, requests
exposed_port = 12345
def validate_results(results):
req = results[0]
assert len(req.docs) == 1
assert len(req.docs[0].matches) == 5
assert len(req.docs[0].matches[0].matches) == 5
assert len(req.docs[0].matches[-1].matches) == 5
assert len(req.docs[0].matches[0].matches[0].matches) == 0
class MatchAdder(Executor):
def __init__(self, traversal_paths='r', **kwargs):
super().__init__(**kwargs)
self._traversal_paths = traversal_paths
@requests(on='index')
def index(self, docs, **kwargs):
for path_docs in docs.traverse(self._traversal_paths):
for doc in path_docs:
for i in range(5):
doc.matches.append(Document())
def test_single_executor():
f = Flow(port=exposed_port).add(
uses={'jtype': 'MatchAdder', 'with': {'traversal_paths': 'r,m'}}
)
with f:
results = Client(port=exposed_port).post(
on='index', inputs=Document(), return_responses=True
)
validate_results(results)
def test_multi_executor():
f = (
Flow(port=exposed_port)
.add(uses={'jtype': 'MatchAdder', 'with': {'traversal_paths': 'r'}})
.add(uses={'jtype': 'MatchAdder', 'with': {'traversal_paths': 'm'}})
)
with f:
results = Client(port=exposed_port).post(
on='index', inputs=Document(), return_responses=True
)
validate_results(results)
|
_base_ = './faster-rcnn_r50-caffe-dc5_ms-1x_coco.py'
# MMEngine support the following two ways, users can choose
# according to convenience
# param_scheduler = [
# dict(
# type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), # noqa
# dict(
# type='MultiStepLR',
# begin=0,
# end=12,
# by_epoch=True,
# milestones=[28, 34],
# gamma=0.1)
# ]
_base_.param_scheduler[1].milestones = [28, 34]
train_cfg = dict(max_epochs=36)
|
_base_ = './faster-rcnn_r50-caffe-dc5_ms-1x_coco.py'
# learning policy
lr_config = dict(step=[28, 34])
runner = dict(type='EpochBasedRunner', max_epochs=36)
|
import logging
from typing import Any
from autogpt_libs.utils.cache import thread_cached
from backend.data.block import (
Block,
BlockCategory,
BlockInput,
BlockOutput,
BlockSchema,
BlockType,
get_block,
)
from backend.data.execution import ExecutionStatus
from backend.data.model import SchemaField
from backend.util import json
logger = logging.getLogger(__name__)
@thread_cached
def get_executor_manager_client():
from backend.executor import ExecutionManager
from backend.util.service import get_service_client
return get_service_client(ExecutionManager)
@thread_cached
def get_event_bus():
from backend.data.execution import RedisExecutionEventBus
return RedisExecutionEventBus()
class AgentExecutorBlock(Block):
class Input(BlockSchema):
user_id: str = SchemaField(description="User ID")
graph_id: str = SchemaField(description="Graph ID")
graph_version: int = SchemaField(description="Graph Version")
data: BlockInput = SchemaField(description="Input data for the graph")
input_schema: dict = SchemaField(description="Input schema for the graph")
output_schema: dict = SchemaField(description="Output schema for the graph")
@classmethod
def get_input_schema(cls, data: BlockInput) -> dict[str, Any]:
return data.get("input_schema", {})
@classmethod
def get_input_defaults(cls, data: BlockInput) -> BlockInput:
return data.get("data", {})
@classmethod
def get_missing_input(cls, data: BlockInput) -> set[str]:
required_fields = cls.get_input_schema(data).get("required", [])
return set(required_fields) - set(data)
@classmethod
def get_mismatch_error(cls, data: BlockInput) -> str | None:
return json.validate_with_jsonschema(cls.get_input_schema(data), data)
class Output(BlockSchema):
pass
def __init__(self):
super().__init__(
id="e189baac-8c20-45a1-94a7-55177ea42565",
description="Executes an existing agent inside your agent",
input_schema=AgentExecutorBlock.Input,
output_schema=AgentExecutorBlock.Output,
block_type=BlockType.AGENT,
categories={BlockCategory.AGENT},
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
executor_manager = get_executor_manager_client()
event_bus = get_event_bus()
graph_exec = executor_manager.add_execution(
graph_id=input_data.graph_id,
graph_version=input_data.graph_version,
user_id=input_data.user_id,
data=input_data.data,
)
log_id = f"Graph #{input_data.graph_id}-V{input_data.graph_version}, exec-id: {graph_exec.graph_exec_id}"
logger.info(f"Starting execution of {log_id}")
for event in event_bus.listen(
graph_id=graph_exec.graph_id, graph_exec_id=graph_exec.graph_exec_id
):
logger.info(
f"Execution {log_id} produced input {event.input_data} output {event.output_data}"
)
if not event.node_id:
if event.status in [
ExecutionStatus.COMPLETED,
ExecutionStatus.TERMINATED,
ExecutionStatus.FAILED,
]:
logger.info(f"Execution {log_id} ended with status {event.status}")
break
else:
continue
if not event.block_id:
logger.warning(f"{log_id} received event without block_id {event}")
continue
block = get_block(event.block_id)
if not block or block.block_type != BlockType.OUTPUT:
continue
output_name = event.input_data.get("name")
if not output_name:
logger.warning(f"{log_id} produced an output with no name {event}")
continue
for output_data in event.output_data.get("output", []):
logger.info(f"Execution {log_id} produced {output_name}: {output_data}")
yield output_name, output_data
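# A small sketch exercising the classmethod helpers above; the schema and
# payload are illustrative assumptions, not taken from a real graph.
payload = {
    "input_schema": {"type": "object", "required": ["query", "limit"]},
    "data": {"query": "hello"},
    "query": "hello",
}
# 'limit' is required by the schema but missing from the payload keys.
assert AgentExecutorBlock.get_missing_input(payload) == {"limit"}
assert AgentExecutorBlock.get_input_defaults(payload) == {"query": "hello"}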
|
import logging
from typing import Any
from autogpt_libs.utils.cache import thread_cached
from backend.data.block import (
Block,
BlockCategory,
BlockInput,
BlockOutput,
BlockSchema,
BlockType,
get_block,
)
from backend.data.execution import ExecutionStatus
from backend.data.model import SchemaField
from backend.util import json
logger = logging.getLogger(__name__)
@thread_cached
def get_executor_manager_client():
from backend.executor import ExecutionManager
from backend.util.service import get_service_client
return get_service_client(ExecutionManager)
@thread_cached
def get_event_bus():
from backend.data.execution import RedisExecutionEventBus
return RedisExecutionEventBus()
class AgentExecutorBlock(Block):
class Input(BlockSchema):
user_id: str = SchemaField(description="User ID")
graph_id: str = SchemaField(description="Graph ID")
graph_version: int = SchemaField(description="Graph Version")
data: BlockInput = SchemaField(description="Input data for the graph")
input_schema: dict = SchemaField(description="Input schema for the graph")
output_schema: dict = SchemaField(description="Output schema for the graph")
@classmethod
def get_input_schema(cls, data: BlockInput) -> dict[str, Any]:
return data.get("input_schema", {})
@classmethod
def get_input_defaults(cls, data: BlockInput) -> BlockInput:
return data.get("data", {})
@classmethod
def get_missing_input(cls, data: BlockInput) -> set[str]:
required_fields = cls.get_input_schema(data).get("required", [])
return set(required_fields) - set(data)
@classmethod
def validate_data(cls, data: BlockInput) -> str | None:
return json.validate_with_jsonschema(cls.get_input_schema(data), data)
class Output(BlockSchema):
pass
def __init__(self):
super().__init__(
id="e189baac-8c20-45a1-94a7-55177ea42565",
description="Executes an existing agent inside your agent",
input_schema=AgentExecutorBlock.Input,
output_schema=AgentExecutorBlock.Output,
block_type=BlockType.AGENT,
categories={BlockCategory.AGENT},
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
executor_manager = get_executor_manager_client()
event_bus = get_event_bus()
graph_exec = executor_manager.add_execution(
graph_id=input_data.graph_id,
graph_version=input_data.graph_version,
user_id=input_data.user_id,
data=input_data.data,
)
log_id = f"Graph #{input_data.graph_id}-V{input_data.graph_version}, exec-id: {graph_exec.graph_exec_id}"
logger.info(f"Starting execution of {log_id}")
for event in event_bus.listen(
graph_id=graph_exec.graph_id, graph_exec_id=graph_exec.graph_exec_id
):
logger.info(
f"Execution {log_id} produced input {event.input_data} output {event.output_data}"
)
if not event.node_id:
if event.status in [
ExecutionStatus.COMPLETED,
ExecutionStatus.TERMINATED,
ExecutionStatus.FAILED,
]:
logger.info(f"Execution {log_id} ended with status {event.status}")
break
else:
continue
if not event.block_id:
logger.warning(f"{log_id} received event without block_id {event}")
continue
block = get_block(event.block_id)
if not block or block.block_type != BlockType.OUTPUT:
continue
output_name = event.input_data.get("name")
if not output_name:
logger.warning(f"{log_id} produced an output with no name {event}")
continue
for output_data in event.output_data.get("output", []):
logger.info(f"Execution {log_id} produced {output_name}: {output_data}")
yield output_name, output_data
|
from typing import Iterable, Dict
from docarray.array.storage.annlite.helper import OffsetMapping
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
from docarray.array.memory import DocumentArrayInMemory
from docarray import Document
class GetSetDelMixin(BaseGetSetDelMixin):
"""Implement required and derived functions that power `getitem`, `setitem`, `delitem`"""
# essential methods start
def _get_doc_by_id(self, _id: str) -> 'Document':
doc = self._annlite.get_doc_by_id(_id)
if doc is None:
raise KeyError(f'Cannot find Document with id=`{_id}`')
return doc
def _set_doc_by_id(self, _id: str, value: 'Document'):
if _id != value.id:
self._del_doc_by_id(_id)
value.embedding = self._map_embedding(value.embedding)
docs = DocumentArrayInMemory([value])
self._annlite.update(docs)
def _del_doc_by_id(self, _id: str):
self._annlite.delete([_id])
def _clear_storage(self):
self._annlite.clear()
def _set_docs_by_ids(self, ids, docs: Iterable['Document'], mismatch_ids: Dict):
for _id, doc in zip(ids, docs):
doc.embedding = self._map_embedding(doc.embedding)
self._set_doc_by_id(_id, doc)
def _del_docs_by_ids(self, ids):
self._annlite.delete(ids)
def _load_offset2ids(self):
self._offsetmapping = OffsetMapping(
data_path=self._config.data_path, in_memory=False
)
self._offsetmapping.create_table()
self._offset2ids = Offset2ID(self._offsetmapping.get_all_ids())
def _save_offset2ids(self):
self._offsetmapping.drop()
self._offsetmapping.create_table()
self._offsetmapping._insert(
[(i, doc_id) for i, doc_id in enumerate(self._offset2ids.ids)]
)
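# Note on _save_offset2ids above: the table is dropped and rebuilt from
# scratch, so the persisted offsets always form the contiguous range
# 0..len-1 even after out-of-order deletions, e.g. ids ['a', 'c'] are
# stored as rows [(0, 'a'), (1, 'c')].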
|
from typing import Iterable, Dict
from .helper import OffsetMapping
from ..base.getsetdel import BaseGetSetDelMixin
from ..base.helper import Offset2ID
from ...memory import DocumentArrayInMemory
from .... import Document
class GetSetDelMixin(BaseGetSetDelMixin):
"""Implement required and derived functions that power `getitem`, `setitem`, `delitem`"""
# essential methods start
def _get_doc_by_id(self, _id: str) -> 'Document':
doc = self._annlite.get_doc_by_id(_id)
if doc is None:
raise KeyError(f'Cannot find Document with id=`{_id}`')
return doc
def _set_doc_by_id(self, _id: str, value: 'Document'):
if _id != value.id:
self._del_doc_by_id(_id)
value.embedding = self._map_embedding(value.embedding)
docs = DocumentArrayInMemory([value])
self._annlite.update(docs)
def _del_doc_by_id(self, _id: str):
self._annlite.delete([_id])
def _clear_storage(self):
self._annlite.clear()
def _set_docs_by_ids(self, ids, docs: Iterable['Document'], mismatch_ids: Dict):
for _id, doc in zip(ids, docs):
doc.embedding = self._map_embedding(doc.embedding)
self._set_doc_by_id(_id, doc)
def _del_docs_by_ids(self, ids):
self._annlite.delete(ids)
def _load_offset2ids(self):
self._offsetmapping = OffsetMapping(
data_path=self._config.data_path, in_memory=False
)
self._offsetmapping.create_table()
self._offset2ids = Offset2ID(self._offsetmapping.get_all_ids())
def _save_offset2ids(self):
self._offsetmapping.drop()
self._offsetmapping.create_table()
self._offsetmapping._insert(
[(i, doc_id) for i, doc_id in enumerate(self._offset2ids.ids)]
)
|
_base_ = './mask_rcnn_r101_fpn_1x_coco.py'
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[57.375, 57.120, 58.395],
to_rgb=False,
pad_size_divisor=32)
model = dict(
preprocess_cfg=preprocess_cfg,
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=8,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
style='pytorch',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnext101_32x8d')))
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='RandomChoiceResize',
img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)]),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs'),
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = './mask_rcnn_r101_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=8,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
style='pytorch',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnext101_32x8d')))
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[57.375, 57.120, 58.395],
to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
from enum import Enum
# --8<-- [start:ProviderName]
class ProviderName(str, Enum):
ANTHROPIC = "anthropic"
COMPASS = "compass"
DISCORD = "discord"
D_ID = "d_id"
E2B = "e2b"
EXA = "exa"
FAL = "fal"
GITHUB = "github"
GOOGLE = "google"
GOOGLE_MAPS = "google_maps"
GROQ = "groq"
HUBSPOT = "hubspot"
IDEOGRAM = "ideogram"
JINA = "jina"
LINEAR = "linear"
MEDIUM = "medium"
NOTION = "notion"
NVIDIA = "nvidia"
OLLAMA = "ollama"
OPENAI = "openai"
OPENWEATHERMAP = "openweathermap"
OPEN_ROUTER = "open_router"
PINECONE = "pinecone"
REDDIT = "reddit"
REPLICATE = "replicate"
REVID = "revid"
SLANT3D = "slant3d"
SMTP = "smtp"
TWITTER = "twitter"
UNREAL_SPEECH = "unreal_speech"
# --8<-- [end:ProviderName]
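# Because ProviderName subclasses str, members compare equal to their raw
# values and can be recovered from stored strings; a quick sketch:
assert ProviderName.GITHUB == "github"
assert ProviderName("open_router") is ProviderName.OPEN_ROUTER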
|
from enum import Enum
# --8<-- [start:ProviderName]
class ProviderName(str, Enum):
ANTHROPIC = "anthropic"
COMPASS = "compass"
DISCORD = "discord"
D_ID = "d_id"
E2B = "e2b"
EXA = "exa"
FAL = "fal"
GITHUB = "github"
GOOGLE = "google"
GOOGLE_MAPS = "google_maps"
GROQ = "groq"
HUBSPOT = "hubspot"
IDEOGRAM = "ideogram"
JINA = "jina"
LINEAR = "linear"
MEDIUM = "medium"
NOTION = "notion"
NVIDIA = "nvidia"
OLLAMA = "ollama"
OPENAI = "openai"
OPENWEATHERMAP = "openweathermap"
OPEN_ROUTER = "open_router"
PINECONE = "pinecone"
REPLICATE = "replicate"
REVID = "revid"
SLANT3D = "slant3d"
TWITTER = "twitter"
UNREAL_SPEECH = "unreal_speech"
# --8<-- [end:ProviderName]
|
# mypy: allow-untyped-defs
import functools
from collections.abc import Hashable
from dataclasses import dataclass, fields
from typing import TypeVar
from typing_extensions import dataclass_transform
T = TypeVar("T", bound="_Union")
class _UnionTag(str):
__slots__ = ("_cls",)
_cls: Hashable
@staticmethod
def create(t, cls):
tag = _UnionTag(t)
assert not hasattr(tag, "_cls")
tag._cls = cls
return tag
def __eq__(self, cmp) -> bool:
assert isinstance(cmp, str)
other = str(cmp)
assert other in _get_field_names(self._cls), (
f"{other} is not a valid tag for {self._cls}. Available tags: {_get_field_names(self._cls)}"
)
return str(self) == other
def __hash__(self):
return hash(str(self))
@functools.cache
def _get_field_names(cls) -> set[str]:
return {f.name for f in fields(cls)}
# If you turn a schema class that inherits from union into a dataclass, please use
# this decorator to configure it. It's safe, faster and allows code sharing.
#
# For example, _union_dataclass customizes the __eq__ method to only check the type
# and value property instead of default implementation of dataclass which goes
# through every field in the dataclass.
@dataclass_transform(eq_default=False)
def _union_dataclass(cls: type[T]) -> type[T]:
assert issubclass(cls, _Union), f"{cls} must inherit from {_Union}."
return dataclass(repr=False, eq=False)(cls)
class _Union:
_type: _UnionTag
@classmethod
def create(cls, **kwargs):
assert len(kwargs) == 1
obj = cls(**{**{f.name: None for f in fields(cls)}, **kwargs}) # type: ignore[arg-type]
obj._type = _UnionTag.create(next(iter(kwargs.keys())), cls)
return obj
def __post_init__(self):
assert not any(
f.name in ("type", "_type", "create", "value")
for f in fields(self) # type: ignore[arg-type, misc]
)
@property
def type(self) -> str:
try:
return self._type
except AttributeError as e:
raise RuntimeError(
f"Please use {type(self).__name__}.create to instantiate the union type."
) from e
@property
def value(self):
return getattr(self, self.type)
def __getattribute__(self, name):
attr = super().__getattribute__(name)
if attr is None and name in _get_field_names(type(self)) and name != self.type: # type: ignore[arg-type]
raise AttributeError(f"Field {name} is not set.")
return attr
def __eq__(self, other: object) -> bool:
if not isinstance(other, _Union):
return False
return self.type == other.type and self.value == other.value
def __str__(self):
return self.__repr__()
def __repr__(self):
return f"{type(self).__name__}({self.type}={getattr(self, self.type)})"
|
# mypy: allow-untyped-defs
import functools
from collections.abc import Hashable
from dataclasses import dataclass, fields
from typing import TypeVar
from typing_extensions import dataclass_transform
T = TypeVar("T", bound="_Union")
class _UnionTag(str):
__slots__ = ("_cls",)
_cls: Hashable
@staticmethod
def create(t, cls):
tag = _UnionTag(t)
assert not hasattr(tag, "_cls")
tag._cls = cls
return tag
def __eq__(self, cmp) -> bool:
assert isinstance(cmp, str)
other = str(cmp)
assert other in _get_field_names(self._cls), (
f"{other} is not a valid tag for {self._cls}. Available tags: {_get_field_names(self._cls)}"
)
return str(self) == other
def __hash__(self):
return hash(str(self))
@functools.cache
def _get_field_names(cls) -> set[str]:
return {f.name for f in fields(cls)}
# If you turn a schema class that inherits from union into a dataclass, please use
# this decorator to configure it. It's safe, faster and allows code sharing.
#
# For example, _union_dataclass customizes the __eq__ method to only check the type
# and value property instead of default implementation of dataclass which goes
# through every field in the dataclass.
@dataclass_transform(eq_default=False)
def _union_dataclass(cls: type[T]) -> type[T]:
assert issubclass(cls, _Union), f"{cls} must inherit from {_Union}."
return dataclass(repr=False, eq=False)(cls)
class _Union:
_type: _UnionTag
@classmethod
def create(cls, **kwargs):
assert len(kwargs) == 1
obj = cls(**{**{f.name: None for f in fields(cls)}, **kwargs}) # type: ignore[arg-type]
obj._type = _UnionTag.create(next(iter(kwargs.keys())), cls)
return obj
def __post_init__(self):
assert not any(
f.name in ("type", "_type", "create", "value")
for f in fields(self) # type: ignore[arg-type, misc]
)
@property
def type(self) -> str:
try:
return self._type
except AttributeError as e:
raise RuntimeError(
f"Please use {type(self).__name__}.create to instantiate the union type."
) from e
@property
def value(self):
return getattr(self, self.type)
def __getattribute__(self, name):
attr = super().__getattribute__(name)
if attr is None and name in _get_field_names(type(self)) and name != self.type: # type: ignore[arg-type]
raise AttributeError(f"Field {name} is not set.")
return attr
def __eq__(self, other: object) -> bool:
if not isinstance(other, _Union):
return False
return self.type == other.type and self.value == other.value
def __str__(self):
return self.__repr__()
def __repr__(self):
return f"{type(self).__name__}({self.type}={getattr(self, self.type)})"
|
"""Common structures for structured indices."""
from dataclasses import dataclass
from typing import Dict, Optional
from dataclasses_json import DataClassJsonMixin
# TODO: migrate this to be a data_struct
@dataclass
class SQLContextContainer(DataClassJsonMixin):
"""
SQLContextContainer.
A container interface to store context for a given table.
Context can be built from unstructured documents (e.g. using SQLContextBuilder).
Context can also be dumped to an underlying LlamaIndex data structure.
Contains both the raw context_dict as well as any index_structure.
Should not be used directly - build one from SQLContextContainerBuilder instead.
"""
context_dict: Optional[Dict[str, str]] = None
context_str: Optional[str] = None
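# DataClassJsonMixin gives the container JSON (de)serialization for free;
# a small round-trip sketch with an illustrative table entry:
container = SQLContextContainer(context_dict={"city_stats": "Per-city population."})
assert SQLContextContainer.from_json(container.to_json()) == container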
|
"""Common structures for structured indices."""
from dataclasses import dataclass
from typing import Dict, Optional
from dataclasses_json import DataClassJsonMixin
# TODO: migrate this to be a data_struct
@dataclass
class SQLContextContainer(DataClassJsonMixin):
"""
SQLContextContainer.
A container interface to store context for a given table.
Context can be built from unstructured documents (e.g. using SQLContextBuilder).
Context can also be dumped to an underlying LlamaIndex data structure.
Contains both the raw context_dict as well as any index_structure.
Should not be used directly - build one from SQLContextContainerBuilder instead.
"""
context_dict: Optional[Dict[str, str]] = None
context_str: Optional[str] = None
|
# Owner(s): ["module: inductor"]
from unittest.mock import patch
import torch
from torch._inductor import config
from torch._inductor.async_compile import AsyncCompile, shutdown_compile_workers
from torch._inductor.runtime.triton_compat import Config
from torch._inductor.runtime.triton_heuristics import (
generate_lookup_hash_from_source_code,
)
from torch._inductor.test_case import run_tests, TestCase
from torch._inductor.utils import fresh_cache
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
)
from torch.testing._internal.inductor_utils import (
GPU_TYPE,
requires_gpu,
requires_triton,
)
@instantiate_parametrized_tests
class TestAsyncCompile(TestCase):
@requires_gpu()
@requires_triton()
@parametrize("method", ("subprocess", "fork", "spawn"))
def test_pool(self, method):
def fn(x, y):
return x + y
x = torch.rand(10).to(GPU_TYPE)
y = torch.rand(10).to(GPU_TYPE)
with config.patch("worker_start_method", method):
shutdown_compile_workers()
AsyncCompile.wait_pool_ready()
with fresh_cache():
compiled_fn = torch.compile(fn)
self.assertEqual(fn(x, y), compiled_fn(x, y))
@requires_gpu()
@requires_triton()
@patch("torch._inductor.runtime.coordinate_descent_tuner.CoordescTuner.autotune")
@parametrize("method", ("subprocess", "fork", "spawn"))
def test_autotune_lookup_table(self, mock_autotune, method):
def f(a, b):
return (a @ b).to(torch.float32).sum(dim=1)
# Fake name to make sure the lookup table is name agnostic
func_def = """
def triton_fused_fake_name(in_ptr0, out_ptr0, xnumel, r0_numel, XBLOCK : tl.constexpr, R0_BLOCK : tl.constexpr):
xnumel = 1024
r0_numel = 11776
rnumel = r0_numel
RBLOCK: tl.constexpr = R0_BLOCK
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
r0_base = tl.arange(0, R0_BLOCK)[None, :]
rbase = r0_base
x0 = xindex
_tmp3 = tl.full([XBLOCK, R0_BLOCK], 0, tl.float32)
for r0_offset in range(0, r0_numel, R0_BLOCK):
r0_index = r0_offset + r0_base
r0_mask = r0_index < r0_numel
roffset = r0_offset
rindex = r0_index
r0_1 = r0_index
tmp0 = tl.load(in_ptr0 + (r0_1 + 11776*x0), r0_mask & xmask, eviction_policy='evict_first', other=0.0).to(tl.float32)
tmp1 = tmp0.to(tl.float32)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, R0_BLOCK])
tmp4 = _tmp3 + tmp2
_tmp3 = tl.where(r0_mask & xmask, tmp4, _tmp3)
tmp3 = tl.sum(_tmp3, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp3, xmask)
"""
fn_hash = generate_lookup_hash_from_source_code(func_def)
block_configs = {
"XBLOCK": 1,
"R0_BLOCK": 128,
}
num_warps = 16
num_stages = 1
autotune_lookup_table = {
fn_hash: {**block_configs, "num_warps": num_warps, "num_stages": num_stages}
}
autotune_config = Config(
block_configs, num_warps=num_warps, num_stages=num_stages
)
mock_autotune.return_value = autotune_config
a = torch.randn(1152, 1024, device=GPU_TYPE, dtype=torch.float16).T
b = torch.randn(1152, 11776, device=GPU_TYPE, dtype=torch.float16)
compiled_f = torch.compile(f)
with config.patch(
{
"autotune_lookup_table": autotune_lookup_table,
"coordinate_descent_tuning": True,
"worker_start_method": method,
}
):
shutdown_compile_workers()
AsyncCompile.wait_pool_ready()
with fresh_cache():
compiled_f(a, b)
# Check that the input to coordinate descent (the resulting chosen config)
# is the same as the one in the lookup table
mock_autotune.assert_called_once()
args, _ = mock_autotune.call_args
self.assertTrue(isinstance(args[1], Config))
self.assertEqual(args[1].kwargs, autotune_config.kwargs)
self.assertEqual(args[1].num_warps, autotune_config.num_warps)
self.assertEqual(args[1].num_stages, autotune_config.num_stages)
if __name__ == "__main__":
run_tests()
|
# Owner(s): ["module: inductor"]
import torch
from torch._inductor import config
from torch._inductor.async_compile import AsyncCompile, shutdown_compile_workers
from torch._inductor.test_case import run_tests, TestCase
from torch._inductor.utils import fresh_cache
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
)
from torch.testing._internal.inductor_utils import (
GPU_TYPE,
requires_gpu,
requires_triton,
)
@instantiate_parametrized_tests
class TestAsyncCompile(TestCase):
@requires_gpu()
@requires_triton()
@parametrize("method", ("subprocess", "fork", "spawn"))
def test_pool(self, method):
def fn(x, y):
return x + y
x = torch.rand(10).to(GPU_TYPE)
y = torch.rand(10).to(GPU_TYPE)
with config.patch("worker_start_method", method):
shutdown_compile_workers()
AsyncCompile.wait_pool_ready()
with fresh_cache():
compiled_fn = torch.compile(fn)
self.assertEqual(fn(x, y), compiled_fn(x, y))
if __name__ == "__main__":
run_tests()
|
import pytest
from docarray import DocumentArray, Document
from docarray.array.weaviate import DocumentArrayWeaviate
import numpy as np
@pytest.fixture()
def docs():
return DocumentArray([Document(id=f'{i}') for i in range(1, 10)])
@pytest.mark.parametrize(
'to_delete',
[
0,
1,
4,
-1,
list(range(1, 4)),
[2, 4, 7, 1, 1],
slice(0, 2),
slice(2, 4),
slice(4, -1),
[True, True, False],
...,
],
)
def test_del_all(docs, to_delete):
doc_to_delete = docs[to_delete]
del docs[to_delete]
assert doc_to_delete not in docs
@pytest.mark.parametrize(
'to_delete, missing_id',
[
([True, False], ['1']),
([True, True, False], ['1', '2']),
([False, True], ['2']),
([False, False, True, True], ['3', '4']),
],
)
def test_del_boolean_mask(docs, to_delete, missing_id):
all_ids = docs[:, 'id']
# assert each missing_id is present before deleting
for m_id in missing_id:
assert m_id in docs[:, 'id']
del docs[to_delete]
# assert each missing_id is NOT present AFTER deleting
for m_id in missing_id:
assert m_id not in docs[:, 'id']
for m_id in filter(lambda id: id not in missing_id, all_ids):
assert m_id in docs[:, 'id']
@pytest.mark.parametrize(
['deleted_ids', 'expected_ids'],
[
(['1', '2', '3', '4'], ['5', '6', '7', '8', '9']),
(['2', '4', '7', '1'], ['3', '5', '6', '8', '9']),
],
)
def test_del_by_multiple_idx(docs, deleted_ids, expected_ids):
del docs[deleted_ids]
assert docs[:, 'id'] == expected_ids
@pytest.mark.parametrize(
'da_cls,config,persist',
[
(DocumentArrayWeaviate, {'n_dim': 10}, False),
(DocumentArrayWeaviate, {'name': 'Storage', 'n_dim': 10}, True),
],
)
def test_del_da_persist(da_cls, config, persist, docs, start_storage):
da = da_cls(docs, config=config)
del da
da2 = da_cls(config=config)
if persist:
assert len(da2) == len(docs)
else:
assert len(da2) == 0
def test_del_da_attribute():
da = DocumentArray(
[
Document(embedding=np.array([1, 2, 3]), text='d1'),
Document(embedding=np.array([1, 2, 3]), text='d2'),
]
)
q = DocumentArray(
[
Document(embedding=np.array([4, 5, 6]), text='q1'),
Document(embedding=np.array([2, 3, 4]), text='q1'),
]
)
da.match(q)
del da[...][:, 'embedding']
for d in da:
assert d.embedding is None
@pytest.mark.parametrize(
'storage, config',
[
('memory', None),
('weaviate', {'n_dim': 3, 'distance': 'l2-squared'}),
('annlite', {'n_dim': 3, 'metric': 'Euclidean'}),
('qdrant', {'n_dim': 3, 'distance': 'euclidean'}),
('elasticsearch', {'n_dim': 3, 'distance': 'l2_norm'}),
('sqlite', dict()),
('redis', {'n_dim': 3, 'distance': 'L2', 'flush': True}),
],
)
def test_del_subindex(storage, config):
n_dim = 3
subindex_configs = (
{'@c': dict()} if storage in ['sqlite', 'memory'] else {'@c': {'n_dim': 2}}
)
da = DocumentArray(
storage=storage,
config=config,
subindex_configs=subindex_configs,
)
with da:
da.extend(
[
Document(
id=str(i),
embedding=i * np.ones(n_dim),
chunks=[
Document(id=str(i) + '_0', embedding=np.array([i, i])),
Document(id=str(i) + '_1', embedding=np.array([i, i])),
],
)
for i in range(10)
]
)
del da['0']
assert len(da) == 9
assert len(da._subindices['@c']) == 18
del da[-2:]
assert len(da) == 7
assert len(da._subindices['@c']) == 14
def test_del_subindex_annlite_multimodal():
from docarray import dataclass
from docarray.typing import Text
@dataclass
class MMDoc:
my_text: Text
my_other_text: Text
n_dim = 3
da = DocumentArray(
storage='annlite',
config={'n_dim': n_dim, 'metric': 'Euclidean'},
subindex_configs={'@.[my_text, my_other_text]': {'n_dim': 2}},
)
num_docs = 10
docs_to_add = DocumentArray(
[
Document(MMDoc(my_text='hello', my_other_text='world'))
for _ in range(num_docs)
]
)
for i, d in enumerate(docs_to_add):
d.id = str(i)
d.embedding = i * np.ones(n_dim)
d.my_text.id = str(i) + '_0'
d.my_text.embedding = [i, i]
d.my_other_text.id = str(i) + '_1'
d.my_other_text.embedding = [i, i]
with da:
da.extend(docs_to_add)
del da['0']
assert len(da) == 9
assert len(da._subindices['@.[my_text, my_other_text]']) == 18
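# Hedged sketch of the boolean-mask semantics the tests above rely on: a
# mask shorter than the array only addresses its leading documents.
da = DocumentArray([Document(id='a'), Document(id='b'), Document(id='c')])
del da[[True, False]]  # deletes 'a' only; 'c' is never addressed
assert da[:, 'id'] == ['b', 'c']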
|
import pytest
from docarray import DocumentArray, Document
from docarray.array.weaviate import DocumentArrayWeaviate
import numpy as np
@pytest.fixture()
def docs():
return DocumentArray([Document(id=f'{i}') for i in range(1, 10)])
@pytest.mark.parametrize(
'to_delete',
[
0,
1,
4,
-1,
list(range(1, 4)),
[2, 4, 7, 1, 1],
slice(0, 2),
slice(2, 4),
slice(4, -1),
[True, True, False],
...,
],
)
def test_del_all(docs, to_delete):
doc_to_delete = docs[to_delete]
del docs[to_delete]
assert doc_to_delete not in docs
@pytest.mark.parametrize(
'to_delete, missing_id',
[
([True, False], ['1']),
([True, True, False], ['1', '2']),
([False, True], ['2']),
([False, False, True, True], ['3', '4']),
],
)
def test_del_boolean_mask(docs, to_delete, missing_id):
# assert each missing_id is present before deleting
for m_id in missing_id:
assert m_id in docs[:, 'id']
del docs[to_delete]
# assert each missing_id is NOT present AFTER deleting
for m_id in missing_id:
assert m_id not in docs[:, 'id']
@pytest.mark.parametrize(
['deleted_ids', 'expected_ids'],
[
(['1', '2', '3', '4'], ['5', '6', '7', '8', '9']),
(['2', '4', '7', '1'], ['3', '5', '6', '8', '9']),
],
)
def test_del_by_multiple_idx(docs, deleted_ids, expected_ids):
del docs[deleted_ids]
assert docs[:, 'id'] == expected_ids
@pytest.mark.parametrize(
'da_cls,config,persist',
[
(DocumentArrayWeaviate, {'n_dim': 10}, False),
(DocumentArrayWeaviate, {'name': 'Storage', 'n_dim': 10}, True),
],
)
def test_del_da_persist(da_cls, config, persist, docs, start_storage):
da = da_cls(docs, config=config)
del da
da2 = da_cls(config=config)
if persist:
assert len(da2) == len(docs)
else:
assert len(da2) == 0
def test_del_da_attribute():
da = DocumentArray(
[
Document(embedding=np.array([1, 2, 3]), text='d1'),
Document(embedding=np.array([1, 2, 3]), text='d2'),
]
)
q = DocumentArray(
[
Document(embedding=np.array([4, 5, 6]), text='q1'),
Document(embedding=np.array([2, 3, 4]), text='q1'),
]
)
da.match(q)
del da[...][:, 'embedding']
for d in da:
assert d.embedding is None
@pytest.mark.parametrize(
'storage, config',
[
('memory', None),
('weaviate', {'n_dim': 3, 'distance': 'l2-squared'}),
('annlite', {'n_dim': 3, 'metric': 'Euclidean'}),
('qdrant', {'n_dim': 3, 'distance': 'euclidean'}),
('elasticsearch', {'n_dim': 3, 'distance': 'l2_norm'}),
('sqlite', dict()),
('redis', {'n_dim': 3, 'distance': 'L2', 'flush': True}),
],
)
def test_del_subindex(storage, config):
n_dim = 3
subindex_configs = (
{'@c': dict()} if storage in ['sqlite', 'memory'] else {'@c': {'n_dim': 2}}
)
da = DocumentArray(
storage=storage,
config=config,
subindex_configs=subindex_configs,
)
with da:
da.extend(
[
Document(
id=str(i),
embedding=i * np.ones(n_dim),
chunks=[
Document(id=str(i) + '_0', embedding=np.array([i, i])),
Document(id=str(i) + '_1', embedding=np.array([i, i])),
],
)
for i in range(10)
]
)
del da['0']
assert len(da) == 9
assert len(da._subindices['@c']) == 18
del da[-2:]
assert len(da) == 7
assert len(da._subindices['@c']) == 14
def test_del_subindex_annlite_multimodal():
from docarray import dataclass
from docarray.typing import Text
@dataclass
class MMDoc:
my_text: Text
my_other_text: Text
n_dim = 3
da = DocumentArray(
storage='annlite',
config={'n_dim': n_dim, 'metric': 'Euclidean'},
subindex_configs={'@.[my_text, my_other_text]': {'n_dim': 2}},
)
num_docs = 10
docs_to_add = DocumentArray(
[
Document(MMDoc(my_text='hello', my_other_text='world'))
for _ in range(num_docs)
]
)
for i, d in enumerate(docs_to_add):
d.id = str(i)
d.embedding = i * np.ones(n_dim)
d.my_text.id = str(i) + '_0'
d.my_text.embedding = [i, i]
d.my_other_text.id = str(i) + '_1'
d.my_other_text.embedding = [i, i]
with da:
da.extend(docs_to_add)
del da['0']
assert len(da) == 9
assert len(da._subindices['@.[my_text, my_other_text]']) == 18
|
"""Helper functions for managing the LangChain API.
This module is only relevant for LangChain developers, not for users.
.. warning::
This module and its submodules are for internal use only. Do not use them
in your own code. We may change the API at any time with no warning.
"""
from .deprecation import (
LangChainDeprecationWarning,
deprecated,
suppress_langchain_deprecation_warning,
surface_langchain_deprecation_warnings,
warn_deprecated,
)
from .module_import import create_importer
__all__ = [
"LangChainDeprecationWarning",
"create_importer",
"deprecated",
"suppress_langchain_deprecation_warning",
"surface_langchain_deprecation_warnings",
"warn_deprecated",
]
|
"""Helper functions for managing the LangChain API.
This module is only relevant for LangChain developers, not for users.
.. warning::
This module and its submodules are for internal use only. Do not use them
in your own code. We may change the API at any time with no warning.
"""
from .deprecation import (
LangChainDeprecationWarning,
deprecated,
suppress_langchain_deprecation_warning,
surface_langchain_deprecation_warnings,
warn_deprecated,
)
from .module_import import create_importer
__all__ = [
"deprecated",
"LangChainDeprecationWarning",
"suppress_langchain_deprecation_warning",
"surface_langchain_deprecation_warnings",
"warn_deprecated",
"create_importer",
]
|
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
T = TypeVar('T', bound='VideoTorchTensor')
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
@_register_proto(proto_type_name='video_torch_tensor')
class VideoTorchTensor(TorchTensor, VideoTensorMixin, metaclass=metaTorchAndNode):
"""
Subclass of [`TorchTensor`][docarray.typing.TorchTensor], to represent a video tensor.
Adds video-specific features to the tensor.
---
```python
from typing import Optional
import torch
from docarray import BaseDoc
from docarray.typing import VideoTorchTensor, VideoUrl
class MyVideoDoc(BaseDoc):
title: str
url: Optional[VideoUrl]
video_tensor: Optional[VideoTorchTensor]
doc_1 = MyVideoDoc(
title='my_first_video_doc',
video_tensor=torch.randn(size=(100, 224, 224, 3)),
)
# doc_1.video_tensor.save(file_path='file_1.mp4')
doc_2 = MyVideoDoc(
title='my_second_video_doc',
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true',
)
doc_2.video_tensor = doc_2.url.load().video
# doc_2.video_tensor.save(file_path='file_2.wav')
```
---
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, List[Any], Tuple[Any], Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
tensor = super().validate(value=value, field=field, config=config)
return cls.validate_shape(value=tensor)
|
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
T = TypeVar('T', bound='VideoTorchTensor')
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
@_register_proto(proto_type_name='video_torch_tensor')
class VideoTorchTensor(TorchTensor, VideoTensorMixin, metaclass=metaTorchAndNode):
"""
Subclass of TorchTensor, to represent a video tensor.
Adds video-specific features to the tensor.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
import torch
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.typing import VideoTorchTensor, VideoUrl
class MyVideoDoc(BaseDoc):
title: str
url: Optional[VideoUrl]
video_tensor: Optional[VideoTorchTensor]
doc_1 = MyVideoDoc(
title='my_first_video_doc',
video_tensor=torch.randn(size=(100, 224, 224, 3)),
)
doc_1.video_tensor.save(file_path='file_1.wav')
doc_2 = MyVideoDoc(
title='my_second_video_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.video_tensor = parse_obj_as(VideoTorchTensor, doc_2.url.load())
doc_2.video_tensor.save(file_path='file_2.wav')
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, List[Any], Tuple[Any], Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
tensor = super().validate(value=value, field=field, config=config)
return cls.validate_shape(value=tensor)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .manager import ManagerMeta, ManagerMixin
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
deprecated_function, has_method,
import_modules_from_strings, is_list_of,
is_method_overridden, is_seq_of, is_str, is_tuple_of,
iter_cast, list_cast, requires_executable, requires_package,
slice_list, to_1tuple, to_2tuple, to_3tuple, to_4tuple,
to_ntuple, tuple_cast)
from .package_utils import (call_command, get_installed_path, install_package,
is_installed)
from .path import (check_file_exist, fopen, is_abs, is_filepath,
mkdir_or_exist, scandir, symlink)
from .progressbar import (ProgressBar, track_iter_progress,
track_parallel_progress, track_progress)
from .timer import Timer, TimerError, check_time
from .version_utils import digit_version, get_git_hash
__all__ = [
'is_str', 'iter_cast', 'list_cast', 'tuple_cast', 'is_seq_of',
'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', 'symlink',
'scandir', 'deprecated_api_warning', 'import_modules_from_strings',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_installed', 'call_command', 'get_installed_path', 'install_package',
'is_abs', 'is_method_overridden', 'has_method', 'digit_version',
'get_git_hash', 'ManagerMeta', 'ManagerMixin', 'Timer', 'check_time',
'TimerError', 'ProgressBar', 'track_iter_progress',
'track_parallel_progress', 'track_progress', 'deprecated_function'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .manager import ManagerMeta, ManagerMixin
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
has_method, import_modules_from_strings, is_list_of,
is_method_overridden, is_seq_of, is_str, is_tuple_of,
iter_cast, list_cast, requires_executable, requires_package,
slice_list, to_1tuple, to_2tuple, to_3tuple, to_4tuple,
to_ntuple, tuple_cast)
from .package_utils import (call_command, get_installed_path, install_package,
is_installed)
from .path import (check_file_exist, fopen, is_abs, is_filepath,
mkdir_or_exist, scandir, symlink)
from .progressbar import (ProgressBar, track_iter_progress,
track_parallel_progress, track_progress)
from .timer import Timer, TimerError, check_time
from .version_utils import digit_version, get_git_hash
__all__ = [
'is_str', 'iter_cast', 'list_cast', 'tuple_cast', 'is_seq_of',
'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', 'symlink',
'scandir', 'deprecated_api_warning', 'import_modules_from_strings',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_installed', 'call_command', 'get_installed_path', 'install_package',
'is_abs', 'is_method_overridden', 'has_method', 'digit_version',
'get_git_hash', 'ManagerMeta', 'ManagerMixin', 'Timer', 'check_time',
'TimerError', 'ProgressBar', 'track_iter_progress',
'track_parallel_progress', 'track_progress'
]
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
frozen_stages=0,
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
init_cfg=dict(
type='Pretrained', checkpoint='./mocov2_r50_800ep_pretrain.pth')))
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
frozen_stages=0,
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
init_cfg=dict(
type='Pretrained', checkpoint='./mocov2_r50_800ep_pretrain.pth')))
|
from torchaudio._internal.module_utils import dropping_support
from ._alignment import forced_align as _forced_align, merge_tokens, TokenSpan
from .filtering import (
allpass_biquad,
band_biquad,
bandpass_biquad,
bandreject_biquad,
bass_biquad,
biquad,
contrast,
dcshift,
deemph_biquad,
dither,
equalizer_biquad,
filtfilt,
flanger,
gain,
highpass_biquad,
lfilter,
lowpass_biquad,
overdrive,
phaser,
riaa_biquad,
treble_biquad,
vad,
)
forced_align = dropping_support(_forced_align)
from .functional import (
add_noise,
amplitude_to_DB,
apply_beamforming,
apply_codec,
compute_deltas,
convolve,
create_dct,
DB_to_amplitude,
deemphasis,
detect_pitch_frequency,
edit_distance,
fftconvolve,
frechet_distance,
griffinlim,
inverse_spectrogram,
linear_fbanks,
loudness,
mask_along_axis,
mask_along_axis_iid,
melscale_fbanks,
mu_law_decoding,
mu_law_encoding,
mvdr_weights_rtf,
mvdr_weights_souden,
phase_vocoder,
pitch_shift,
preemphasis,
psd,
resample,
rnnt_loss as _rnnt_loss,
rtf_evd,
rtf_power,
sliding_window_cmn,
spectral_centroid,
spectrogram,
speed,
)
rnnt_loss = dropping_support(_rnnt_loss)
__all__ = [
"amplitude_to_DB",
"compute_deltas",
"create_dct",
"melscale_fbanks",
"linear_fbanks",
"DB_to_amplitude",
"loudness",
"detect_pitch_frequency",
"griffinlim",
"mask_along_axis",
"mask_along_axis_iid",
"mu_law_encoding",
"mu_law_decoding",
"phase_vocoder",
"sliding_window_cmn",
"spectrogram",
"inverse_spectrogram",
"spectral_centroid",
"allpass_biquad",
"band_biquad",
"bandpass_biquad",
"bandreject_biquad",
"bass_biquad",
"biquad",
"contrast",
"dither",
"dcshift",
"deemph_biquad",
"equalizer_biquad",
"filtfilt",
"flanger",
"forced_align",
"merge_tokens",
"TokenSpan",
"gain",
"highpass_biquad",
"lfilter",
"lowpass_biquad",
"overdrive",
"phaser",
"riaa_biquad",
"treble_biquad",
"vad",
"apply_codec",
"resample",
"edit_distance",
"pitch_shift",
"rnnt_loss",
"psd",
"mvdr_weights_souden",
"mvdr_weights_rtf",
"rtf_evd",
"rtf_power",
"apply_beamforming",
"fftconvolve",
"convolve",
"add_noise",
"speed",
"preemphasis",
"deemphasis",
"frechet_distance",
]
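# Quick sketch of one re-exported op; shapes and values are illustrative.
import torch
import torchaudio.functional as F

power_spec = torch.rand(1, 201, 100)  # (channel, freq, time) power spectrogram
db = F.amplitude_to_DB(power_spec, multiplier=10.0, amin=1e-10,
                       db_multiplier=0.0, top_db=80.0)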
|
from ._alignment import forced_align, merge_tokens, TokenSpan
from .filtering import (
allpass_biquad,
band_biquad,
bandpass_biquad,
bandreject_biquad,
bass_biquad,
biquad,
contrast,
dcshift,
deemph_biquad,
dither,
equalizer_biquad,
filtfilt,
flanger,
gain,
highpass_biquad,
lfilter,
lowpass_biquad,
overdrive,
phaser,
riaa_biquad,
treble_biquad,
vad,
)
from .functional import (
add_noise,
amplitude_to_DB,
apply_beamforming,
apply_codec,
compute_deltas,
convolve,
create_dct,
DB_to_amplitude,
deemphasis,
detect_pitch_frequency,
edit_distance,
fftconvolve,
frechet_distance,
griffinlim,
inverse_spectrogram,
linear_fbanks,
loudness,
mask_along_axis,
mask_along_axis_iid,
melscale_fbanks,
mu_law_decoding,
mu_law_encoding,
mvdr_weights_rtf,
mvdr_weights_souden,
phase_vocoder,
pitch_shift,
preemphasis,
psd,
resample,
rnnt_loss,
rtf_evd,
rtf_power,
sliding_window_cmn,
spectral_centroid,
spectrogram,
speed,
)
__all__ = [
"amplitude_to_DB",
"compute_deltas",
"create_dct",
"melscale_fbanks",
"linear_fbanks",
"DB_to_amplitude",
"loudness",
"detect_pitch_frequency",
"griffinlim",
"mask_along_axis",
"mask_along_axis_iid",
"mu_law_encoding",
"mu_law_decoding",
"phase_vocoder",
"sliding_window_cmn",
"spectrogram",
"inverse_spectrogram",
"spectral_centroid",
"allpass_biquad",
"band_biquad",
"bandpass_biquad",
"bandreject_biquad",
"bass_biquad",
"biquad",
"contrast",
"dither",
"dcshift",
"deemph_biquad",
"equalizer_biquad",
"filtfilt",
"flanger",
"forced_align",
"merge_tokens",
"TokenSpan",
"gain",
"highpass_biquad",
"lfilter",
"lowpass_biquad",
"overdrive",
"phaser",
"riaa_biquad",
"treble_biquad",
"vad",
"apply_codec",
"resample",
"edit_distance",
"pitch_shift",
"rnnt_loss",
"psd",
"mvdr_weights_souden",
"mvdr_weights_rtf",
"rtf_evd",
"rtf_power",
"apply_beamforming",
"fftconvolve",
"convolve",
"add_noise",
"speed",
"preemphasis",
"deemphasis",
"frechet_distance",
]
|
import warnings
from typing import Any
from langchain_core.memory import BaseMemory
from pydantic import field_validator
from langchain.memory.chat_memory import BaseChatMemory
class CombinedMemory(BaseMemory):
"""Combining multiple memories' data together."""
memories: list[BaseMemory]
"""For tracking all the memories that should be accessed."""
@field_validator("memories")
@classmethod
def check_repeated_memory_variable(
cls,
value: list[BaseMemory],
) -> list[BaseMemory]:
all_variables: set[str] = set()
for val in value:
overlap = all_variables.intersection(val.memory_variables)
if overlap:
msg = (
f"The same variables {overlap} are found in multiple"
"memory object, which is not allowed by CombinedMemory."
)
raise ValueError(msg)
all_variables |= set(val.memory_variables)
return value
@field_validator("memories")
@classmethod
def check_input_key(cls, value: list[BaseMemory]) -> list[BaseMemory]:
"""Check that if memories are of type BaseChatMemory that input keys exist."""
for val in value:
if isinstance(val, BaseChatMemory) and val.input_key is None:
warnings.warn(
"When using CombinedMemory, "
"input keys should be so the input is known. "
f" Was not set on {val}",
)
return value
@property
def memory_variables(self) -> list[str]:
"""All the memory variables that this instance provides."""
"""Collected from the all the linked memories."""
memory_variables = []
for memory in self.memories:
memory_variables.extend(memory.memory_variables)
return memory_variables
def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, str]:
"""Load all vars from sub-memories."""
memory_data: dict[str, Any] = {}
# Collect vars from all sub-memories
for memory in self.memories:
data = memory.load_memory_variables(inputs)
for key, value in data.items():
if key in memory_data:
msg = f"The variable {key} is repeated in the CombinedMemory."
raise ValueError(msg)
memory_data[key] = value
return memory_data
def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
"""Save context from this session for every memory."""
# Save context for all sub-memories
for memory in self.memories:
memory.save_context(inputs, outputs)
def clear(self) -> None:
"""Clear context from this session for every memory."""
for memory in self.memories:
memory.clear()
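# A minimal sketch combining two buffer memories; the keys are illustrative.
from langchain.memory import ConversationBufferMemory

chat = ConversationBufferMemory(memory_key="chat_history", input_key="input")
notes = ConversationBufferMemory(memory_key="notes", input_key="input")
memory = CombinedMemory(memories=[chat, notes])
memory.save_context({"input": "hi"}, {"output": "hello"})
vars_ = memory.load_memory_variables({"input": "hi"})
# e.g. {'chat_history': 'Human: hi\nAI: hello', 'notes': 'Human: hi\nAI: hello'}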
|
import warnings
from typing import Any
from langchain_core.memory import BaseMemory
from pydantic import field_validator
from langchain.memory.chat_memory import BaseChatMemory
class CombinedMemory(BaseMemory):
"""Combining multiple memories' data together."""
memories: list[BaseMemory]
"""For tracking all the memories that should be accessed."""
@field_validator("memories")
@classmethod
def check_repeated_memory_variable(
cls, value: list[BaseMemory]
) -> list[BaseMemory]:
all_variables: set[str] = set()
for val in value:
overlap = all_variables.intersection(val.memory_variables)
if overlap:
msg = (
f"The same variables {overlap} are found in multiple"
"memory object, which is not allowed by CombinedMemory."
)
raise ValueError(msg)
all_variables |= set(val.memory_variables)
return value
@field_validator("memories")
@classmethod
def check_input_key(cls, value: list[BaseMemory]) -> list[BaseMemory]:
"""Check that if memories are of type BaseChatMemory that input keys exist."""
for val in value:
if isinstance(val, BaseChatMemory) and val.input_key is None:
warnings.warn(
"When using CombinedMemory, "
"input keys should be so the input is known. "
f" Was not set on {val}"
)
return value
@property
def memory_variables(self) -> list[str]:
"""All the memory variables that this instance provides."""
"""Collected from the all the linked memories."""
memory_variables = []
for memory in self.memories:
memory_variables.extend(memory.memory_variables)
return memory_variables
def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, str]:
"""Load all vars from sub-memories."""
memory_data: dict[str, Any] = {}
# Collect vars from all sub-memories
for memory in self.memories:
data = memory.load_memory_variables(inputs)
for key, value in data.items():
if key in memory_data:
msg = f"The variable {key} is repeated in the CombinedMemory."
raise ValueError(msg)
memory_data[key] = value
return memory_data
def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
"""Save context from this session for every memory."""
# Save context for all sub-memories
for memory in self.memories:
memory.save_context(inputs, outputs)
def clear(self) -> None:
"""Clear context from this session for every memory."""
for memory in self.memories:
memory.clear()
|
from typing import Dict, Optional, Union
import pytest
from docarray.typing import NdArray, TorchTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._typing import is_tensor_union, is_type_tensor
from docarray.utils.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
from docarray.typing import TensorFlowTensor
else:
TensorFlowTensor = None
@pytest.mark.parametrize(
'type_, is_tensor',
[
(int, False),
(TorchTensor, True),
(NdArray, True),
(AbstractTensor, True),
(Optional[TorchTensor], False),
(Union[TorchTensor, NdArray], False),
(None, False),
(Dict, False),
],
)
def test_is_type_tensor(type_, is_tensor):
assert is_type_tensor(type_) == is_tensor
@pytest.mark.tensorflow
@pytest.mark.parametrize(
'type_, is_tensor',
[
(TensorFlowTensor, True),
(Optional[TensorFlowTensor], False),
],
)
def test_is_type_tensor_with_tf(type_, is_tensor):
assert is_type_tensor(type_) == is_tensor
@pytest.mark.parametrize(
'type_, is_union_tensor',
[
(int, False),
(TorchTensor, False),
(NdArray, False),
(Optional[TorchTensor], True),
(Optional[NdArray], True),
(Union[NdArray, TorchTensor], True),
(Union[NdArray, TorchTensor, AbstractTensor], True),
(Union[NdArray, TorchTensor, Optional[TorchTensor]], True),
(Union[NdArray, TorchTensor, None], True),
],
)
def test_is_union_type_tensor(type_, is_union_tensor):
assert is_tensor_union(type_) == is_union_tensor
@pytest.mark.tensorflow
@pytest.mark.parametrize(
'type_, is_union_tensor',
[
(TensorFlowTensor, False),
(Optional[TensorFlowTensor], True),
(Union[NdArray, TorchTensor, TensorFlowTensor], True),
(Union[NdArray, TorchTensor, Optional[TensorFlowTensor]], True),
],
)
def test_is_union_type_tensor_with_tf(type_, is_union_tensor):
assert is_tensor_union(type_) == is_union_tensor
|
from typing import Dict, Optional, Union
import pytest
from docarray.typing import NdArray, TorchTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._typing import is_tensor_union, is_type_tensor
try:
from docarray.typing import TensorFlowTensor
except (ImportError, TypeError):
TensorFlowTensor = None
@pytest.mark.parametrize(
'type_, is_tensor',
[
(int, False),
(TorchTensor, True),
(NdArray, True),
(AbstractTensor, True),
(Optional[TorchTensor], False),
(Union[TorchTensor, NdArray], False),
(None, False),
(Dict, False),
],
)
def test_is_type_tensor(type_, is_tensor):
assert is_type_tensor(type_) == is_tensor
@pytest.mark.tensorflow
@pytest.mark.parametrize(
'type_, is_tensor',
[
(TensorFlowTensor, True),
(Optional[TensorFlowTensor], False),
],
)
def test_is_type_tensor_with_tf(type_, is_tensor):
assert is_type_tensor(type_) == is_tensor
@pytest.mark.parametrize(
'type_, is_union_tensor',
[
(int, False),
(TorchTensor, False),
(NdArray, False),
(Optional[TorchTensor], True),
(Optional[NdArray], True),
(Union[NdArray, TorchTensor], True),
(Union[NdArray, TorchTensor, AbstractTensor], True),
(Union[NdArray, TorchTensor, Optional[TorchTensor]], True),
(Union[NdArray, TorchTensor, None], True),
],
)
def test_is_union_type_tensor(type_, is_union_tensor):
assert is_tensor_union(type_) == is_union_tensor
@pytest.mark.tensorflow
@pytest.mark.parametrize(
'type_, is_union_tensor',
[
(TensorFlowTensor, False),
(Optional[TensorFlowTensor], True),
(Union[NdArray, TorchTensor, TensorFlowTensor], True),
(Union[NdArray, TorchTensor, Optional[TensorFlowTensor]], True),
],
)
def test_is_union_type_tensor_with_tf(type_, is_union_tensor):
assert is_tensor_union(type_) == is_union_tensor
|
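Both test files above exercise the same two predicates. As a hedged illustration of the behavior being tested (this is not docarray's implementation, just one way such checks can be written with typing introspection):

from typing import Union, get_args, get_origin

from docarray.typing.tensor.abstract_tensor import AbstractTensor


def sketch_is_type_tensor(type_) -> bool:
    # A bare annotation counts only if it is a tensor subclass.
    return isinstance(type_, type) and issubclass(type_, AbstractTensor)


def sketch_is_tensor_union(type_) -> bool:
    # A Union (including Optional[...]) counts if every member is a tensor
    # type or NoneType.
    if get_origin(type_) is not Union:
        return False
    return all(
        arg is type(None) or sketch_is_type_tensor(arg) for arg in get_args(type_)
    )

This reproduces the parametrized expectations above, e.g. Optional[TorchTensor] is a tensor union but not a tensor type.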
# Copyright (c) OpenMMLab. All rights reserved.
import math
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, auto_fp16
from mmdet.registry import MODELS
@MODELS.register_module()
class CTResNetNeck(BaseModule):
"""The neck used in `CenterNet <https://arxiv.org/abs/1904.07850>`_ for
object classification and box regression.
Args:
in_channel (int): Number of input channels.
num_deconv_filters (tuple[int]): Number of filters per stage.
num_deconv_kernels (tuple[int]): Number of kernels per stage.
use_dcn (bool): If True, use DCNv2. Default: True.
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
in_channel,
num_deconv_filters,
num_deconv_kernels,
use_dcn=True,
init_cfg=None):
super(CTResNetNeck, self).__init__(init_cfg)
assert len(num_deconv_filters) == len(num_deconv_kernels)
self.fp16_enabled = False
self.use_dcn = use_dcn
self.in_channel = in_channel
self.deconv_layers = self._make_deconv_layer(num_deconv_filters,
num_deconv_kernels)
def _make_deconv_layer(self, num_deconv_filters, num_deconv_kernels):
"""use deconv layers to upsample backbone's output."""
layers = []
for i in range(len(num_deconv_filters)):
feat_channel = num_deconv_filters[i]
conv_module = ConvModule(
self.in_channel,
feat_channel,
3,
padding=1,
conv_cfg=dict(type='DCNv2') if self.use_dcn else None,
norm_cfg=dict(type='BN'))
layers.append(conv_module)
upsample_module = ConvModule(
feat_channel,
feat_channel,
num_deconv_kernels[i],
stride=2,
padding=1,
conv_cfg=dict(type='deconv'),
norm_cfg=dict(type='BN'))
layers.append(upsample_module)
self.in_channel = feat_channel
return nn.Sequential(*layers)
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.ConvTranspose2d):
# In order to be consistent with the source code,
# reset the ConvTranspose2d initialization parameters
m.reset_parameters()
# Simulated bilinear upsampling kernel
w = m.weight.data
f = math.ceil(w.size(2) / 2)
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(w.size(2)):
for j in range(w.size(3)):
w[0, 0, i, j] = \
(1 - math.fabs(i / f - c)) * (
1 - math.fabs(j / f - c))
for c in range(1, w.size(0)):
w[c, 0, :, :] = w[0, 0, :, :]
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# self.use_dcn is False
elif not self.use_dcn and isinstance(m, nn.Conv2d):
# In order to be consistent with the source code,
# reset the Conv2d initialization parameters
m.reset_parameters()
@auto_fp16()
def forward(self, inputs):
assert isinstance(inputs, (list, tuple))
outs = self.deconv_layers(inputs[-1])
return outs,
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, auto_fp16
from mmdet.models.builder import NECKS
@NECKS.register_module()
class CTResNetNeck(BaseModule):
"""The neck used in `CenterNet <https://arxiv.org/abs/1904.07850>`_ for
object classification and box regression.
Args:
in_channel (int): Number of input channels.
num_deconv_filters (tuple[int]): Number of filters per stage.
num_deconv_kernels (tuple[int]): Number of kernels per stage.
use_dcn (bool): If True, use DCNv2. Default: True.
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
in_channel,
num_deconv_filters,
num_deconv_kernels,
use_dcn=True,
init_cfg=None):
super(CTResNetNeck, self).__init__(init_cfg)
assert len(num_deconv_filters) == len(num_deconv_kernels)
self.fp16_enabled = False
self.use_dcn = use_dcn
self.in_channel = in_channel
self.deconv_layers = self._make_deconv_layer(num_deconv_filters,
num_deconv_kernels)
def _make_deconv_layer(self, num_deconv_filters, num_deconv_kernels):
"""use deconv layers to upsample backbone's output."""
layers = []
for i in range(len(num_deconv_filters)):
feat_channel = num_deconv_filters[i]
conv_module = ConvModule(
self.in_channel,
feat_channel,
3,
padding=1,
conv_cfg=dict(type='DCNv2') if self.use_dcn else None,
norm_cfg=dict(type='BN'))
layers.append(conv_module)
upsample_module = ConvModule(
feat_channel,
feat_channel,
num_deconv_kernels[i],
stride=2,
padding=1,
conv_cfg=dict(type='deconv'),
norm_cfg=dict(type='BN'))
layers.append(upsample_module)
self.in_channel = feat_channel
return nn.Sequential(*layers)
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.ConvTranspose2d):
# In order to be consistent with the source code,
# reset the ConvTranspose2d initialization parameters
m.reset_parameters()
# Simulated bilinear upsampling kernel
w = m.weight.data
f = math.ceil(w.size(2) / 2)
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(w.size(2)):
for j in range(w.size(3)):
w[0, 0, i, j] = \
(1 - math.fabs(i / f - c)) * (
1 - math.fabs(j / f - c))
for c in range(1, w.size(0)):
w[c, 0, :, :] = w[0, 0, :, :]
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# self.use_dcn is False
elif not self.use_dcn and isinstance(m, nn.Conv2d):
# In order to be consistent with the source code,
# reset the Conv2d initialization parameters
m.reset_parameters()
@auto_fp16()
def forward(self, inputs):
assert isinstance(inputs, (list, tuple))
outs = self.deconv_layers(inputs[-1])
return outs,
|
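An illustrative construction of the neck defined in both files, using the filter and kernel sizes commonly paired with a ResNet-18 backbone in CenterNet (the specific values are assumptions, not requirements of the class):

import torch

neck = CTResNetNeck(
    in_channel=512,                     # channels of the last backbone stage
    num_deconv_filters=(256, 128, 64),  # one entry per upsampling stage
    num_deconv_kernels=(4, 4, 4),
    use_dcn=False,                      # plain convs; DCNv2 needs the compiled op
)
neck.init_weights()
feats = [torch.randn(1, 512, 16, 16)]   # forward expects a list/tuple of features
out, = neck(feats)                      # three stride-2 deconvs: 16 -> 128
print(out.shape)                        # torch.Size([1, 64, 128, 128])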
from __future__ import annotations
from .CSRLoss import CSRLoss, CSRReconstructionLoss
from .FlopsLoss import FlopsLoss
from .SparseAnglELoss import SparseAnglELoss
from .SparseCoSENTLoss import SparseCoSENTLoss
from .SparseCosineSimilarityLoss import SparseCosineSimilarityLoss
from .SparseDistillKLDivLoss import SparseDistillKLDivLoss
from .SparseMarginMSELoss import SparseMarginMSELoss
from .SparseMSELoss import SparseMSELoss
from .SparseMultipleNegativesRankingLoss import SparseMultipleNegativesRankingLoss
from .SparseTripletLoss import SparseTripletLoss
from .SpladeLoss import SpladeLoss
__all__ = [
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
"SparseCoSENTLoss",
"SparseTripletLoss",
"SparseMarginMSELoss",
"SparseCosineSimilarityLoss",
"SparseMSELoss",
"SparseAnglELoss",
"SparseDistillKLDivLoss",
"FlopsLoss",
"SpladeLoss",
]
# TODO: Test cached losses
|
from __future__ import annotations
from .CSRLoss import CSRLoss, CSRReconstructionLoss
from .FlopsLoss import FlopsLoss
from .SparseAnglELoss import SparseAnglELoss
from .SparseCachedGISTEmbedLoss import SparseCachedGISTEmbedLoss
from .SparseCachedMultipleNegativesRankingLoss import SparseCachedMultipleNegativesRankingLoss
from .SparseCoSENTLoss import SparseCoSENTLoss
from .SparseCosineSimilarityLoss import SparseCosineSimilarityLoss
from .SparseDistillKLDivLoss import SparseDistillKLDivLoss
from .SparseGISTEmbedLoss import SparseGISTEmbedLoss
from .SparseMarginMSELoss import SparseMarginMSELoss
from .SparseMSELoss import SparseMSELoss
from .SparseMultipleNegativesRankingLoss import SparseMultipleNegativesRankingLoss
from .SparseTripletLoss import SparseTripletLoss
from .SpladeLoss import SpladeLoss
__all__ = [
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
"SparseCoSENTLoss",
"SparseTripletLoss",
"SparseCachedMultipleNegativesRankingLoss",
"SparseMarginMSELoss",
"SparseGISTEmbedLoss",
"SparseCachedGISTEmbedLoss",
"SparseCosineSimilarityLoss",
"SparseMSELoss",
"SparseAnglELoss",
"SparseDistillKLDivLoss",
"FlopsLoss",
"SpladeLoss",
]
# TODO: Test cached losses
|
"""Callback Handler that writes to a file."""
from __future__ import annotations
from pathlib import Path
from typing import TYPE_CHECKING, Any, Optional, TextIO, cast
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.utils.input import print_text
if TYPE_CHECKING:
from langchain_core.agents import AgentAction, AgentFinish
class FileCallbackHandler(BaseCallbackHandler):
"""Callback Handler that writes to a file.
Parameters:
filename: The file to write to.
mode: The mode to open the file in. Defaults to "a".
color: The color to use for the text.
"""
def __init__(
self, filename: str, mode: str = "a", color: Optional[str] = None
) -> None:
"""Initialize callback handler.
Args:
filename: The filename to write to.
mode: The mode to open the file in. Defaults to "a".
color: The color to use for the text. Defaults to None.
"""
self.file = cast(TextIO, Path(filename).open(mode, encoding="utf-8")) # noqa: SIM115
self.color = color
def __del__(self) -> None:
"""Destructor to cleanup when done."""
self.file.close()
def on_chain_start(
self, serialized: dict[str, Any], inputs: dict[str, Any], **kwargs: Any
) -> None:
"""Print out that we are entering a chain.
Args:
serialized (Dict[str, Any]): The serialized chain.
inputs (Dict[str, Any]): The inputs to the chain.
**kwargs (Any): Additional keyword arguments.
"""
if "name" in kwargs:
name = kwargs["name"]
else:
if serialized:
name = serialized.get("name", serialized.get("id", ["<unknown>"])[-1])
else:
name = "<unknown>"
print_text(
f"\n\n\033[1m> Entering new {name} chain...\033[0m",
end="\n",
file=self.file,
)
def on_chain_end(self, outputs: dict[str, Any], **kwargs: Any) -> None:
"""Print out that we finished a chain.
Args:
outputs (Dict[str, Any]): The outputs of the chain.
**kwargs (Any): Additional keyword arguments.
"""
print_text("\n\033[1m> Finished chain.\033[0m", end="\n", file=self.file)
def on_agent_action(
self, action: AgentAction, color: Optional[str] = None, **kwargs: Any
) -> Any:
"""Run on agent action.
Args:
action (AgentAction): The agent action.
color (Optional[str], optional): The color to use for the text.
Defaults to None.
**kwargs (Any): Additional keyword arguments.
"""
print_text(action.log, color=color or self.color, file=self.file)
def on_tool_end(
self,
output: str,
color: Optional[str] = None,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
"""If not the final action, print out observation.
Args:
output (str): The output to print.
color (Optional[str], optional): The color to use for the text.
Defaults to None.
observation_prefix (Optional[str], optional): The observation prefix.
Defaults to None.
llm_prefix (Optional[str], optional): The LLM prefix.
Defaults to None.
**kwargs (Any): Additional keyword arguments.
"""
if observation_prefix is not None:
print_text(f"\n{observation_prefix}", file=self.file)
print_text(output, color=color or self.color, file=self.file)
if llm_prefix is not None:
print_text(f"\n{llm_prefix}", file=self.file)
def on_text(
self, text: str, color: Optional[str] = None, end: str = "", **kwargs: Any
) -> None:
"""Run when the agent ends.
Args:
text (str): The text to print.
color (Optional[str], optional): The color to use for the text.
Defaults to None.
end (str, optional): The end character. Defaults to "".
**kwargs (Any): Additional keyword arguments.
"""
print_text(text, color=color or self.color, end=end, file=self.file)
def on_agent_finish(
self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any
) -> None:
"""Run on the agent end.
Args:
finish (AgentFinish): The agent finish.
color (Optional[str], optional): The color to use for the text.
Defaults to None.
**kwargs (Any): Additional keyword arguments.
"""
print_text(finish.log, color=color or self.color, end="\n", file=self.file)
|
"""Callback Handler that writes to a file."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Optional, TextIO, cast
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.utils.input import print_text
if TYPE_CHECKING:
from langchain_core.agents import AgentAction, AgentFinish
class FileCallbackHandler(BaseCallbackHandler):
"""Callback Handler that writes to a file.
Parameters:
filename: The file to write to.
mode: The mode to open the file in. Defaults to "a".
color: The color to use for the text.
"""
def __init__(
self, filename: str, mode: str = "a", color: Optional[str] = None
) -> None:
"""Initialize callback handler.
Args:
filename: The filename to write to.
mode: The mode to open the file in. Defaults to "a".
color: The color to use for the text. Defaults to None.
"""
self.file = cast(TextIO, open(filename, mode, encoding="utf-8")) # noqa: SIM115
self.color = color
def __del__(self) -> None:
"""Destructor to cleanup when done."""
self.file.close()
def on_chain_start(
self, serialized: dict[str, Any], inputs: dict[str, Any], **kwargs: Any
) -> None:
"""Print out that we are entering a chain.
Args:
serialized (Dict[str, Any]): The serialized chain.
inputs (Dict[str, Any]): The inputs to the chain.
**kwargs (Any): Additional keyword arguments.
"""
if "name" in kwargs:
name = kwargs["name"]
else:
if serialized:
name = serialized.get("name", serialized.get("id", ["<unknown>"])[-1])
else:
name = "<unknown>"
print_text(
f"\n\n\033[1m> Entering new {name} chain...\033[0m",
end="\n",
file=self.file,
)
def on_chain_end(self, outputs: dict[str, Any], **kwargs: Any) -> None:
"""Print out that we finished a chain.
Args:
outputs (Dict[str, Any]): The outputs of the chain.
**kwargs (Any): Additional keyword arguments.
"""
print_text("\n\033[1m> Finished chain.\033[0m", end="\n", file=self.file)
def on_agent_action(
self, action: AgentAction, color: Optional[str] = None, **kwargs: Any
) -> Any:
"""Run on agent action.
Args:
action (AgentAction): The agent action.
color (Optional[str], optional): The color to use for the text.
Defaults to None.
**kwargs (Any): Additional keyword arguments.
"""
print_text(action.log, color=color or self.color, file=self.file)
def on_tool_end(
self,
output: str,
color: Optional[str] = None,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
"""If not the final action, print out observation.
Args:
output (str): The output to print.
color (Optional[str], optional): The color to use for the text.
Defaults to None.
observation_prefix (Optional[str], optional): The observation prefix.
Defaults to None.
llm_prefix (Optional[str], optional): The LLM prefix.
Defaults to None.
**kwargs (Any): Additional keyword arguments.
"""
if observation_prefix is not None:
print_text(f"\n{observation_prefix}", file=self.file)
print_text(output, color=color or self.color, file=self.file)
if llm_prefix is not None:
print_text(f"\n{llm_prefix}", file=self.file)
def on_text(
self, text: str, color: Optional[str] = None, end: str = "", **kwargs: Any
) -> None:
"""Run when the agent ends.
Args:
text (str): The text to print.
color (Optional[str], optional): The color to use for the text.
Defaults to None.
end (str, optional): The end character. Defaults to "".
**kwargs (Any): Additional keyword arguments.
"""
print_text(text, color=color or self.color, end=end, file=self.file)
def on_agent_finish(
self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any
) -> None:
"""Run on the agent end.
Args:
finish (AgentFinish): The agent finish.
color (Optional[str], optional): The color to use for the text.
Defaults to None.
**kwargs (Any): Additional keyword arguments.
"""
print_text(finish.log, color=color or self.color, end="\n", file=self.file)
|
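A short usage sketch of the handler defined in both versions, driving its callbacks directly ("run.log" is an illustrative filename):

handler = FileCallbackHandler("run.log")  # opened in append mode by default
handler.on_chain_start({"name": "demo"}, {"question": "hi"}, name="demo")
handler.on_text("intermediate output\n")
handler.on_chain_end({"answer": "done"})
# The file is closed by __del__ when the handler is garbage-collected.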
__version__ = '0.30.1'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
from docarray.utils._internal.misc import _get_path_from_docarray_root_level
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def __getattr__(name: str):
if name in ['Document', 'DocumentArray']:
raise ImportError(
f'Cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\'.\n'
f'The object named \'{name}\' does not exist anymore in this version of docarray.\n'
f'If you still want to use \'{name}\' please downgrade to version <=0.21.0 '
f'with: `pip install -U docarray==0.21.0`.'
)
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
|
__version__ = '0.30.1'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
|
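A quick sketch of the public API re-exported by this __init__ (the document class and field are illustrative):

from docarray import BaseDoc, DocList


class TextDoc(BaseDoc):
    text: str


docs = DocList[TextDoc]([TextDoc(text='hello'), TextDoc(text='world')])
print(docs.text)  # column-wise access: ['hello', 'world']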
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import NavigateTool
from langchain_community.tools.playwright.navigate import NavigateToolInput
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"NavigateToolInput": "langchain_community.tools.playwright.navigate",
"NavigateTool": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"NavigateTool",
"NavigateToolInput",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import NavigateTool
from langchain_community.tools.playwright.navigate import NavigateToolInput
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"NavigateToolInput": "langchain_community.tools.playwright.navigate",
"NavigateTool": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"NavigateToolInput",
"NavigateTool",
]
|
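Both shim modules rely on module-level __getattr__ (PEP 562): importing a name routes through create_importer, which resolves it from langchain_community and can emit a deprecation warning. A hedged illustration (the shim's module path is assumed from the lookup table):

# Resolved lazily via __getattr__ rather than a static import:
from langchain.tools.playwright.navigate import NavigateTool, NavigateToolInput

print(NavigateTool.__module__)  # points into langchain_community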
"""Dataset Module."""
from llama_index.core.llama_dataset.base import (
BaseLlamaDataExample,
BaseLlamaDataset,
BaseLlamaExamplePrediction,
BaseLlamaPredictionDataset,
CreatedBy,
CreatedByType,
)
from llama_index.core.llama_dataset.download import download_llama_dataset
from llama_index.core.llama_dataset.evaluator_evaluation import (
EvaluatorExamplePrediction,
EvaluatorPredictionDataset,
LabeledEvaluatorDataExample,
LabeledEvaluatorDataset,
LabeledPairwiseEvaluatorDataExample,
LabeledPairwiseEvaluatorDataset,
LabelledEvaluatorDataExample,
LabelledEvaluatorDataset,
LabelledPairwiseEvaluatorDataExample,
LabelledPairwiseEvaluatorDataset,
PairwiseEvaluatorExamplePrediction,
PairwiseEvaluatorPredictionDataset,
)
from llama_index.core.llama_dataset.rag import (
LabeledRagDataExample,
LabeledRagDataset,
LabelledRagDataExample,
LabelledRagDataset,
RagExamplePrediction,
RagPredictionDataset,
)
__all__ = [
"BaseLlamaDataset",
"BaseLlamaDataExample",
"BaseLlamaExamplePrediction",
"BaseLlamaPredictionDataset",
"LabelledRagDataExample",
"LabelledRagDataset",
"LabeledRagDataExample",
"LabeledRagDataset",
"RagExamplePrediction",
"RagPredictionDataset",
"CreatedByType",
"CreatedBy",
"download_llama_dataset",
"EvaluatorExamplePrediction",
"EvaluatorPredictionDataset",
"LabeledEvaluatorDataset",
"LabelledEvaluatorDataset",
"LabelledEvaluatorDataExample",
"LabeledEvaluatorDataExample",
"LabelledPairwiseEvaluatorDataExample",
"LabelledPairwiseEvaluatorDataset",
"LabeledPairwiseEvaluatorDataExample",
"LabeledPairwiseEvaluatorDataset",
"PairwiseEvaluatorExamplePrediction",
"PairwiseEvaluatorPredictionDataset",
]
|
""" Dataset Module."""
from llama_index.core.llama_dataset.base import (
BaseLlamaDataExample,
BaseLlamaDataset,
BaseLlamaExamplePrediction,
BaseLlamaPredictionDataset,
CreatedBy,
CreatedByType,
)
from llama_index.core.llama_dataset.download import download_llama_dataset
from llama_index.core.llama_dataset.evaluator_evaluation import (
EvaluatorExamplePrediction,
EvaluatorPredictionDataset,
LabeledEvaluatorDataExample,
LabeledEvaluatorDataset,
LabeledPairwiseEvaluatorDataExample,
LabeledPairwiseEvaluatorDataset,
LabelledEvaluatorDataExample,
LabelledEvaluatorDataset,
LabelledPairwiseEvaluatorDataExample,
LabelledPairwiseEvaluatorDataset,
PairwiseEvaluatorExamplePrediction,
PairwiseEvaluatorPredictionDataset,
)
from llama_index.core.llama_dataset.rag import (
LabeledRagDataExample,
LabeledRagDataset,
LabelledRagDataExample,
LabelledRagDataset,
RagExamplePrediction,
RagPredictionDataset,
)
__all__ = [
"BaseLlamaDataset",
"BaseLlamaDataExample",
"BaseLlamaExamplePrediction",
"BaseLlamaPredictionDataset",
"LabelledRagDataExample",
"LabelledRagDataset",
"LabeledRagDataExample",
"LabeledRagDataset",
"RagExamplePrediction",
"RagPredictionDataset",
"CreatedByType",
"CreatedBy",
"download_llama_dataset",
"EvaluatorExamplePrediction",
"EvaluatorPredictionDataset",
"LabeledEvaluatorDataset",
"LabelledEvaluatorDataset",
"LabelledEvaluatorDataExample",
"LabeledEvaluatorDataExample",
"LabelledPairwiseEvaluatorDataExample",
"LabelledPairwiseEvaluatorDataset",
"LabeledPairwiseEvaluatorDataExample",
"LabeledPairwiseEvaluatorDataset",
"PairwiseEvaluatorExamplePrediction",
"PairwiseEvaluatorPredictionDataset",
]
|
"""[DEPRECATED] Pipeline prompt template."""
from typing import Any
from pydantic import model_validator
from langchain_core._api.deprecation import deprecated
from langchain_core.prompt_values import PromptValue
from langchain_core.prompts.base import BasePromptTemplate
from langchain_core.prompts.chat import BaseChatPromptTemplate
def _get_inputs(inputs: dict, input_variables: list[str]) -> dict:
return {k: inputs[k] for k in input_variables}
@deprecated(
since="0.3.22",
removal="1.0",
message=(
"This class is deprecated. Please see the docstring below or at the link"
" for a replacement option: "
"https://python.langchain.com/api_reference/core/prompts/langchain_core.prompts.pipeline.PipelinePromptTemplate.html"
),
)
class PipelinePromptTemplate(BasePromptTemplate):
"""[DEPRECATED] Pipeline prompt template.
This has been deprecated in favor of chaining individual prompts together in your
code. E.g. using a for loop, you could do:
.. code-block:: python
my_input = {"key": "value"}
for name, prompt in pipeline_prompts:
my_input[name] = prompt.invoke(my_input).to_string()
my_output = final_prompt.invoke(my_input)
Prompt template for composing multiple prompt templates together.
This can be useful when you want to reuse parts of prompts.
A PipelinePrompt consists of two main parts:
- final_prompt: This is the final prompt that is returned
- pipeline_prompts: This is a list of tuples, consisting
of a string (`name`) and a Prompt Template.
Each PromptTemplate will be formatted and then passed
to future prompt templates as a variable with
the same name as `name`
"""
final_prompt: BasePromptTemplate
"""The final prompt that is returned."""
pipeline_prompts: list[tuple[str, BasePromptTemplate]]
"""A list of tuples, consisting of a string (`name`) and a Prompt Template."""
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "prompts", "pipeline"]
@model_validator(mode="before")
@classmethod
def get_input_variables(cls, values: dict) -> Any:
"""Get input variables."""
created_variables = set()
all_variables = set()
for k, prompt in values["pipeline_prompts"]:
created_variables.add(k)
all_variables.update(prompt.input_variables)
values["input_variables"] = list(all_variables.difference(created_variables))
return values
def format_prompt(self, **kwargs: Any) -> PromptValue:
"""Format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
            The formatted prompt value.
"""
for k, prompt in self.pipeline_prompts:
inputs = _get_inputs(kwargs, prompt.input_variables)
if isinstance(prompt, BaseChatPromptTemplate):
kwargs[k] = prompt.format_messages(**inputs)
else:
kwargs[k] = prompt.format(**inputs)
inputs = _get_inputs(kwargs, self.final_prompt.input_variables)
return self.final_prompt.format_prompt(**inputs)
async def aformat_prompt(self, **kwargs: Any) -> PromptValue:
"""Async format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
            The formatted prompt value.
"""
for k, prompt in self.pipeline_prompts:
inputs = _get_inputs(kwargs, prompt.input_variables)
if isinstance(prompt, BaseChatPromptTemplate):
kwargs[k] = await prompt.aformat_messages(**inputs)
else:
kwargs[k] = await prompt.aformat(**inputs)
inputs = _get_inputs(kwargs, self.final_prompt.input_variables)
return await self.final_prompt.aformat_prompt(**inputs)
def format(self, **kwargs: Any) -> str:
"""Format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
"""
return self.format_prompt(**kwargs).to_string()
async def aformat(self, **kwargs: Any) -> str:
"""Async format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
"""
return (await self.aformat_prompt(**kwargs)).to_string()
@property
def _prompt_type(self) -> str:
raise ValueError
|
"""[DEPRECATED] Pipeline prompt template."""
from typing import Any
from pydantic import model_validator
from langchain_core._api.deprecation import deprecated
from langchain_core.prompt_values import PromptValue
from langchain_core.prompts.base import BasePromptTemplate
from langchain_core.prompts.chat import BaseChatPromptTemplate
def _get_inputs(inputs: dict, input_variables: list[str]) -> dict:
return {k: inputs[k] for k in input_variables}
@deprecated(
since="0.3.22",
removal="1.0",
message=(
"This class is deprecated. Please see the docstring below or at the link"
" for a replacement option: "
"https://python.langchain.com/api_reference/core/prompts/langchain_core.prompts.pipeline.PipelinePromptTemplate.html"
),
)
class PipelinePromptTemplate(BasePromptTemplate):
"""[DEPRECATED] Pipeline prompt template.
This has been deprecated in favor of chaining individual prompts together in your
code. E.g. using a for loop, you could do:
.. code-block:: python
my_input = {"key": "value"}
for name, prompt in pipeline_prompts:
my_input[name] = prompt.invoke(my_input).to_string()
my_output = final_prompt.invoke(my_input)
Prompt template for composing multiple prompt templates together.
This can be useful when you want to reuse parts of prompts.
A PipelinePrompt consists of two main parts:
- final_prompt: This is the final prompt that is returned
- pipeline_prompts: This is a list of tuples, consisting
of a string (`name`) and a Prompt Template.
Each PromptTemplate will be formatted and then passed
to future prompt templates as a variable with
the same name as `name`
"""
final_prompt: BasePromptTemplate
"""The final prompt that is returned."""
pipeline_prompts: list[tuple[str, BasePromptTemplate]]
"""A list of tuples, consisting of a string (`name`) and a Prompt Template."""
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "prompts", "pipeline"]
@model_validator(mode="before")
@classmethod
def get_input_variables(cls, values: dict) -> Any:
"""Get input variables."""
created_variables = set()
all_variables = set()
for k, prompt in values["pipeline_prompts"]:
created_variables.add(k)
all_variables.update(prompt.input_variables)
values["input_variables"] = list(all_variables.difference(created_variables))
return values
def format_prompt(self, **kwargs: Any) -> PromptValue:
"""Format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
            The formatted prompt value.
"""
for k, prompt in self.pipeline_prompts:
_inputs = _get_inputs(kwargs, prompt.input_variables)
if isinstance(prompt, BaseChatPromptTemplate):
kwargs[k] = prompt.format_messages(**_inputs)
else:
kwargs[k] = prompt.format(**_inputs)
_inputs = _get_inputs(kwargs, self.final_prompt.input_variables)
return self.final_prompt.format_prompt(**_inputs)
async def aformat_prompt(self, **kwargs: Any) -> PromptValue:
"""Async format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
            The formatted prompt value.
"""
for k, prompt in self.pipeline_prompts:
_inputs = _get_inputs(kwargs, prompt.input_variables)
if isinstance(prompt, BaseChatPromptTemplate):
kwargs[k] = await prompt.aformat_messages(**_inputs)
else:
kwargs[k] = await prompt.aformat(**_inputs)
_inputs = _get_inputs(kwargs, self.final_prompt.input_variables)
return await self.final_prompt.aformat_prompt(**_inputs)
def format(self, **kwargs: Any) -> str:
"""Format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
"""
return self.format_prompt(**kwargs).to_string()
async def aformat(self, **kwargs: Any) -> str:
"""Async format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
"""
return (await self.aformat_prompt(**kwargs)).to_string()
@property
def _prompt_type(self) -> str:
raise ValueError
|
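A minimal composition sketch for the deprecated class above (the template strings are illustrative):

from langchain_core.prompts import PromptTemplate

full = PromptTemplate.from_template("{intro}\n\n{question}")
intro = PromptTemplate.from_template("You are a {persona} assistant.")
question = PromptTemplate.from_template("Q: {user_input}")

pipeline = PipelinePromptTemplate(
    final_prompt=full,
    pipeline_prompts=[("intro", intro), ("question", question)],
)
# input_variables is all sub-prompt variables minus the generated names:
print(sorted(pipeline.input_variables))  # ['persona', 'user_input']
print(pipeline.format(persona="pirate", user_input="ahoy?"))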
import itertools
import os.path
import pytest
from docarray import Document, DocumentArray
from jina import Client, Executor, Flow, requests
from jina.helper import random_port
PROTOCOLS = ['grpc', 'http', 'websocket']
cur_dir = os.path.dirname(__file__)
class MyExecutor(Executor):
@requests
def foo(self, docs: DocumentArray, **kwargs):
for doc in docs:
doc.text = 'processed'
@pytest.mark.parametrize(
'ports,protocols',
[
*[
([random_port(), random_port(), random_port()], list(protocols))
for protocols in itertools.combinations(PROTOCOLS, r=3)
],
*[
([random_port(), random_port()], list(protocols))
for protocols in itertools.combinations(PROTOCOLS, r=2)
],
*[
([random_port()], list(protocols))
for protocols in itertools.combinations(PROTOCOLS, r=1)
],
],
)
def test_flow_multiprotocol(ports, protocols):
flow = Flow().config_gateway(port=ports, protocol=protocols).add(uses=MyExecutor)
with flow:
for port, protocol in zip(ports, protocols):
client = Client(port=port, protocol=protocol)
docs = client.post('/', inputs=[Document()])
for doc in docs:
assert doc.text == 'processed'
@pytest.mark.parametrize(
'protocols',
[
list(protocols)
for protocols in itertools.chain(
itertools.combinations(PROTOCOLS, r=3),
itertools.combinations(PROTOCOLS, r=2),
)
],
)
def test_flow_multiprotocol_default_random_ports(protocols):
flow = Flow().config_gateway(protocol=protocols).add(uses=MyExecutor)
with flow:
for port, protocol in zip(flow.port, protocols):
client = Client(port=port, protocol=protocol)
docs = client.post('/', inputs=[Document()])
for doc in docs:
assert doc.text == 'processed'
@pytest.mark.parametrize(
'protocols',
[
['grpc'],
['http'],
['websocket'],
],
)
def test_flow_single_protocol_default_random_port(protocols):
flow = Flow().config_gateway(protocol=protocols).add(uses=MyExecutor)
with flow:
for protocol in protocols:
client = Client(port=flow.port, protocol=protocol)
docs = client.post('/', inputs=[Document()])
for doc in docs:
assert doc.text == 'processed'
def test_flow_multiprotocol_aliases():
ports = [random_port(), random_port(), random_port()]
protocols = PROTOCOLS
flow = Flow().config_gateway(ports=ports, protocols=protocols).add(uses=MyExecutor)
with flow:
for port, protocol in zip(ports, protocols):
client = Client(port=port, protocol=protocol)
docs = client.post('/', inputs=[Document()])
for doc in docs:
assert doc.text == 'processed'
def test_flow_multiprotocol_yaml():
flow = Flow.load_config(os.path.join(cur_dir, 'yaml/multi-protocol.yml'))
with flow:
for port, protocol in zip([12345, 12344, 12343], ['grpc', 'http', 'websocket']):
client = Client(port=port, protocol=protocol)
client.post('/', inputs=[Document()])
def test_flow_multiprotocol_ports_protocols_mismatch():
flow = Flow().config_gateway(port=[random_port()], protocol=['grpc', 'http'])
with pytest.raises(ValueError) as err_info:
with flow:
pass
assert (
'You need to specify as much protocols as ports if you want to use a jina built-in gateway'
in err_info.value.args[0]
)
def test_flow_multiprotocol_with_monitoring():
ports = [random_port(), random_port(), random_port()]
protocols = PROTOCOLS
flow = Flow().config_gateway(port=ports, protocol=protocols, monitoring=True)
with flow:
for port, protocol in zip(ports, protocols):
client = Client(port=port, protocol=protocol)
client.post('/', inputs=[Document()])
|
import itertools
import os.path
import pytest
from docarray import Document, DocumentArray
from jina import Client, Executor, Flow, requests
from jina.helper import random_port
PROTOCOLS = ['grpc', 'http', 'websocket']
cur_dir = os.path.dirname(__file__)
class MyExecutor(Executor):
@requests
def foo(self, docs: DocumentArray, **kwargs):
for doc in docs:
doc.text = 'processed'
@pytest.mark.parametrize(
'ports,protocols',
[
*[
([random_port(), random_port(), random_port()], list(protocols))
for protocols in itertools.combinations(PROTOCOLS, r=3)
],
*[
([random_port(), random_port()], list(protocols))
for protocols in itertools.combinations(PROTOCOLS, r=2)
],
*[
([random_port()], list(protocols))
for protocols in itertools.combinations(PROTOCOLS, r=1)
],
],
)
def test_flow_multiprotocol(ports, protocols):
flow = Flow().config_gateway(port=ports, protocol=protocols).add(uses=MyExecutor)
with flow:
for port, protocol in zip(ports, protocols):
client = Client(port=port, protocol=protocol)
docs = client.post('/', inputs=[Document()])
for doc in docs:
assert doc.text == 'processed'
@pytest.mark.parametrize(
'protocols',
[
list(protocols)
for protocols in itertools.chain(
itertools.combinations(PROTOCOLS, r=3),
itertools.combinations(PROTOCOLS, r=2),
)
],
)
def test_flow_multiprotocol_default_random_ports(protocols):
flow = Flow().config_gateway(protocol=protocols).add(uses=MyExecutor)
with flow:
for port, protocol in zip(flow.port, protocols):
client = Client(port=port, protocol=protocol)
docs = client.post('/', inputs=[Document()])
for doc in docs:
assert doc.text == 'processed'
@pytest.mark.parametrize(
'protocols',
[
['grpc'],
['http'],
['websocket'],
],
)
def test_flow_single_protocol_default_random_port(protocols):
flow = Flow().config_gateway(protocol=protocols).add(uses=MyExecutor)
with flow:
for protocol in protocols:
client = Client(port=flow.port, protocol=protocol)
docs = client.post('/', inputs=[Document()])
for doc in docs:
assert doc.text == 'processed'
def test_flow_multiprotocol_aliases():
ports = [random_port(), random_port(), random_port()]
protocols = PROTOCOLS
flow = Flow().config_gateway(ports=ports, protocols=protocols).add(uses=MyExecutor)
with flow:
for port, protocol in zip(ports, protocols):
client = Client(port=port, protocol=protocol)
docs = client.post('/', inputs=[Document()])
for doc in docs:
assert doc.text == 'processed'
def test_flow_multiprotocol_yaml():
flow = Flow.load_config(os.path.join(cur_dir, 'yaml/multi-protocol.yml'))
with flow:
for port, protocol in zip([12345, 12344, 12343], ['grpc', 'http', 'websocket']):
client = Client(port=port, protocol=protocol)
client.post('/', inputs=[Document()])
def test_flow_multiprotocol_ports_protocols_mismatch():
flow = Flow().config_gateway(port=[random_port()], protocol=['grpc', 'http'])
with pytest.raises(ValueError) as err_info:
with flow:
pass
assert (
'You need to specify as much protocols as ports if you want to use a jina built-in gateway'
in err_info.value.args[0]
)
|
# mypy: allow-untyped-defs
from contextlib import contextmanager
from typing import NoReturn
try:
from torch._C import _itt
except ImportError:
class _ITTStub:
@staticmethod
def _fail(*args, **kwargs) -> NoReturn:
raise RuntimeError(
"ITT functions not installed. Are you sure you have a ITT build?"
)
@staticmethod
def is_available() -> bool:
return False
rangePush = _fail
rangePop = _fail
mark = _fail
_itt = _ITTStub() # type: ignore[assignment]
__all__ = ["is_available", "range_push", "range_pop", "mark", "range"]
def is_available():
"""
Check if ITT feature is available or not
"""
return _itt.is_available()
def range_push(msg):
"""
    Pushes a range onto a stack of nested range spans. Returns the zero-based
depth of the range that is started.
Arguments:
msg (str): ASCII message to associate with range
"""
return _itt.rangePush(msg)
def range_pop():
"""
Pops a range off of a stack of nested range spans. Returns the
zero-based depth of the range that is ended.
"""
return _itt.rangePop()
def mark(msg):
"""
Describe an instantaneous event that occurred at some point.
Arguments:
msg (str): ASCII message to associate with the event.
"""
return _itt.mark(msg)
@contextmanager
def range(msg, *args, **kwargs):
"""
Context manager / decorator that pushes an ITT range at the beginning
of its scope, and pops it at the end. If extra arguments are given,
they are passed as arguments to msg.format().
Args:
msg (str): message to associate with the range
"""
range_push(msg.format(*args, **kwargs))
try:
yield
finally:
range_pop()
|
# mypy: allow-untyped-defs
from contextlib import contextmanager
try:
from torch._C import _itt
except ImportError:
class _ITTStub:
@staticmethod
def _fail(*args, **kwargs):
raise RuntimeError(
"ITT functions not installed. Are you sure you have a ITT build?"
)
@staticmethod
def is_available():
return False
rangePush = _fail
rangePop = _fail
mark = _fail
_itt = _ITTStub() # type: ignore[assignment]
__all__ = ["is_available", "range_push", "range_pop", "mark", "range"]
def is_available():
"""
Check if ITT feature is available or not
"""
return _itt.is_available()
def range_push(msg):
"""
    Pushes a range onto a stack of nested range spans. Returns the zero-based
depth of the range that is started.
Arguments:
msg (str): ASCII message to associate with range
"""
return _itt.rangePush(msg)
def range_pop():
"""
Pops a range off of a stack of nested range spans. Returns the
zero-based depth of the range that is ended.
"""
return _itt.rangePop()
def mark(msg):
"""
Describe an instantaneous event that occurred at some point.
Arguments:
msg (str): ASCII message to associate with the event.
"""
return _itt.mark(msg)
@contextmanager
def range(msg, *args, **kwargs):
"""
Context manager / decorator that pushes an ITT range at the beginning
of its scope, and pops it at the end. If extra arguments are given,
they are passed as arguments to msg.format().
Args:
msg (str): message to associate with the range
"""
range_push(msg.format(*args, **kwargs))
try:
yield
finally:
range_pop()
|
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.ndarray import NdArray
@_register_proto(proto_type_name='audio_ndarray')
class AudioNdArray(AbstractAudioTensor, NdArray):
"""
Subclass of NdArray, to represent an audio tensor.
Adds audio-specific features to the tensor.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
from docarray import BaseDocument
from docarray.typing import AudioNdArray, AudioUrl
import numpy as np
class MyAudioDoc(BaseDocument):
title: str
audio_tensor: Optional[AudioNdArray]
url: Optional[AudioUrl]
bytes_: Optional[bytes]
# from tensor
doc_1 = MyAudioDoc(
title='my_first_audio_doc',
audio_tensor=np.random.rand(1000, 2),
)
doc_1.audio_tensor.save_to_wav_file(file_path='path/to/file_1.wav')
doc_1.bytes_ = doc_1.audio_tensor.to_bytes()
# from url
doc_2 = MyAudioDoc(
title='my_second_audio_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.audio_tensor = doc_2.url.load()
doc_2.audio_tensor.save_to_wav_file(file_path='path/to/file_2.wav')
        doc_2.bytes_ = doc_2.audio_tensor.to_bytes()
"""
...
|
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.ndarray import NdArray
@_register_proto(proto_type_name='audio_ndarray')
class AudioNdArray(AbstractAudioTensor, NdArray):
"""
Subclass of NdArray, to represent an audio tensor.
Adds audio-specific features to the tensor.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
from pydantic import parse_obj_as
from docarray import Document
from docarray.typing import AudioNdArray, AudioUrl
import numpy as np
class MyAudioDoc(Document):
title: str
audio_tensor: Optional[AudioNdArray]
url: Optional[AudioUrl]
bytes_: Optional[bytes]
# from tensor
doc_1 = MyAudioDoc(
title='my_first_audio_doc',
audio_tensor=np.random.rand(1000, 2),
)
doc_1.audio_tensor.save_to_wav_file(file_path='path/to/file_1.wav')
doc_1.bytes_ = doc_1.audio_tensor.to_bytes()
# from url
doc_2 = MyAudioDoc(
title='my_second_audio_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.audio_tensor = doc_2.url.load()
doc_2.audio_tensor.save_to_wav_file(file_path='path/to/file_2.wav')
        doc_2.bytes_ = doc_2.audio_tensor.to_bytes()
"""
...
|
"""
Epub parser.
Contains parsers for epub files.
"""
from pathlib import Path
from typing import Dict, List, Optional
import logging
from fsspec import AbstractFileSystem
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
logger = logging.getLogger(__name__)
class EpubReader(BaseReader):
"""Epub Parser."""
def load_data(
self,
file: Path,
extra_info: Optional[Dict] = None,
fs: Optional[AbstractFileSystem] = None,
) -> List[Document]:
"""Parse file."""
try:
import ebooklib
import html2text
from ebooklib import epub
except ImportError:
raise ImportError(
"Please install extra dependencies that are required for "
"the EpubReader: "
"`pip install EbookLib html2text`"
)
if fs:
logger.warning(
"fs was specified but EpubReader doesn't support loading "
"from fsspec filesystems. Will load from local filesystem instead."
)
text_list = []
book = epub.read_epub(file, options={"ignore_ncx": True})
# Iterate through all chapters.
for item in book.get_items():
            # Chapters are typically located in epub document items.
if item.get_type() == ebooklib.ITEM_DOCUMENT:
text_list.append(
html2text.html2text(item.get_content().decode("utf-8"))
)
text = "\n".join(text_list)
return [Document(text=text, metadata=extra_info or {})]
|
"""Epub parser.
Contains parsers for epub files.
"""
from pathlib import Path
from typing import Dict, List, Optional
import logging
from fsspec import AbstractFileSystem
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
logger = logging.getLogger(__name__)
class EpubReader(BaseReader):
"""Epub Parser."""
def load_data(
self,
file: Path,
extra_info: Optional[Dict] = None,
fs: Optional[AbstractFileSystem] = None,
) -> List[Document]:
"""Parse file."""
try:
import ebooklib
import html2text
from ebooklib import epub
except ImportError:
raise ImportError(
"Please install extra dependencies that are required for "
"the EpubReader: "
"`pip install EbookLib html2text`"
)
if fs:
logger.warning(
"fs was specified but EpubReader doesn't support loading "
"from fsspec filesystems. Will load from local filesystem instead."
)
text_list = []
book = epub.read_epub(file, options={"ignore_ncx": True})
# Iterate through all chapters.
for item in book.get_items():
            # Chapters are typically located in epub document items.
if item.get_type() == ebooklib.ITEM_DOCUMENT:
text_list.append(
html2text.html2text(item.get_content().decode("utf-8"))
)
text = "\n".join(text_list)
return [Document(text=text, metadata=extra_info or {})]
|
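An illustrative invocation of the reader (the path and metadata are hypothetical); it requires `pip install EbookLib html2text`, as the error message above states:

from pathlib import Path

reader = EpubReader()
docs = reader.load_data(Path("books/sample.epub"), extra_info={"source": "sample"})
print(len(docs), docs[0].metadata)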
import os
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar
import orjson
from pydantic import BaseModel, Field
from rich.console import Console
from docarray.base_document.base_node import BaseNode
from docarray.base_document.io.json import orjson_dumps, orjson_dumps_and_decode
from docarray.base_document.mixins import IOMixin, UpdateMixin
from docarray.typing import ID
if TYPE_CHECKING:
from docarray.array.stacked.column_storage import ColumnStorageView
_console: Console = Console()
T = TypeVar('T', bound='BaseDocument')
class BaseDocument(BaseModel, IOMixin, UpdateMixin, BaseNode):
"""
The base class for Documents
"""
id: Optional[ID] = Field(default_factory=lambda: ID(os.urandom(16).hex()))
class Config:
json_loads = orjson.loads
json_dumps = orjson_dumps_and_decode
json_encoders = {dict: orjson_dumps}
validate_assignment = True
@classmethod
def from_view(cls: Type[T], storage_view: 'ColumnStorageView') -> T:
doc = cls.__new__(cls)
object.__setattr__(doc, '__dict__', storage_view)
object.__setattr__(doc, '__fields_set__', set(storage_view.keys()))
doc._init_private_attributes()
return doc
@classmethod
def _get_field_type(cls, field: str) -> Type:
"""
        Accessing the nested python class defined in the schema. Could be useful for
        reconstruction of a Document in serialization/deserialization.
:param field: name of the field
:return:
"""
return cls.__fields__[field].outer_type_
def __str__(self) -> str:
with _console.capture() as capture:
_console.print(self)
return capture.get().strip()
def summary(self) -> None:
"""Print non-empty fields and nested structure of this Document object."""
from docarray.display.document_summary import DocumentSummary
DocumentSummary(doc=self).summary()
@classmethod
def schema_summary(cls) -> None:
"""Print a summary of the Documents schema."""
from docarray.display.document_summary import DocumentSummary
DocumentSummary.schema_summary(cls)
def _ipython_display_(self) -> None:
"""Displays the object in IPython as a summary"""
self.summary()
def is_view(self) -> bool:
from docarray.array.stacked.column_storage import ColumnStorageView
return isinstance(self.__dict__, ColumnStorageView)
def __getattr__(self, item) -> Any:
if item in self.__fields__.keys():
return self.__dict__[item]
else:
return super().__getattribute__(item)
def __setattr__(self, field, value) -> None:
if not self.is_view():
super().__setattr__(field, value)
else:
# here we first validate with pydantic
# Then we apply the value to the remote dict,
# and we change back the __dict__ value to the remote dict
dict_ref = self.__dict__
super().__setattr__(field, value)
for key, val in self.__dict__.items():
dict_ref[key] = val
object.__setattr__(self, '__dict__', dict_ref)
|
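A small sketch of defining a schema on top of the base class from either version (the field name is illustrative):

class MyDoc(BaseDocument):
    title: str = ''


doc = MyDoc(title='hello')
print(doc.id)                           # auto-generated hex ID
print(MyDoc._get_field_type('title'))   # <class 'str'>
doc.summary()                           # rich-printed field overview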
import os
from typing import Optional, Type
import orjson
from pydantic import BaseModel, Field
from rich.console import Console
from docarray.base_document.base_node import BaseNode
from docarray.base_document.io.json import orjson_dumps, orjson_dumps_and_decode
from docarray.base_document.mixins import IOMixin, UpdateMixin
from docarray.typing import ID
_console: Console = Console()
class BaseDocument(BaseModel, IOMixin, UpdateMixin, BaseNode):
"""
The base class for Documents
"""
id: Optional[ID] = Field(default_factory=lambda: ID(os.urandom(16).hex()))
class Config:
json_loads = orjson.loads
json_dumps = orjson_dumps_and_decode
json_encoders = {dict: orjson_dumps}
validate_assignment = True
@classmethod
def _get_field_type(cls, field: str) -> Type:
"""
        Accessing the nested python class defined in the schema. Could be useful for
        reconstruction of a Document in serialization/deserialization.
:param field: name of the field
:return:
"""
return cls.__fields__[field].outer_type_
def __str__(self):
with _console.capture() as capture:
_console.print(self)
return capture.get().strip()
def summary(self) -> None:
"""Print non-empty fields and nested structure of this Document object."""
from docarray.display.document_summary import DocumentSummary
DocumentSummary(doc=self).summary()
@classmethod
def schema_summary(cls) -> None:
"""Print a summary of the Documents schema."""
from docarray.display.document_summary import DocumentSummary
DocumentSummary.schema_summary(cls)
def _ipython_display_(self):
"""Displays the object in IPython as a summary"""
self.summary()
|
import os
import sys
from test_utils import DirectoryExcursion
if len(sys.argv) != 4:
print("Usage: {} [wheel to rename] [commit id] [platform tag]".format(sys.argv[0]))
sys.exit(1)
whl_path = sys.argv[1]
commit_id = sys.argv[2]
platform_tag = sys.argv[3]
dirname, basename = os.path.dirname(whl_path), os.path.basename(whl_path)
with DirectoryExcursion(dirname):
tokens = basename.split("-")
assert len(tokens) == 5
version = tokens[1].split("+")[0]
keywords = {
"pkg_name": tokens[0],
"version": version,
"commit_id": commit_id,
"platform_tag": platform_tag,
}
new_name = "{pkg_name}-{version}+{commit_id}-py3-none-{platform_tag}.whl".format(
**keywords
)
print("Renaming {} to {}...".format(basename, new_name))
if os.path.isfile(new_name):
os.remove(new_name)
os.rename(basename, new_name)
filesize = os.path.getsize(new_name) / 1024 / 1024 # MB
print(f"Wheel size: {filesize}")
msg = f"Limit of wheel size set by PyPI is exceeded. {new_name}: {filesize}"
assert filesize <= 300, msg
|
import os
import sys
from contextlib import contextmanager
@contextmanager
def cd(path):
path = os.path.normpath(path)
cwd = os.getcwd()
os.chdir(path)
print("cd " + path)
try:
yield path
finally:
os.chdir(cwd)
if len(sys.argv) != 4:
print('Usage: {} [wheel to rename] [commit id] [platform tag]'.format(sys.argv[0]))
sys.exit(1)
whl_path = sys.argv[1]
commit_id = sys.argv[2]
platform_tag = sys.argv[3]
dirname, basename = os.path.dirname(whl_path), os.path.basename(whl_path)
with cd(dirname):
tokens = basename.split('-')
assert len(tokens) == 5
version = tokens[1].split('+')[0]
keywords = {'pkg_name': tokens[0],
'version': version,
'commit_id': commit_id,
'platform_tag': platform_tag}
new_name = '{pkg_name}-{version}+{commit_id}-py3-none-{platform_tag}.whl'.format(**keywords)
print('Renaming {} to {}...'.format(basename, new_name))
if os.path.isfile(new_name):
os.remove(new_name)
os.rename(basename, new_name)
filesize = os.path.getsize(new_name) / 1024 / 1024 # MB
msg = f"Limit of wheel size set by PyPI is exceeded. {new_name}: {filesize}"
assert filesize <= 300, msg
|
import importlib
import os
import re
from pathlib import Path
from typing import Type, TypeVar
from backend.data.block import Block
# Dynamically load all modules under backend.blocks
AVAILABLE_MODULES = []
current_dir = Path(__file__).parent
modules = [
str(f.relative_to(current_dir))[:-3].replace(os.path.sep, ".")
for f in current_dir.rglob("*.py")
if f.is_file() and f.name != "__init__.py"
]
for module in modules:
if not re.match("^[a-z0-9_.]+$", module):
raise ValueError(
f"Block module {module} error: module name must be lowercase, "
"and contain only alphanumeric characters and underscores."
)
importlib.import_module(f".{module}", package=__name__)
AVAILABLE_MODULES.append(module)
# Load all Block instances from the available modules
AVAILABLE_BLOCKS: dict[str, Type[Block]] = {}
T = TypeVar("T")
def all_subclasses(cls: Type[T]) -> list[Type[T]]:
subclasses = cls.__subclasses__()
for subclass in subclasses:
subclasses += all_subclasses(subclass)
return subclasses
for block_cls in all_subclasses(Block):
name = block_cls.__name__
if block_cls.__name__.endswith("Base"):
continue
if not block_cls.__name__.endswith("Block"):
raise ValueError(
f"Block class {block_cls.__name__} does not end with 'Block', If you are creating an abstract class, please name the class with 'Base' at the end"
)
block = block_cls.create()
if not isinstance(block.id, str) or len(block.id) != 36:
raise ValueError(f"Block ID {block.name} error: {block.id} is not a valid UUID")
if block.id in AVAILABLE_BLOCKS:
raise ValueError(f"Block ID {block.name} error: {block.id} is already in use")
input_schema = block.input_schema.model_fields
output_schema = block.output_schema.model_fields
# Make sure `error` field is a string in the output schema
if "error" in output_schema and output_schema["error"].annotation is not str:
raise ValueError(
f"{block.name} `error` field in output_schema must be a string"
)
    # Make sure all fields in input_schema and output_schema are annotated and have a value
for field_name, field in [*input_schema.items(), *output_schema.items()]:
if field.annotation is None:
raise ValueError(
f"{block.name} has a field {field_name} that is not annotated"
)
if field.json_schema_extra is None:
raise ValueError(
f"{block.name} has a field {field_name} not defined as SchemaField"
)
for field in block.input_schema.model_fields.values():
if field.annotation is bool and field.default not in (True, False):
raise ValueError(f"{block.name} has a boolean field with no default value")
if block.disabled:
continue
AVAILABLE_BLOCKS[block.id] = block_cls
__all__ = ["AVAILABLE_MODULES", "AVAILABLE_BLOCKS"]
|
import importlib
import os
import re
from pathlib import Path
from typing import Type, TypeVar
from backend.data.block import Block
# Dynamically load all modules under backend.blocks
AVAILABLE_MODULES = []
current_dir = Path(__file__).parent
modules = [
str(f.relative_to(current_dir))[:-3].replace(os.path.sep, ".")
for f in current_dir.rglob("*.py")
if f.is_file() and f.name != "__init__.py"
]
for module in modules:
if not re.match("^[a-z_.]+$", module):
raise ValueError(
f"Block module {module} error: module name must be lowercase, "
"separated by underscores, and contain only alphabet characters"
)
importlib.import_module(f".{module}", package=__name__)
AVAILABLE_MODULES.append(module)
# Load all Block instances from the available modules
AVAILABLE_BLOCKS: dict[str, Type[Block]] = {}
T = TypeVar("T")
def all_subclasses(cls: Type[T]) -> list[Type[T]]:
subclasses = cls.__subclasses__()
for subclass in subclasses:
subclasses += all_subclasses(subclass)
return subclasses
for block_cls in all_subclasses(Block):
    name = block_cls.__name__
    if name.endswith("Base"):
        continue
    if not name.endswith("Block"):
        raise ValueError(
            f"Block class {name} does not end with 'Block'. If you are creating an abstract class, please name the class with 'Base' at the end."
        )
block = block_cls.create()
if not isinstance(block.id, str) or len(block.id) != 36:
raise ValueError(f"Block ID {block.name} error: {block.id} is not a valid UUID")
if block.id in AVAILABLE_BLOCKS:
raise ValueError(f"Block ID {block.name} error: {block.id} is already in use")
input_schema = block.input_schema.model_fields
output_schema = block.output_schema.model_fields
# Make sure `error` field is a string in the output schema
if "error" in output_schema and output_schema["error"].annotation is not str:
raise ValueError(
f"{block.name} `error` field in output_schema must be a string"
)
    # Make sure all fields in input_schema and output_schema are annotated and have a value
for field_name, field in [*input_schema.items(), *output_schema.items()]:
if field.annotation is None:
raise ValueError(
f"{block.name} has a field {field_name} that is not annotated"
)
if field.json_schema_extra is None:
raise ValueError(
f"{block.name} has a field {field_name} not defined as SchemaField"
)
for field in block.input_schema.model_fields.values():
if field.annotation is bool and field.default not in (True, False):
raise ValueError(f"{block.name} has a boolean field with no default value")
if block.disabled:
continue
AVAILABLE_BLOCKS[block.id] = block_cls
__all__ = ["AVAILABLE_MODULES", "AVAILABLE_BLOCKS"]
|
from datetime import datetime
from typing import List
from backend.blocks.exa._auth import (
ExaCredentials,
ExaCredentialsField,
ExaCredentialsInput,
)
from backend.blocks.exa.helpers import ContentSettings
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
class ExaSearchBlock(Block):
class Input(BlockSchema):
credentials: ExaCredentialsInput = ExaCredentialsField()
query: str = SchemaField(description="The search query")
use_auto_prompt: bool = SchemaField(
description="Whether to use autoprompt",
default=True,
advanced=True,
)
type: str = SchemaField(
description="Type of search",
default="",
advanced=True,
)
category: str = SchemaField(
description="Category to search within",
default="",
advanced=True,
)
number_of_results: int = SchemaField(
description="Number of results to return",
default=10,
advanced=True,
)
include_domains: List[str] = SchemaField(
description="Domains to include in search",
default_factory=list,
)
exclude_domains: List[str] = SchemaField(
description="Domains to exclude from search",
default_factory=list,
advanced=True,
)
start_crawl_date: datetime = SchemaField(
description="Start date for crawled content",
)
end_crawl_date: datetime = SchemaField(
description="End date for crawled content",
)
start_published_date: datetime = SchemaField(
description="Start date for published content",
)
end_published_date: datetime = SchemaField(
description="End date for published content",
)
include_text: List[str] = SchemaField(
description="Text patterns to include",
default_factory=list,
advanced=True,
)
exclude_text: List[str] = SchemaField(
description="Text patterns to exclude",
default_factory=list,
advanced=True,
)
contents: ContentSettings = SchemaField(
description="Content retrieval settings",
default=ContentSettings(),
advanced=True,
)
class Output(BlockSchema):
results: list = SchemaField(
description="List of search results",
default_factory=list,
)
def __init__(self):
super().__init__(
id="996cec64-ac40-4dde-982f-b0dc60a5824d",
description="Searches the web using Exa's advanced search API",
categories={BlockCategory.SEARCH},
input_schema=ExaSearchBlock.Input,
output_schema=ExaSearchBlock.Output,
)
def run(
self, input_data: Input, *, credentials: ExaCredentials, **kwargs
) -> BlockOutput:
url = "https://api.exa.ai/search"
headers = {
"Content-Type": "application/json",
"x-api-key": credentials.api_key.get_secret_value(),
}
payload = {
"query": input_data.query,
"useAutoprompt": input_data.use_auto_prompt,
"numResults": input_data.number_of_results,
"contents": input_data.contents.dict(),
}
date_field_mapping = {
"start_crawl_date": "startCrawlDate",
"end_crawl_date": "endCrawlDate",
"start_published_date": "startPublishedDate",
"end_published_date": "endPublishedDate",
}
# Add dates if they exist
for input_field, api_field in date_field_mapping.items():
value = getattr(input_data, input_field, None)
if value:
payload[api_field] = value.strftime("%Y-%m-%dT%H:%M:%S.000Z")
optional_field_mapping = {
"type": "type",
"category": "category",
"include_domains": "includeDomains",
"exclude_domains": "excludeDomains",
"include_text": "includeText",
"exclude_text": "excludeText",
}
# Add other fields
for input_field, api_field in optional_field_mapping.items():
value = getattr(input_data, input_field)
if value: # Only add non-empty values
payload[api_field] = value
try:
response = requests.post(url, headers=headers, json=payload)
response.raise_for_status()
data = response.json()
# Extract just the results array from the response
yield "results", data.get("results", [])
except Exception as e:
yield "error", str(e)
yield "results", []
|
from datetime import datetime
from typing import List
from backend.blocks.exa._auth import (
ExaCredentials,
ExaCredentialsField,
ExaCredentialsInput,
)
from backend.blocks.exa.helpers import ContentSettings
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
class ExaSearchBlock(Block):
class Input(BlockSchema):
credentials: ExaCredentialsInput = ExaCredentialsField()
query: str = SchemaField(description="The search query")
use_auto_prompt: bool = SchemaField(
description="Whether to use autoprompt",
default=True,
advanced=True,
)
type: str = SchemaField(
description="Type of search",
default="",
advanced=True,
)
category: str = SchemaField(
description="Category to search within",
default="",
advanced=True,
)
number_of_results: int = SchemaField(
description="Number of results to return",
default=10,
advanced=True,
)
include_domains: List[str] = SchemaField(
description="Domains to include in search",
default=[],
)
exclude_domains: List[str] = SchemaField(
description="Domains to exclude from search",
default=[],
advanced=True,
)
start_crawl_date: datetime = SchemaField(
description="Start date for crawled content",
)
end_crawl_date: datetime = SchemaField(
description="End date for crawled content",
)
start_published_date: datetime = SchemaField(
description="Start date for published content",
)
end_published_date: datetime = SchemaField(
description="End date for published content",
)
include_text: List[str] = SchemaField(
description="Text patterns to include",
default=[],
advanced=True,
)
exclude_text: List[str] = SchemaField(
description="Text patterns to exclude",
default=[],
advanced=True,
)
contents: ContentSettings = SchemaField(
description="Content retrieval settings",
default=ContentSettings(),
advanced=True,
)
class Output(BlockSchema):
results: list = SchemaField(
description="List of search results",
default=[],
)
def __init__(self):
super().__init__(
id="996cec64-ac40-4dde-982f-b0dc60a5824d",
description="Searches the web using Exa's advanced search API",
categories={BlockCategory.SEARCH},
input_schema=ExaSearchBlock.Input,
output_schema=ExaSearchBlock.Output,
)
def run(
self, input_data: Input, *, credentials: ExaCredentials, **kwargs
) -> BlockOutput:
url = "https://api.exa.ai/search"
headers = {
"Content-Type": "application/json",
"x-api-key": credentials.api_key.get_secret_value(),
}
payload = {
"query": input_data.query,
"useAutoprompt": input_data.use_auto_prompt,
"numResults": input_data.number_of_results,
"contents": input_data.contents.dict(),
}
date_field_mapping = {
"start_crawl_date": "startCrawlDate",
"end_crawl_date": "endCrawlDate",
"start_published_date": "startPublishedDate",
"end_published_date": "endPublishedDate",
}
# Add dates if they exist
for input_field, api_field in date_field_mapping.items():
value = getattr(input_data, input_field, None)
if value:
payload[api_field] = value.strftime("%Y-%m-%dT%H:%M:%S.000Z")
optional_field_mapping = {
"type": "type",
"category": "category",
"include_domains": "includeDomains",
"exclude_domains": "excludeDomains",
"include_text": "includeText",
"exclude_text": "excludeText",
}
# Add other fields
for input_field, api_field in optional_field_mapping.items():
value = getattr(input_data, input_field)
if value: # Only add non-empty values
payload[api_field] = value
try:
response = requests.post(url, headers=headers, json=payload)
response.raise_for_status()
data = response.json()
# Extract just the results array from the response
yield "results", data.get("results", [])
except Exception as e:
yield "error", str(e)
yield "results", []
|
try:
import sklearn
except ImportError:
sklearn = None
def _validate_data(estimator, *args, **kwargs):
"""Validate the input data.
    Wrapper for sklearn.utils.validation.validate_data or
BaseEstimator._validate_data depending on the scikit-learn version.
TODO: remove when minimum scikit-learn version is 1.6
"""
try:
# scikit-learn >= 1.6
from sklearn.utils.validation import validate_data
return validate_data(estimator, *args, **kwargs)
except ImportError:
return estimator._validate_data(*args, **kwargs)
def type_of_target(y, input_name="", *, raise_unknown=False):
def _raise_or_return(target_type):
"""Depending on the value of raise_unknown, either raise an error or
return 'unknown'.
"""
if raise_unknown and target_type == "unknown":
input = input_name if input_name else "data"
raise ValueError(f"Unknown label type for {input}: {y!r}")
else:
return target_type
target_type = sklearn.utils.multiclass.type_of_target(
y, input_name=input_name
)
return _raise_or_return(target_type)
def _routing_enabled():
"""Return whether metadata routing is enabled.
Returns:
enabled : bool
Whether metadata routing is enabled. If the config is not set, it
defaults to False.
TODO: remove when the config key is no longer available in scikit-learn
"""
return sklearn.get_config().get("enable_metadata_routing", False)
def _raise_for_params(params, owner, method):
"""Raise an error if metadata routing is not enabled and params are passed.
Parameters:
params : dict
The metadata passed to a method.
owner : object
The object to which the method belongs.
method : str
The name of the method, e.g. "fit".
Raises:
ValueError
If metadata routing is not enabled and params are passed.
"""
caller = (
f"{owner.__class__.__name__}.{method}"
if method
else owner.__class__.__name__
)
if not _routing_enabled() and params:
raise ValueError(
f"Passing extra keyword arguments to {caller} is only supported if"
" enable_metadata_routing=True, which you can set using"
" `sklearn.set_config`. See the User Guide"
" <https://scikit-learn.org/stable/metadata_routing.html> for more"
f" details. Extra parameters passed are: {set(params)}"
)
|
import sklearn
from packaging.version import parse as parse_version
from sklearn import get_config
sklearn_version = parse_version(parse_version(sklearn.__version__).base_version)
if sklearn_version < parse_version("1.6"):
def patched_more_tags(estimator, expected_failed_checks):
import copy
from sklearn.utils._tags import _safe_tags
original_tags = copy.deepcopy(_safe_tags(estimator))
def patched_more_tags(self):
original_tags.update({"_xfail_checks": expected_failed_checks})
return original_tags
estimator.__class__._more_tags = patched_more_tags
return estimator
def parametrize_with_checks(
estimators,
*,
legacy: bool = True,
expected_failed_checks=None,
):
# legacy is not supported and ignored
from sklearn.utils.estimator_checks import parametrize_with_checks # noqa: F401, I001
estimators = [
patched_more_tags(estimator, expected_failed_checks(estimator))
for estimator in estimators
]
return parametrize_with_checks(estimators)
else:
from sklearn.utils.estimator_checks import parametrize_with_checks # noqa: F401, I001
def _validate_data(estimator, *args, **kwargs):
"""Validate the input data.
    Wrapper for sklearn.utils.validation.validate_data or
BaseEstimator._validate_data depending on the scikit-learn version.
TODO: remove when minimum scikit-learn version is 1.6
"""
try:
# scikit-learn >= 1.6
from sklearn.utils.validation import validate_data
return validate_data(estimator, *args, **kwargs)
except ImportError:
return estimator._validate_data(*args, **kwargs)
def type_of_target(y, input_name="", *, raise_unknown=False):
# fix for raise_unknown which is introduced in scikit-learn 1.6
from sklearn.utils.multiclass import type_of_target
def _raise_or_return(target_type):
"""Depending on the value of raise_unknown, either raise an error or
return 'unknown'.
"""
if raise_unknown and target_type == "unknown":
input = input_name if input_name else "data"
raise ValueError(f"Unknown label type for {input}: {y!r}")
else:
return target_type
target_type = type_of_target(y, input_name=input_name)
return _raise_or_return(target_type)
def _routing_enabled():
"""Return whether metadata routing is enabled.
Returns:
enabled : bool
Whether metadata routing is enabled. If the config is not set, it
defaults to False.
TODO: remove when the config key is no longer available in scikit-learn
"""
return get_config().get("enable_metadata_routing", False)
def _raise_for_params(params, owner, method):
"""Raise an error if metadata routing is not enabled and params are passed.
Parameters:
params : dict
The metadata passed to a method.
owner : object
The object to which the method belongs.
method : str
The name of the method, e.g. "fit".
Raises:
ValueError
If metadata routing is not enabled and params are passed.
"""
caller = (
f"{owner.__class__.__name__}.{method}"
if method
else owner.__class__.__name__
)
if not _routing_enabled() and params:
raise ValueError(
f"Passing extra keyword arguments to {caller} is only supported if"
" enable_metadata_routing=True, which you can set using"
" `sklearn.set_config`. See the User Guide"
" <https://scikit-learn.org/stable/metadata_routing.html> for more"
f" details. Extra parameters passed are: {set(params)}"
)
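# --- Illustrative usage (a minimal sketch; `MeanEstimator` is hypothetical) ---
if __name__ == "__main__":
    import numpy as np
    from sklearn.base import BaseEstimator

    class MeanEstimator(BaseEstimator):
        """Toy estimator exercising the version-agnostic validation wrapper."""

        def fit(self, X, y=None):
            # Dispatches to validate_data (>= 1.6) or _validate_data (< 1.6).
            X = _validate_data(self, X, accept_sparse=False)
            self.mean_ = X.mean(axis=0)
            return self

    print(MeanEstimator().fit(np.arange(6.0).reshape(3, 2)).mean_)  # [2. 3.]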
|
from typing import Any, Dict, Union
import torch
from torchvision import datapoints, transforms as _transforms
from torchvision.transforms.v2 import functional as F, Transform
from .utils import is_simple_tensor
class ConvertBoundingBoxFormat(Transform):
_transformed_types = (datapoints.BoundingBox,)
def __init__(self, format: Union[str, datapoints.BoundingBoxFormat]) -> None:
super().__init__()
if isinstance(format, str):
format = datapoints.BoundingBoxFormat[format]
self.format = format
def _transform(self, inpt: datapoints.BoundingBox, params: Dict[str, Any]) -> datapoints.BoundingBox:
return F.convert_format_bounding_box(inpt, new_format=self.format) # type: ignore[return-value]
class ConvertDtype(Transform):
"""[BETA] Convert a tensor image/box/mask to the given ``dtype`` and scale the values accordingly
.. betastatus:: ConvertDtype transform
This function does not support PIL Image.
Args:
dtype (torch.dtype): Desired data type of the output
.. note::
When converting from a smaller to a larger integer ``dtype`` the maximum values are **not** mapped exactly.
If converted back and forth, this mismatch has no effect.
Raises:
RuntimeError: When trying to cast :class:`torch.float32` to :class:`torch.int32` or :class:`torch.int64` as
well as for trying to cast :class:`torch.float64` to :class:`torch.int64`. These conversions might lead to
overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range
of the integer ``dtype``.
"""
_v1_transform_cls = _transforms.ConvertImageDtype
_transformed_types = (is_simple_tensor, datapoints.Image, datapoints.Video)
def __init__(self, dtype: torch.dtype = torch.float32) -> None:
super().__init__()
self.dtype = dtype
def _transform(
self, inpt: Union[datapoints._TensorImageType, datapoints._TensorVideoType], params: Dict[str, Any]
) -> Union[datapoints._TensorImageType, datapoints._TensorVideoType]:
return F.convert_dtype(inpt, self.dtype)
# We changed the name to align it with the new naming scheme. Still, `ConvertImageDtype` is
# prevalent and well understood. Thus, we just alias it without deprecating the old name.
ConvertImageDtype = ConvertDtype
class ClampBoundingBox(Transform):
_transformed_types = (datapoints.BoundingBox,)
def _transform(self, inpt: datapoints.BoundingBox, params: Dict[str, Any]) -> datapoints.BoundingBox:
return F.clamp_bounding_box(inpt) # type: ignore[return-value]
|
from typing import Any, Dict, Union
import torch
from torchvision import datapoints, transforms as _transforms
from torchvision.transforms.v2 import functional as F, Transform
from .utils import is_simple_tensor
class ConvertBoundingBoxFormat(Transform):
_transformed_types = (datapoints.BoundingBox,)
def __init__(self, format: Union[str, datapoints.BoundingBoxFormat]) -> None:
super().__init__()
if isinstance(format, str):
format = datapoints.BoundingBoxFormat[format]
self.format = format
def _transform(self, inpt: datapoints.BoundingBox, params: Dict[str, Any]) -> datapoints.BoundingBox:
return F.convert_format_bounding_box(inpt, new_format=self.format) # type: ignore[return-value]
class ConvertDtype(Transform):
_v1_transform_cls = _transforms.ConvertImageDtype
_transformed_types = (is_simple_tensor, datapoints.Image, datapoints.Video)
def __init__(self, dtype: torch.dtype = torch.float32) -> None:
super().__init__()
self.dtype = dtype
def _transform(
self, inpt: Union[datapoints._TensorImageType, datapoints._TensorVideoType], params: Dict[str, Any]
) -> Union[datapoints._TensorImageType, datapoints._TensorVideoType]:
return F.convert_dtype(inpt, self.dtype)
# We changed the name to align it with the new naming scheme. Still, `ConvertImageDtype` is
# prevalent and well understood. Thus, we just alias it without deprecating the old name.
ConvertImageDtype = ConvertDtype
class ClampBoundingBox(Transform):
_transformed_types = (datapoints.BoundingBox,)
def _transform(self, inpt: datapoints.BoundingBox, params: Dict[str, Any]) -> datapoints.BoundingBox:
return F.clamp_bounding_box(inpt) # type: ignore[return-value]
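# --- Illustrative usage (a minimal sketch, assuming the beta v2 datapoints API) ---
if __name__ == "__main__":
    # Scale a uint8 image to float32 in [0, 1].
    image = datapoints.Image(torch.randint(0, 256, (3, 4, 4), dtype=torch.uint8))
    print(ConvertDtype(torch.float32)(image).dtype)  # torch.float32

    # Re-express a box from corner (XYXY) to center (CXCYWH) coordinates.
    box = datapoints.BoundingBox(
        [[0.0, 0.0, 2.0, 2.0]],
        format=datapoints.BoundingBoxFormat.XYXY,
        spatial_size=(4, 4),
    )
    print(ConvertBoundingBoxFormat("CXCYWH")(box))  # [[1., 1., 2., 2.]]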
|
import time
from jina import Flow
from tests.integration.instrumentation import ExecutorTestWithTracing, get_traces
def test_span_order(jaeger_port, otlp_collector, otlp_receiver_port):
f = Flow(
tracing=True,
traces_exporter_host='http://localhost',
traces_exporter_port=otlp_receiver_port,
).add(uses=ExecutorTestWithTracing)
with f:
from docarray import DocumentArray
        f.post('/search', DocumentArray.empty(), continue_on_error=True)
# give some time for the tracing and metrics exporters to finish exporting.
# the client is slow to export the data
time.sleep(8)
traces = get_traces(jaeger_port, 'executor0/rep-0')
process_single_data_span_ids = set()
search_request_parent_span_ids = set()
for trace in traces:
for span in trace['spans']:
if (
span['operationName']
== '/jina.JinaSingleDataRequestRPC/process_single_data'
):
process_single_data_span_ids.add(span['spanID'])
if span['operationName'] == '/search':
references = span.get('references', [])
for ref in references:
search_request_parent_span_ids.add(ref.get('spanID', ''))
assert any(
[
search_span in process_single_data_span_ids
for search_span in search_request_parent_span_ids
]
)
|
import time
from jina import Flow
from tests.integration.instrumentation import ExecutorTestWithTracing, get_traces
def test_span_order(jaeger_port, otlp_collector, otlp_receiver_port):
f = Flow(
tracing=True,
traces_exporter_host='http://localhost',
traces_exporter_port=otlp_receiver_port,
).add(uses=ExecutorTestWithTracing)
with f:
from jina import DocumentArray
        f.post('/search', DocumentArray.empty(), continue_on_error=True)
# give some time for the tracing and metrics exporters to finish exporting.
# the client is slow to export the data
time.sleep(8)
traces = get_traces(jaeger_port, 'executor0/rep-0')
process_single_data_span_ids = set()
search_request_parent_span_ids = set()
for trace in traces:
for span in trace['spans']:
if (
span['operationName']
== '/jina.JinaSingleDataRequestRPC/process_single_data'
):
process_single_data_span_ids.add(span['spanID'])
if span['operationName'] == '/search':
references = span.get('references', [])
for ref in references:
search_request_parent_span_ids.add(ref.get('spanID', ''))
assert any(
[
search_span in process_single_data_span_ids
for search_span in search_request_parent_span_ids
]
)
|
from torch import nn, Tensor
__all__ = [
"Wav2Letter",
]
class Wav2Letter(nn.Module):
r"""Wav2Letter model architecture from *Wav2Letter: an End-to-End ConvNet-based Speech
Recognition System* [:footcite:`collobert2016wav2letter`].
:math:`\text{padding} = \frac{\text{ceil}(\text{kernel} - \text{stride})}{2}`
Args:
num_classes (int, optional): Number of classes to be classified. (Default: ``40``)
input_type (str, optional): Wav2Letter can use as input: ``waveform``, ``power_spectrum``
or ``mfcc`` (Default: ``waveform``).
num_features (int, optional): Number of input features that the network will receive (Default: ``1``).
"""
def __init__(self, num_classes: int = 40, input_type: str = "waveform", num_features: int = 1) -> None:
super(Wav2Letter, self).__init__()
acoustic_num_features = 250 if input_type == "waveform" else num_features
acoustic_model = nn.Sequential(
nn.Conv1d(in_channels=acoustic_num_features, out_channels=250, kernel_size=48, stride=2, padding=23),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=2000, kernel_size=32, stride=1, padding=16),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=2000, out_channels=2000, kernel_size=1, stride=1, padding=0),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=2000, out_channels=num_classes, kernel_size=1, stride=1, padding=0),
nn.ReLU(inplace=True),
)
if input_type == "waveform":
waveform_model = nn.Sequential(
nn.Conv1d(in_channels=num_features, out_channels=250, kernel_size=250, stride=160, padding=45),
nn.ReLU(inplace=True),
)
self.acoustic_model = nn.Sequential(waveform_model, acoustic_model)
if input_type in ["power_spectrum", "mfcc"]:
self.acoustic_model = acoustic_model
def forward(self, x: Tensor) -> Tensor:
r"""
Args:
x (torch.Tensor): Tensor of dimension (batch_size, num_features, input_length).
Returns:
Tensor: Predictor tensor of dimension (batch_size, number_of_classes, input_length).
"""
x = self.acoustic_model(x)
x = nn.functional.log_softmax(x, dim=1)
return x
|
from torch import Tensor
from torch import nn
__all__ = [
"Wav2Letter",
]
class Wav2Letter(nn.Module):
r"""Wav2Letter model architecture from *Wav2Letter: an End-to-End ConvNet-based Speech
Recognition System* [:footcite:`collobert2016wav2letter`].
:math:`\text{padding} = \frac{\text{ceil}(\text{kernel} - \text{stride})}{2}`
Args:
num_classes (int, optional): Number of classes to be classified. (Default: ``40``)
input_type (str, optional): Wav2Letter can use as input: ``waveform``, ``power_spectrum``
or ``mfcc`` (Default: ``waveform``).
num_features (int, optional): Number of input features that the network will receive (Default: ``1``).
"""
def __init__(self, num_classes: int = 40, input_type: str = "waveform", num_features: int = 1) -> None:
super(Wav2Letter, self).__init__()
acoustic_num_features = 250 if input_type == "waveform" else num_features
acoustic_model = nn.Sequential(
nn.Conv1d(in_channels=acoustic_num_features, out_channels=250, kernel_size=48, stride=2, padding=23),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=2000, kernel_size=32, stride=1, padding=16),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=2000, out_channels=2000, kernel_size=1, stride=1, padding=0),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=2000, out_channels=num_classes, kernel_size=1, stride=1, padding=0),
nn.ReLU(inplace=True),
)
if input_type == "waveform":
waveform_model = nn.Sequential(
nn.Conv1d(in_channels=num_features, out_channels=250, kernel_size=250, stride=160, padding=45),
nn.ReLU(inplace=True),
)
self.acoustic_model = nn.Sequential(waveform_model, acoustic_model)
if input_type in ["power_spectrum", "mfcc"]:
self.acoustic_model = acoustic_model
def forward(self, x: Tensor) -> Tensor:
r"""
Args:
x (torch.Tensor): Tensor of dimension (batch_size, num_features, input_length).
Returns:
Tensor: Predictor tensor of dimension (batch_size, number_of_classes, input_length).
"""
x = self.acoustic_model(x)
x = nn.functional.log_softmax(x, dim=1)
return x
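# --- Illustrative usage (a minimal sketch) ---
if __name__ == "__main__":
    import torch

    model = Wav2Letter(num_classes=40, input_type="waveform", num_features=1)
    waveform = torch.randn(1, 1, 16000)  # (batch, num_features, input_length)
    log_probs = model(waveform)
    print(log_probs.shape)  # (1, 40, T); T follows from the conv kernels and strides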
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# Attempt to fix the fork error on macOS; it seems to have no effect, so the
# variable must still be exported manually before starting Jina.
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
        _warnings.warn('multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually; this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.25.2'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set nofile soft limit to at least 4096, useful for running matplotlib/seaborn on
    parallel executing plot generators vs. the Ubuntu default ulimit -n 1024 or OS X El Capitan's 256;
    the temporary setting expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# Attempt to fix the fork error on macOS; it seems to have no effect, so the
# variable must still be exported manually before starting Jina.
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
        _warnings.warn('multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.25.2'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set nofile soft limit to at least 4096, useful for running matplotlib/seaborn on
    parallel executing plot generators vs. the Ubuntu default ulimit -n 1024 or OS X El Capitan's 256;
    the temporary setting expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
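# --- Illustrative usage of the re-exports above (a minimal sketch;
# ``MyExecutor`` is hypothetical) ---
#
#     from jina import Deployment, Executor, requests
#
#     class MyExecutor(Executor):
#         @requests
#         def foo(self, docs, **kwargs):
#             ...
#
#     with Deployment(uses=MyExecutor) as dep:
#         dep.post(on='/')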
|
import types
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_shrink
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_sigmoid
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import soft_shrink
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import sparse_plus
from keras.src.activations.activations import sparsemax
from keras.src.activations.activations import squareplus
from keras.src.activations.activations import tanh
from keras.src.activations.activations import tanh_shrink
from keras.src.api_export import keras_export
from keras.src.saving import object_registration
from keras.src.saving import serialization_lib
ALL_OBJECTS = {
relu,
leaky_relu,
relu6,
softmax,
celu,
elu,
selu,
softplus,
softsign,
squareplus,
soft_shrink,
sparse_plus,
silu,
gelu,
glu,
tanh,
tanh_shrink,
sigmoid,
exponential,
hard_sigmoid,
hard_silu,
hard_tanh,
hard_shrink,
linear,
mish,
log_softmax,
log_sigmoid,
sparsemax,
}
ALL_OBJECTS_DICT = {fn.__name__: fn for fn in ALL_OBJECTS}
# Additional aliases
ALL_OBJECTS_DICT["swish"] = silu
ALL_OBJECTS_DICT["hard_swish"] = hard_silu
@keras_export("keras.activations.serialize")
def serialize(activation):
fn_config = serialization_lib.serialize_keras_object(activation)
if "config" not in fn_config:
raise ValueError(
f"Unknown activation function '{activation}' cannot be "
"serialized due to invalid function name. Make sure to use "
"an activation name that matches the references defined in "
"activations.py or use "
"`@keras.saving.register_keras_serializable()`"
"to register any custom activations. "
f"config={fn_config}"
)
if not isinstance(activation, types.FunctionType):
# Case for additional custom activations represented by objects
return fn_config
if (
isinstance(fn_config["config"], str)
and fn_config["config"] not in globals()
):
# Case for custom activation functions from external activations modules
fn_config["config"] = object_registration.get_registered_name(
activation
)
return fn_config
# Case for keras.activations builtins (simply return name)
return fn_config["config"]
@keras_export("keras.activations.deserialize")
def deserialize(config, custom_objects=None):
"""Return a Keras activation function via its config."""
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.activations.get")
def get(identifier):
"""Retrieve a Keras activation function via an identifier."""
if identifier is None:
return linear
if isinstance(identifier, dict):
obj = deserialize(identifier)
elif isinstance(identifier, str):
obj = ALL_OBJECTS_DICT.get(identifier, None)
else:
obj = identifier
if callable(obj):
return obj
raise ValueError(
f"Could not interpret activation function identifier: {identifier}"
)
|
import types
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_shrink
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_sigmoid
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import soft_shrink
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import sparse_plus
from keras.src.activations.activations import squareplus
from keras.src.activations.activations import tanh
from keras.src.activations.activations import tanh_shrink
from keras.src.api_export import keras_export
from keras.src.saving import object_registration
from keras.src.saving import serialization_lib
ALL_OBJECTS = {
relu,
leaky_relu,
relu6,
softmax,
celu,
elu,
selu,
softplus,
softsign,
squareplus,
soft_shrink,
sparse_plus,
silu,
gelu,
glu,
tanh,
tanh_shrink,
sigmoid,
exponential,
hard_sigmoid,
hard_silu,
hard_tanh,
hard_shrink,
linear,
mish,
log_softmax,
log_sigmoid,
}
ALL_OBJECTS_DICT = {fn.__name__: fn for fn in ALL_OBJECTS}
# Additional aliases
ALL_OBJECTS_DICT["swish"] = silu
ALL_OBJECTS_DICT["hard_swish"] = hard_silu
@keras_export("keras.activations.serialize")
def serialize(activation):
fn_config = serialization_lib.serialize_keras_object(activation)
if "config" not in fn_config:
raise ValueError(
f"Unknown activation function '{activation}' cannot be "
"serialized due to invalid function name. Make sure to use "
"an activation name that matches the references defined in "
"activations.py or use "
"`@keras.saving.register_keras_serializable()`"
"to register any custom activations. "
f"config={fn_config}"
)
if not isinstance(activation, types.FunctionType):
# Case for additional custom activations represented by objects
return fn_config
if (
isinstance(fn_config["config"], str)
and fn_config["config"] not in globals()
):
# Case for custom activation functions from external activations modules
fn_config["config"] = object_registration.get_registered_name(
activation
)
return fn_config
# Case for keras.activations builtins (simply return name)
return fn_config["config"]
@keras_export("keras.activations.deserialize")
def deserialize(config, custom_objects=None):
"""Return a Keras activation function via its config."""
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.activations.get")
def get(identifier):
"""Retrieve a Keras activation function via an identifier."""
if identifier is None:
return linear
if isinstance(identifier, dict):
obj = deserialize(identifier)
elif isinstance(identifier, str):
obj = ALL_OBJECTS_DICT.get(identifier, None)
else:
obj = identifier
if callable(obj):
return obj
raise ValueError(
f"Could not interpret activation function identifier: {identifier}"
)
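# --- Illustrative round-trip (a minimal sketch) ---
if __name__ == "__main__":
    fn = get("relu")  # resolve an activation by name
    assert fn is relu
    cfg = serialize(fn)  # builtins serialize to their plain name
    assert cfg == "relu"
    assert deserialize(cfg) is relu
    assert get("swish") is silu  # alias registered above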
|
from backend.data.credit import UsageTransactionMetadata, get_user_credit_model
from backend.data.execution import (
GraphExecution,
NodeExecutionResult,
RedisExecutionEventBus,
create_graph_execution,
get_graph_execution,
get_incomplete_node_executions,
get_latest_node_execution,
get_node_execution_results,
update_graph_execution_start_time,
update_graph_execution_stats,
update_node_execution_stats,
update_node_execution_status,
update_node_execution_status_batch,
upsert_execution_input,
upsert_execution_output,
)
from backend.data.graph import (
get_connected_output_nodes,
get_graph,
get_graph_metadata,
get_node,
)
from backend.data.notifications import (
create_or_add_to_user_notification_batch,
empty_user_notification_batch,
get_all_batches_by_type,
get_user_notification_batch,
get_user_notification_oldest_message_in_batch,
)
from backend.data.user import (
get_active_user_ids_in_timerange,
get_user_email_by_id,
get_user_email_verification,
get_user_integrations,
get_user_metadata,
get_user_notification_preference,
update_user_integrations,
update_user_metadata,
)
from backend.util.service import AppService, expose, exposed_run_and_wait
from backend.util.settings import Config
config = Config()
_user_credit_model = get_user_credit_model()
async def _spend_credits(
user_id: str, cost: int, metadata: UsageTransactionMetadata
) -> int:
return await _user_credit_model.spend_credits(user_id, cost, metadata)
class DatabaseManager(AppService):
def __init__(self):
super().__init__()
self.use_db = True
self.use_redis = True
self.execution_event_bus = RedisExecutionEventBus()
@classmethod
def get_port(cls) -> int:
return config.database_api_port
@expose
def send_execution_update(
self, execution_result: GraphExecution | NodeExecutionResult
):
self.execution_event_bus.publish(execution_result)
# Executions
get_graph_execution = exposed_run_and_wait(get_graph_execution)
create_graph_execution = exposed_run_and_wait(create_graph_execution)
get_node_execution_results = exposed_run_and_wait(get_node_execution_results)
get_incomplete_node_executions = exposed_run_and_wait(
get_incomplete_node_executions
)
get_latest_node_execution = exposed_run_and_wait(get_latest_node_execution)
update_node_execution_status = exposed_run_and_wait(update_node_execution_status)
update_node_execution_status_batch = exposed_run_and_wait(
update_node_execution_status_batch
)
update_graph_execution_start_time = exposed_run_and_wait(
update_graph_execution_start_time
)
update_graph_execution_stats = exposed_run_and_wait(update_graph_execution_stats)
update_node_execution_stats = exposed_run_and_wait(update_node_execution_stats)
upsert_execution_input = exposed_run_and_wait(upsert_execution_input)
upsert_execution_output = exposed_run_and_wait(upsert_execution_output)
# Graphs
get_node = exposed_run_and_wait(get_node)
get_graph = exposed_run_and_wait(get_graph)
get_connected_output_nodes = exposed_run_and_wait(get_connected_output_nodes)
get_graph_metadata = exposed_run_and_wait(get_graph_metadata)
# Credits
spend_credits = exposed_run_and_wait(_spend_credits)
# User + User Metadata + User Integrations
get_user_metadata = exposed_run_and_wait(get_user_metadata)
update_user_metadata = exposed_run_and_wait(update_user_metadata)
get_user_integrations = exposed_run_and_wait(get_user_integrations)
update_user_integrations = exposed_run_and_wait(update_user_integrations)
# User Comms - async
get_active_user_ids_in_timerange = exposed_run_and_wait(
get_active_user_ids_in_timerange
)
get_user_email_by_id = exposed_run_and_wait(get_user_email_by_id)
get_user_email_verification = exposed_run_and_wait(get_user_email_verification)
get_user_notification_preference = exposed_run_and_wait(
get_user_notification_preference
)
# Notifications - async
create_or_add_to_user_notification_batch = exposed_run_and_wait(
create_or_add_to_user_notification_batch
)
empty_user_notification_batch = exposed_run_and_wait(empty_user_notification_batch)
get_all_batches_by_type = exposed_run_and_wait(get_all_batches_by_type)
get_user_notification_batch = exposed_run_and_wait(get_user_notification_batch)
get_user_notification_oldest_message_in_batch = exposed_run_and_wait(
get_user_notification_oldest_message_in_batch
)
|
from backend.data.credit import UsageTransactionMetadata, get_user_credit_model
from backend.data.execution import (
GraphExecutionMeta,
NodeExecutionResult,
RedisExecutionEventBus,
create_graph_execution,
get_incomplete_node_executions,
get_latest_node_execution,
get_node_execution_results,
update_graph_execution_start_time,
update_graph_execution_stats,
update_node_execution_stats,
update_node_execution_status,
update_node_execution_status_batch,
upsert_execution_input,
upsert_execution_output,
)
from backend.data.graph import (
get_connected_output_nodes,
get_graph,
get_graph_metadata,
get_node,
)
from backend.data.notifications import (
create_or_add_to_user_notification_batch,
empty_user_notification_batch,
get_all_batches_by_type,
get_user_notification_batch,
get_user_notification_oldest_message_in_batch,
)
from backend.data.user import (
get_active_user_ids_in_timerange,
get_user_email_by_id,
get_user_email_verification,
get_user_integrations,
get_user_metadata,
get_user_notification_preference,
update_user_integrations,
update_user_metadata,
)
from backend.util.service import AppService, expose, exposed_run_and_wait
from backend.util.settings import Config
config = Config()
_user_credit_model = get_user_credit_model()
async def _spend_credits(
user_id: str, cost: int, metadata: UsageTransactionMetadata
) -> int:
return await _user_credit_model.spend_credits(user_id, cost, metadata)
class DatabaseManager(AppService):
def __init__(self):
super().__init__()
self.use_db = True
self.use_redis = True
self.execution_event_bus = RedisExecutionEventBus()
@classmethod
def get_port(cls) -> int:
return config.database_api_port
@expose
def send_execution_update(
self, execution_result: GraphExecutionMeta | NodeExecutionResult
):
self.execution_event_bus.publish(execution_result)
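    # The assignments below wrap async data-layer coroutines so they can be
    # called synchronously across the service boundary (as the name
    # `exposed_run_and_wait` suggests; the RPC mechanics live in
    # backend.util.service).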
# Executions
create_graph_execution = exposed_run_and_wait(create_graph_execution)
get_node_execution_results = exposed_run_and_wait(get_node_execution_results)
get_incomplete_node_executions = exposed_run_and_wait(
get_incomplete_node_executions
)
get_latest_node_execution = exposed_run_and_wait(get_latest_node_execution)
update_node_execution_status = exposed_run_and_wait(update_node_execution_status)
update_node_execution_status_batch = exposed_run_and_wait(
update_node_execution_status_batch
)
update_graph_execution_start_time = exposed_run_and_wait(
update_graph_execution_start_time
)
update_graph_execution_stats = exposed_run_and_wait(update_graph_execution_stats)
update_node_execution_stats = exposed_run_and_wait(update_node_execution_stats)
upsert_execution_input = exposed_run_and_wait(upsert_execution_input)
upsert_execution_output = exposed_run_and_wait(upsert_execution_output)
# Graphs
get_node = exposed_run_and_wait(get_node)
get_graph = exposed_run_and_wait(get_graph)
get_connected_output_nodes = exposed_run_and_wait(get_connected_output_nodes)
get_graph_metadata = exposed_run_and_wait(get_graph_metadata)
# Credits
spend_credits = exposed_run_and_wait(_spend_credits)
# User + User Metadata + User Integrations
get_user_metadata = exposed_run_and_wait(get_user_metadata)
update_user_metadata = exposed_run_and_wait(update_user_metadata)
get_user_integrations = exposed_run_and_wait(get_user_integrations)
update_user_integrations = exposed_run_and_wait(update_user_integrations)
# User Comms - async
get_active_user_ids_in_timerange = exposed_run_and_wait(
get_active_user_ids_in_timerange
)
get_user_email_by_id = exposed_run_and_wait(get_user_email_by_id)
get_user_email_verification = exposed_run_and_wait(get_user_email_verification)
get_user_notification_preference = exposed_run_and_wait(
get_user_notification_preference
)
# Notifications - async
create_or_add_to_user_notification_batch = exposed_run_and_wait(
create_or_add_to_user_notification_batch
)
empty_user_notification_batch = exposed_run_and_wait(empty_user_notification_batch)
get_all_batches_by_type = exposed_run_and_wait(get_all_batches_by_type)
get_user_notification_batch = exposed_run_and_wait(get_user_notification_batch)
get_user_notification_oldest_message_in_batch = exposed_run_and_wait(
get_user_notification_oldest_message_in_batch
)
|
"""Various utilities to help with development."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import platform
import warnings
from collections.abc import Sequence
import numpy as np
from ..exceptions import DataConversionWarning
from . import _joblib, metadata_routing
from ._bunch import Bunch
from ._chunking import gen_batches, gen_even_slices
from ._estimator_html_repr import estimator_html_repr
# Make _safe_indexing importable from here for backward compat as this particular
# helper is considered semi-private and typically very useful for third-party
# libraries that want to comply with scikit-learn's estimator API. In particular,
# _safe_indexing was included in our public API documentation despite the leading
# `_` in its name.
from ._indexing import (
_safe_indexing, # noqa
resample,
shuffle,
)
from ._mask import safe_mask
from ._tags import (
ClassifierTags,
InputTags,
RegressorTags,
Tags,
TargetTags,
TransformerTags,
get_tags,
)
from .class_weight import compute_class_weight, compute_sample_weight
from .deprecation import deprecated
from .discovery import all_estimators
from .extmath import safe_sqr
from .murmurhash import murmurhash3_32
from .validation import (
as_float_array,
assert_all_finite,
check_array,
check_consistent_length,
check_random_state,
check_scalar,
check_symmetric,
check_X_y,
column_or_1d,
indexable,
)
# TODO(1.7): remove parallel_backend and register_parallel_backend
msg = "deprecated in 1.5 to be removed in 1.7. Use joblib.{} instead."
register_parallel_backend = deprecated(msg)(_joblib.register_parallel_backend)
# if a class, deprecated will change the object in _joblib module so we need to subclass
@deprecated(msg)
class parallel_backend(_joblib.parallel_backend):
pass
__all__ = [
"murmurhash3_32",
"as_float_array",
"assert_all_finite",
"check_array",
"check_random_state",
"compute_class_weight",
"compute_sample_weight",
"column_or_1d",
"check_consistent_length",
"check_X_y",
"check_scalar",
"indexable",
"check_symmetric",
"deprecated",
"parallel_backend",
"register_parallel_backend",
"resample",
"shuffle",
"all_estimators",
"DataConversionWarning",
"estimator_html_repr",
"Bunch",
"metadata_routing",
"safe_sqr",
"safe_mask",
"gen_batches",
"gen_even_slices",
"Tags",
"InputTags",
"TargetTags",
"ClassifierTags",
"RegressorTags",
"TransformerTags",
"get_tags",
]
# TODO(1.7): remove
def __getattr__(name):
if name == "IS_PYPY":
warnings.warn(
"IS_PYPY is deprecated and will be removed in 1.7.",
FutureWarning,
)
return platform.python_implementation() == "PyPy"
raise AttributeError(f"module {__name__} has no attribute {name}")
# TODO(1.7): remove tosequence
@deprecated("tosequence was deprecated in 1.5 and will be removed in 1.7")
def tosequence(x):
"""Cast iterable x to a Sequence, avoiding a copy if possible.
Parameters
----------
x : iterable
The iterable to be converted.
Returns
-------
x : Sequence
If `x` is a NumPy array, it returns it as a `ndarray`. If `x`
        is a `Sequence`, `x` is returned as-is. If `x` is of any other
        type, `x` is returned cast as a list.
"""
if isinstance(x, np.ndarray):
return np.asarray(x)
elif isinstance(x, Sequence):
return x
else:
return list(x)
|
"""Various utilities to help with development."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import platform
import warnings
from collections.abc import Sequence
import numpy as np
from ..exceptions import DataConversionWarning
from . import _joblib, metadata_routing
from ._bunch import Bunch
from ._chunking import gen_batches, gen_even_slices
from ._estimator_html_repr import estimator_html_repr
# Make _safe_indexing importable from here for backward compat as this particular
# helper is considered semi-private and typically very useful for third-party
# libraries that want to comply with scikit-learn's estimator API. In particular,
# _safe_indexing was included in our public API documentation despite the leading
# `_` in its name.
from ._indexing import (
_safe_indexing, # noqa
resample,
shuffle,
)
from ._mask import safe_mask
from ._tags import (
ClassifierTags,
InputTags,
RegressorTags,
Tags,
TargetTags,
TransformerTags,
default_tags,
get_tags,
)
from .class_weight import compute_class_weight, compute_sample_weight
from .deprecation import deprecated
from .discovery import all_estimators
from .extmath import safe_sqr
from .murmurhash import murmurhash3_32
from .validation import (
as_float_array,
assert_all_finite,
check_array,
check_consistent_length,
check_random_state,
check_scalar,
check_symmetric,
check_X_y,
column_or_1d,
indexable,
)
# TODO(1.7): remove parallel_backend and register_parallel_backend
msg = "deprecated in 1.5 to be removed in 1.7. Use joblib.{} instead."
register_parallel_backend = deprecated(msg)(_joblib.register_parallel_backend)
# if a class, deprecated will change the object in _joblib module so we need to subclass
@deprecated(msg)
class parallel_backend(_joblib.parallel_backend):
pass
__all__ = [
"murmurhash3_32",
"as_float_array",
"assert_all_finite",
"check_array",
"check_random_state",
"compute_class_weight",
"compute_sample_weight",
"column_or_1d",
"check_consistent_length",
"check_X_y",
"check_scalar",
"indexable",
"check_symmetric",
"deprecated",
"parallel_backend",
"register_parallel_backend",
"resample",
"shuffle",
"all_estimators",
"DataConversionWarning",
"estimator_html_repr",
"Bunch",
"metadata_routing",
"safe_sqr",
"safe_mask",
"gen_batches",
"gen_even_slices",
"Tags",
"InputTags",
"TargetTags",
"ClassifierTags",
"RegressorTags",
"TransformerTags",
"default_tags",
"get_tags",
]
# TODO(1.7): remove
def __getattr__(name):
if name == "IS_PYPY":
warnings.warn(
"IS_PYPY is deprecated and will be removed in 1.7.",
FutureWarning,
)
return platform.python_implementation() == "PyPy"
raise AttributeError(f"module {__name__} has no attribute {name}")
# TODO(1.7): remove tosequence
@deprecated("tosequence was deprecated in 1.5 and will be removed in 1.7")
def tosequence(x):
"""Cast iterable x to a Sequence, avoiding a copy if possible.
Parameters
----------
x : iterable
The iterable to be converted.
Returns
-------
x : Sequence
If `x` is a NumPy array, it returns it as a `ndarray`. If `x`
        is a `Sequence`, `x` is returned as-is. If `x` is of any other
        type, `x` is returned cast as a list.
"""
if isinstance(x, np.ndarray):
return np.asarray(x)
elif isinstance(x, Sequence):
return x
else:
return list(x)
|
from langchain_core.example_selectors.semantic_similarity import (
MaxMarginalRelevanceExampleSelector,
SemanticSimilarityExampleSelector,
sorted_values,
)
__all__ = [
"MaxMarginalRelevanceExampleSelector",
"SemanticSimilarityExampleSelector",
"sorted_values",
]
|
from langchain_core.example_selectors.semantic_similarity import (
MaxMarginalRelevanceExampleSelector,
SemanticSimilarityExampleSelector,
sorted_values,
)
__all__ = [
"sorted_values",
"SemanticSimilarityExampleSelector",
"MaxMarginalRelevanceExampleSelector",
]
|
import sys
import traceback
from importlib.machinery import SourceFileLoader
if __name__ == "__main__":
files = sys.argv[1:]
has_failure = False
for file in files:
try:
SourceFileLoader("x", file).load_module()
except Exception:
has_failure = True
traceback.print_exc()
sys.exit(1 if has_failure else 0)
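# Usage sketch (assuming this script is saved as check_imports.py; the file
# names are hypothetical placeholders):
#
#     python check_imports.py pkg/module_a.py pkg/module_b.py
#
# Each file is loaded as a module in turn; the exit code is 1 if any import fails.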
|
import sys
import traceback
from importlib.machinery import SourceFileLoader
if __name__ == "__main__":
files = sys.argv[1:]
has_failure = False
for file in files:
try:
SourceFileLoader("x", file).load_module()
except Exception:
            has_failure = True
traceback.print_exc()
sys.exit(1 if has_failure else 0)
|
# Copyright (c) OpenMMLab. All rights reserved.
"""This file holding some environment constant for sharing by other files."""
import os.path as osp
import subprocess
import sys
from collections import OrderedDict, defaultdict
from distutils import errors
import cv2
import numpy as np
import torch
import mmengine
from .parrots_wrapper import TORCH_VERSION, get_build_config, is_rocm_pytorch
def _get_cuda_home():
if TORCH_VERSION == 'parrots':
from parrots.utils.build_extension import CUDA_HOME
else:
if is_rocm_pytorch():
from torch.utils.cpp_extension import ROCM_HOME
CUDA_HOME = ROCM_HOME
else:
from torch.utils.cpp_extension import CUDA_HOME
return CUDA_HOME
def collect_env():
"""Collect the information of the running environments.
Returns:
dict: The environment information. The following fields are contained.
- sys.platform: The variable of ``sys.platform``.
- Python: Python version.
- CUDA available: Bool, indicating if CUDA is available.
- GPU devices: Device type of each GPU.
- CUDA_HOME (optional): The env var ``CUDA_HOME``.
- NVCC (optional): NVCC version.
- GCC: GCC version, "n/a" if GCC is not installed.
    - MSVC: Microsoft Visual C++ Compiler version, Windows only.
- PyTorch: PyTorch version.
- PyTorch compiling details: The output of \
``torch.__config__.show()``.
- TorchVision (optional): TorchVision version.
- OpenCV (optional): OpenCV version.
- MMENGINE: MMENGINE version.
"""
env_info = OrderedDict()
env_info['sys.platform'] = sys.platform
env_info['Python'] = sys.version.replace('\n', '')
cuda_available = torch.cuda.is_available()
env_info['CUDA available'] = cuda_available
env_info['numpy_random_seed'] = np.random.get_state()[1][0]
if cuda_available:
devices = defaultdict(list)
for k in range(torch.cuda.device_count()):
devices[torch.cuda.get_device_name(k)].append(str(k))
for name, device_ids in devices.items():
env_info['GPU ' + ','.join(device_ids)] = name
CUDA_HOME = _get_cuda_home()
env_info['CUDA_HOME'] = CUDA_HOME
if CUDA_HOME is not None and osp.isdir(CUDA_HOME):
try:
nvcc = osp.join(CUDA_HOME, 'bin/nvcc')
nvcc = subprocess.check_output(f'"{nvcc}" -V', shell=True)
nvcc = nvcc.decode('utf-8').strip()
release = nvcc.rfind('Cuda compilation tools')
build = nvcc.rfind('Build ')
nvcc = nvcc[release:build].strip()
except subprocess.SubprocessError:
nvcc = 'Not Available'
env_info['NVCC'] = nvcc
try:
# Check C++ Compiler.
        # On Unix-like systems, sysconfig has a 'CC' variable like
        # 'gcc -pthread ...' indicating the compiler used; we use this to
        # get the compiler name.
import sysconfig
cc = sysconfig.get_config_var('CC')
if cc:
cc = osp.basename(cc.split()[0])
cc_info = subprocess.check_output(f'{cc} --version', shell=True)
env_info['GCC'] = cc_info.decode('utf-8').partition(
'\n')[0].strip()
else:
# on Windows, cl.exe is not in PATH. We need to find the path.
# distutils.ccompiler.new_compiler() returns a msvccompiler
# object and after initialization, path to cl.exe is found.
import locale
import os
from distutils.ccompiler import new_compiler
ccompiler = new_compiler()
ccompiler.initialize()
cc = subprocess.check_output(
f'{ccompiler.cc}', stderr=subprocess.STDOUT, shell=True)
encoding = os.device_encoding(
sys.stdout.fileno()) or locale.getpreferredencoding()
env_info['MSVC'] = cc.decode(encoding).partition('\n')[0].strip()
env_info['GCC'] = 'n/a'
except (subprocess.CalledProcessError, errors.DistutilsPlatformError):
env_info['GCC'] = 'n/a'
env_info['PyTorch'] = torch.__version__
env_info['PyTorch compiling details'] = get_build_config()
try:
import torchvision
env_info['TorchVision'] = torchvision.__version__
except ModuleNotFoundError:
pass
env_info['OpenCV'] = cv2.__version__
env_info['MMEngine'] = mmengine.__version__
return env_info
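# Usage sketch: `collect_env` returns an OrderedDict, so the report can be
# printed one field per line.
#
#     if __name__ == '__main__':
#         for name, value in collect_env().items():
#             print(f'{name}: {value}')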
|
# Copyright (c) OpenMMLab. All rights reserved.
"""This file holding some environment constant for sharing by other files."""
import os.path as osp
import subprocess
import sys
from collections import OrderedDict, defaultdict
import cv2
import numpy as np
import torch
import mmengine
from .parrots_wrapper import TORCH_VERSION, get_build_config, is_rocm_pytorch
def _get_cuda_home():
if TORCH_VERSION == 'parrots':
from parrots.utils.build_extension import CUDA_HOME
else:
if is_rocm_pytorch():
from torch.utils.cpp_extension import ROCM_HOME
CUDA_HOME = ROCM_HOME
else:
from torch.utils.cpp_extension import CUDA_HOME
return CUDA_HOME
def collect_env():
"""Collect the information of the running environments.
Returns:
dict: The environment information. The following fields are contained.
- sys.platform: The variable of ``sys.platform``.
- Python: Python version.
- CUDA available: Bool, indicating if CUDA is available.
- GPU devices: Device type of each GPU.
- CUDA_HOME (optional): The env var ``CUDA_HOME``.
- NVCC (optional): NVCC version.
- GCC: GCC version, "n/a" if GCC is not installed.
    - MSVC: Microsoft Visual C++ Compiler version, Windows only.
- PyTorch: PyTorch version.
- PyTorch compiling details: The output of \
``torch.__config__.show()``.
- TorchVision (optional): TorchVision version.
- OpenCV (optional): OpenCV version.
- MMENGINE: MMENGINE version.
"""
env_info = OrderedDict()
env_info['sys.platform'] = sys.platform
env_info['Python'] = sys.version.replace('\n', '')
cuda_available = torch.cuda.is_available()
env_info['CUDA available'] = cuda_available
env_info['numpy_random_seed'] = np.random.get_state()[1][0]
if cuda_available:
devices = defaultdict(list)
for k in range(torch.cuda.device_count()):
devices[torch.cuda.get_device_name(k)].append(str(k))
for name, device_ids in devices.items():
env_info['GPU ' + ','.join(device_ids)] = name
CUDA_HOME = _get_cuda_home()
env_info['CUDA_HOME'] = CUDA_HOME
if CUDA_HOME is not None and osp.isdir(CUDA_HOME):
try:
nvcc = osp.join(CUDA_HOME, 'bin/nvcc')
nvcc = subprocess.check_output(f'"{nvcc}" -V', shell=True)
nvcc = nvcc.decode('utf-8').strip()
release = nvcc.rfind('Cuda compilation tools')
build = nvcc.rfind('Build ')
nvcc = nvcc[release:build].strip()
except subprocess.SubprocessError:
nvcc = 'Not Available'
env_info['NVCC'] = nvcc
try:
# Check C++ Compiler.
        # On Unix-like systems, sysconfig has a 'CC' variable like
        # 'gcc -pthread ...' indicating the compiler used; we use this to
        # get the compiler name.
import sysconfig
cc = sysconfig.get_config_var('CC')
if cc:
cc = osp.basename(cc.split()[0])
cc_info = subprocess.check_output(f'{cc} --version', shell=True)
env_info['GCC'] = cc_info.decode('utf-8').partition(
'\n')[0].strip()
else:
# on Windows, cl.exe is not in PATH. We need to find the path.
# distutils.ccompiler.new_compiler() returns a msvccompiler
# object and after initialization, path to cl.exe is found.
import locale
import os
from distutils.ccompiler import new_compiler
ccompiler = new_compiler()
ccompiler.initialize()
cc = subprocess.check_output(
f'{ccompiler.cc}', stderr=subprocess.STDOUT, shell=True)
encoding = os.device_encoding(
sys.stdout.fileno()) or locale.getpreferredencoding()
env_info['MSVC'] = cc.decode(encoding).partition('\n')[0].strip()
env_info['GCC'] = 'n/a'
except subprocess.CalledProcessError:
env_info['GCC'] = 'n/a'
env_info['PyTorch'] = torch.__version__
env_info['PyTorch compiling details'] = get_build_config()
try:
import torchvision
env_info['TorchVision'] = torchvision.__version__
except ModuleNotFoundError:
pass
env_info['OpenCV'] = cv2.__version__
env_info['MMEngine'] = mmengine.__version__
return env_info
|
# Copyright (c) OpenMMLab. All rights reserved.
from .vis_backend import (BaseVisBackend, ClearMLVisBackend, DVCLiveVisBackend,
LocalVisBackend, MLflowVisBackend, NeptuneVisBackend,
TensorboardVisBackend, WandbVisBackend)
from .visualizer import Visualizer
__all__ = [
'Visualizer', 'BaseVisBackend', 'LocalVisBackend', 'WandbVisBackend',
'TensorboardVisBackend', 'MLflowVisBackend', 'ClearMLVisBackend',
'NeptuneVisBackend', 'DVCLiveVisBackend'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .vis_backend import (BaseVisBackend, ClearMLVisBackend, LocalVisBackend,
MLflowVisBackend, NeptuneVisBackend,
TensorboardVisBackend, WandbVisBackend)
from .visualizer import Visualizer
__all__ = [
'Visualizer', 'BaseVisBackend', 'LocalVisBackend', 'WandbVisBackend',
'TensorboardVisBackend', 'MLflowVisBackend', 'ClearMLVisBackend',
'NeptuneVisBackend'
]
|
_base_ = [
'../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py'
]
# model settings
model = dict(
type='CornerNet',
backbone=dict(
type='HourglassNet',
downsample_times=5,
num_stacks=2,
stage_channels=[256, 256, 384, 384, 384, 512],
stage_blocks=[2, 2, 2, 2, 2, 4],
norm_cfg=dict(type='BN', requires_grad=True)),
neck=None,
bbox_head=dict(
type='CornerHead',
num_classes=80,
in_channels=256,
num_feat_levels=2,
corner_emb_channels=1,
loss_heatmap=dict(
type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1),
loss_embedding=dict(
type='AssociativeEmbeddingLoss',
pull_weight=0.10,
push_weight=0.10),
loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1)),
# training and testing settings
train_cfg=None,
test_cfg=dict(
corner_topk=100,
local_maximum_kernel=3,
distance_threshold=0.5,
score_thr=0.05,
max_per_img=100,
nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian')))
# data settings
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='RandomCenterCropPad',
crop_size=(511, 511),
ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
test_mode=False,
test_pad_mode=None,
**img_norm_cfg),
dict(type='Resize', img_scale=(511, 511), keep_ratio=False),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(
type='MultiScaleFlipAug',
scale_factor=1.0,
flip=True,
transforms=[
dict(type='Resize'),
dict(
type='RandomCenterCropPad',
crop_size=None,
ratios=None,
border=None,
test_mode=True,
test_pad_mode=['logical_or', 127],
**img_norm_cfg),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(
type='Collect',
keys=['img'],
meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape',
'scale_factor', 'flip', 'img_norm_cfg', 'border')),
])
]
data = dict(
samples_per_gpu=5,
workers_per_gpu=3,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='Adam', lr=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[180])
runner = dict(type='EpochBasedRunner', max_epochs=210)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (10 GPUs) x (5 samples per GPU)
auto_scale_lr = dict(base_batch_size=50)
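# A sketch of the linear scaling rule that `auto_scale_lr` is based on (an
# assumption about how the runner applies it, for illustration only):
#
#     scaled_lr = optimizer['lr'] * actual_batch_size / auto_scale_lr['base_batch_size']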
|
_base_ = [
'../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py'
]
# model settings
model = dict(
type='CornerNet',
backbone=dict(
type='HourglassNet',
downsample_times=5,
num_stacks=2,
stage_channels=[256, 256, 384, 384, 384, 512],
stage_blocks=[2, 2, 2, 2, 2, 4],
norm_cfg=dict(type='BN', requires_grad=True)),
neck=None,
bbox_head=dict(
type='CornerHead',
num_classes=80,
in_channels=256,
num_feat_levels=2,
corner_emb_channels=1,
loss_heatmap=dict(
type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1),
loss_embedding=dict(
type='AssociativeEmbeddingLoss',
pull_weight=0.10,
push_weight=0.10),
loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1)),
# training and testing settings
train_cfg=None,
test_cfg=dict(
corner_topk=100,
local_maximum_kernel=3,
distance_threshold=0.5,
score_thr=0.05,
max_per_img=100,
nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian')))
# data settings
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='RandomCenterCropPad',
crop_size=(511, 511),
ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
test_mode=False,
test_pad_mode=None,
**img_norm_cfg),
dict(type='Resize', img_scale=(511, 511), keep_ratio=False),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(
type='MultiScaleFlipAug',
scale_factor=1.0,
flip=True,
transforms=[
dict(type='Resize'),
dict(
type='RandomCenterCropPad',
crop_size=None,
ratios=None,
border=None,
test_mode=True,
test_pad_mode=['logical_or', 127],
**img_norm_cfg),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(
type='Collect',
keys=['img'],
meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape',
'scale_factor', 'flip', 'img_norm_cfg', 'border')),
])
]
data = dict(
samples_per_gpu=5,
workers_per_gpu=3,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='Adam', lr=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[180])
runner = dict(type='EpochBasedRunner', max_epochs=210)
|
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers.losses.TripletLoss import TripletDistanceMetric, TripletLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseTripletLoss(TripletLoss):
def __init__(
self, model: SparseEncoder, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5
) -> None:
"""
This class implements triplet loss. Given a triplet of (anchor, positive, negative),
the loss minimizes the distance between anchor and positive while it maximizes the distance
        between anchor and negative. It computes the following loss function:
        ``loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0)``.
        The margin is an important hyperparameter and needs to be tuned accordingly.
Args:
model: SparseEncoder
distance_metric: Function to compute distance between two
embeddings. The class TripletDistanceMetric contains
                common distance metrics that can be used.
triplet_margin: The negative should be at least this much
further away from the anchor than the positive.
References:
- For further details, see: https://en.wikipedia.org/wiki/Triplet_loss
Requirements:
            1. Needs to be used within SpladeLoss or CSRLoss as a loss function.
2. (anchor, positive, negative) triplets
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
"negative": ["It's quite rainy, sadly.", "She walked to the store."],
}
)
loss = losses.SpladeLoss(
model=model, loss=losses.SparseTripletLoss(model), corpus_regularizer_weight=3e-5, query_regularizer_weight=5e-5
)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
super().__init__(model, distance_metric=distance_metric, triplet_margin=triplet_margin)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError("SparseTripletLoss should not be used alone. Use it with SpladeLoss or CSRLoss.")
|
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers.losses.TripletLoss import TripletDistanceMetric, TripletLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseTripletLoss(TripletLoss):
def __init__(
self, model: SparseEncoder, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5
) -> None:
"""
This class implements triplet loss. Given a triplet of (anchor, positive, negative),
the loss minimizes the distance between anchor and positive while it maximizes the distance
        between anchor and negative. It computes the following loss function:
        ``loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0)``.
        The margin is an important hyperparameter and needs to be tuned accordingly.
Args:
model: SparseEncoder
distance_metric: Function to compute distance between two
embeddings. The class TripletDistanceMetric contains
                common distance metrics that can be used.
triplet_margin: The negative should be at least this much
further away from the anchor than the positive.
References:
- For further details, see: https://en.wikipedia.org/wiki/Triplet_loss
Requirements:
            1. Needs to be used within SpladeLoss or CSRLoss as a loss function.
2. (anchor, positive, negative) triplets
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
"negative": ["It's quite rainy, sadly.", "She walked to the store."],
}
)
loss = losses.SpladeLoss(model=model, loss=losses.SparseTripletLoss(model), lambda_corpus=3e-5, lambda_query=5e-5)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
super().__init__(model, distance_metric=distance_metric, triplet_margin=triplet_margin)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError("SparseTripletLoss should not be used alone. Use it with SpladeLoss or CSRLoss.")
|
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
fp16 = dict(loss_scale=512.)
|
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
fp16 = dict(loss_scale=512.)
|
"""Weaviate reader."""
from typing import Any, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class WeaviateReader(BaseReader):
"""
Weaviate reader.
    Retrieves documents from Weaviate through vector lookup. Offers the option
    to concatenate the retrieved documents into one Document, or to return
    a separate Document object per retrieved document.
Args:
host (str): host.
auth_client_secret (Optional[weaviate.auth.AuthCredentials]):
auth_client_secret.
"""
def __init__(
self,
host: str,
auth_client_secret: Optional[Any] = None,
) -> None:
"""Initialize with parameters."""
try:
import weaviate # noqa
from weaviate import Client
from weaviate.auth import AuthCredentials # noqa
except ImportError:
raise ImportError(
"`weaviate` package not found, please run `pip install weaviate-client`"
)
self.client: Client = Client(host, auth_client_secret=auth_client_secret)
def load_data(
self,
class_name: Optional[str] = None,
properties: Optional[List[str]] = None,
graphql_query: Optional[str] = None,
separate_documents: Optional[bool] = True,
) -> List[Document]:
"""
Load data from Weaviate.
        If `graphql_query` is not provided, we assume that
        `class_name` and `properties` are provided.
Args:
class_name (Optional[str]): class_name to retrieve documents from.
properties (Optional[List[str]]): properties to retrieve from documents.
graphql_query (Optional[str]): Raw GraphQL Query.
We assume that the query is a Get query.
separate_documents (Optional[bool]): Whether to return separate
documents. Defaults to True.
Returns:
List[Document]: A list of documents.
"""
if class_name is not None and properties is not None:
props_txt = "\n".join(properties)
graphql_query = f"""
{{
Get {{
{class_name} {{
{props_txt}
}}
}}
}}
"""
elif graphql_query is not None:
pass
else:
raise ValueError(
"Either `class_name` and `properties` must be specified, "
"or `graphql_query` must be specified."
)
response = self.client.query.raw(graphql_query)
if "errors" in response:
raise ValueError("Invalid query, got errors: {}".format(response["errors"]))
data_response = response["data"]
if "Get" not in data_response:
raise ValueError("Invalid query response, must be a Get query.")
if class_name is None:
# infer class_name if only graphql_query was provided
class_name = next(iter(data_response["Get"].keys()))
entries = data_response["Get"][class_name]
documents = []
for entry in entries:
embedding: Optional[List[float]] = None
# for each entry, join properties into <property>:<value>
# separated by newlines
text_list = []
for k, v in entry.items():
if k == "_additional":
if "vector" in v:
embedding = v["vector"]
continue
text_list.append(f"{k}: {v}")
text = "\n".join(text_list)
documents.append(Document(text=text, embedding=embedding))
if not separate_documents:
# join all documents into one
text_list = [doc.get_content() for doc in documents]
text = "\n\n".join(text_list)
documents = [Document(text=text)]
return documents
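# Usage sketch (the host and class name are hypothetical placeholders):
#
#     reader = WeaviateReader("http://localhost:8080")
#     docs = reader.load_data(class_name="Article", properties=["title", "body"])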
|
"""Weaviate reader."""
from typing import Any, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class WeaviateReader(BaseReader):
"""Weaviate reader.
    Retrieves documents from Weaviate through vector lookup. Offers the option
    to concatenate the retrieved documents into one Document, or to return
    a separate Document object per retrieved document.
Args:
host (str): host.
auth_client_secret (Optional[weaviate.auth.AuthCredentials]):
auth_client_secret.
"""
def __init__(
self,
host: str,
auth_client_secret: Optional[Any] = None,
) -> None:
"""Initialize with parameters."""
try:
import weaviate # noqa
from weaviate import Client
from weaviate.auth import AuthCredentials # noqa
except ImportError:
raise ImportError(
"`weaviate` package not found, please run `pip install weaviate-client`"
)
self.client: Client = Client(host, auth_client_secret=auth_client_secret)
def load_data(
self,
class_name: Optional[str] = None,
properties: Optional[List[str]] = None,
graphql_query: Optional[str] = None,
separate_documents: Optional[bool] = True,
) -> List[Document]:
"""Load data from Weaviate.
        If `graphql_query` is not provided, we assume that
        `class_name` and `properties` are provided.
Args:
class_name (Optional[str]): class_name to retrieve documents from.
properties (Optional[List[str]]): properties to retrieve from documents.
graphql_query (Optional[str]): Raw GraphQL Query.
We assume that the query is a Get query.
separate_documents (Optional[bool]): Whether to return separate
documents. Defaults to True.
Returns:
List[Document]: A list of documents.
"""
if class_name is not None and properties is not None:
props_txt = "\n".join(properties)
graphql_query = f"""
{{
Get {{
{class_name} {{
{props_txt}
}}
}}
}}
"""
elif graphql_query is not None:
pass
else:
raise ValueError(
"Either `class_name` and `properties` must be specified, "
"or `graphql_query` must be specified."
)
response = self.client.query.raw(graphql_query)
if "errors" in response:
raise ValueError("Invalid query, got errors: {}".format(response["errors"]))
data_response = response["data"]
if "Get" not in data_response:
raise ValueError("Invalid query response, must be a Get query.")
if class_name is None:
# infer class_name if only graphql_query was provided
class_name = next(iter(data_response["Get"].keys()))
entries = data_response["Get"][class_name]
documents = []
for entry in entries:
embedding: Optional[List[float]] = None
# for each entry, join properties into <property>:<value>
# separated by newlines
text_list = []
for k, v in entry.items():
if k == "_additional":
if "vector" in v:
embedding = v["vector"]
continue
text_list.append(f"{k}: {v}")
text = "\n".join(text_list)
documents.append(Document(text=text, embedding=embedding))
if not separate_documents:
# join all documents into one
text_list = [doc.get_content() for doc in documents]
text = "\n\n".join(text_list)
documents = [Document(text=text)]
return documents
|
from typing import Sequence, cast
import prisma.enums
import prisma.types
AGENT_NODE_INCLUDE: prisma.types.AgentNodeInclude = {
"Input": True,
"Output": True,
"Webhook": True,
"AgentBlock": True,
}
AGENT_GRAPH_INCLUDE: prisma.types.AgentGraphInclude = {
"Nodes": {"include": AGENT_NODE_INCLUDE}
}
EXECUTION_RESULT_INCLUDE: prisma.types.AgentNodeExecutionInclude = {
"Input": True,
"Output": True,
"Node": True,
"GraphExecution": True,
}
MAX_NODE_EXECUTIONS_FETCH = 1000
GRAPH_EXECUTION_INCLUDE_WITH_NODES: prisma.types.AgentGraphExecutionInclude = {
"NodeExecutions": {
"include": {
"Input": True,
"Output": True,
"Node": True,
"GraphExecution": True,
},
"order_by": [
{"queuedTime": "desc"},
            # Fallback: incomplete executions have no queuedTime.
{"addedTime": "desc"},
],
"take": MAX_NODE_EXECUTIONS_FETCH, # Avoid loading excessive node executions.
}
}
def graph_execution_include(
include_block_ids: Sequence[str],
) -> prisma.types.AgentGraphExecutionInclude:
return {
"NodeExecutions": {
**cast(
prisma.types.FindManyAgentNodeExecutionArgsFromAgentGraphExecution,
GRAPH_EXECUTION_INCLUDE_WITH_NODES["NodeExecutions"], # type: ignore
),
"where": {
"Node": {
"is": {"AgentBlock": {"is": {"id": {"in": include_block_ids}}}}
},
"NOT": [
{"executionStatus": prisma.enums.AgentExecutionStatus.INCOMPLETE}
],
},
}
}
INTEGRATION_WEBHOOK_INCLUDE: prisma.types.IntegrationWebhookInclude = {
"AgentNodes": {"include": AGENT_NODE_INCLUDE}
}
def library_agent_include(user_id: str) -> prisma.types.LibraryAgentInclude:
return {
"AgentGraph": {
"include": {
**AGENT_GRAPH_INCLUDE,
"Executions": {"where": {"userId": user_id}},
}
},
"Creator": True,
}
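# Usage sketch (the block ids are hypothetical placeholders): build an include
# clause whose node executions are limited to the given blocks.
#
#     include = graph_execution_include(["block-id-1", "block-id-2"])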
|
from typing import cast
import prisma.enums
import prisma.types
from backend.blocks.io import IO_BLOCK_IDs
AGENT_NODE_INCLUDE: prisma.types.AgentNodeInclude = {
"Input": True,
"Output": True,
"Webhook": True,
"AgentBlock": True,
}
AGENT_GRAPH_INCLUDE: prisma.types.AgentGraphInclude = {
"Nodes": {"include": AGENT_NODE_INCLUDE}
}
EXECUTION_RESULT_INCLUDE: prisma.types.AgentNodeExecutionInclude = {
"Input": True,
"Output": True,
"Node": True,
"GraphExecution": True,
}
MAX_NODE_EXECUTIONS_FETCH = 1000
GRAPH_EXECUTION_INCLUDE_WITH_NODES: prisma.types.AgentGraphExecutionInclude = {
"NodeExecutions": {
"include": {
"Input": True,
"Output": True,
"Node": True,
"GraphExecution": True,
},
"order_by": [
{"queuedTime": "desc"},
            # Fallback: incomplete executions have no queuedTime.
{"addedTime": "desc"},
],
"take": MAX_NODE_EXECUTIONS_FETCH, # Avoid loading excessive node executions.
}
}
GRAPH_EXECUTION_INCLUDE: prisma.types.AgentGraphExecutionInclude = {
"NodeExecutions": {
**cast(
prisma.types.FindManyAgentNodeExecutionArgsFromAgentGraphExecution,
GRAPH_EXECUTION_INCLUDE_WITH_NODES["NodeExecutions"],
),
"where": {
"Node": {"is": {"AgentBlock": {"is": {"id": {"in": IO_BLOCK_IDs}}}}},
"NOT": [{"executionStatus": prisma.enums.AgentExecutionStatus.INCOMPLETE}],
},
}
}
INTEGRATION_WEBHOOK_INCLUDE: prisma.types.IntegrationWebhookInclude = {
"AgentNodes": {"include": AGENT_NODE_INCLUDE}
}
def library_agent_include(user_id: str) -> prisma.types.LibraryAgentInclude:
return {
"AgentGraph": {
"include": {
**AGENT_GRAPH_INCLUDE,
"Executions": {"where": {"userId": user_id}},
}
},
"Creator": True,
}
|
from typing import Any, List, Optional, Tuple
import numpy as np
import pytest
from docarray import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
from docarray.typing import NdArray
from docarray.utils._internal.pydantic import is_pydantic_v2
def test_base_document_init():
doc = BaseDoc()
assert doc.id is not None
def test_update():
class MyDocument(BaseDoc):
content: str
title: Optional[str] = None
tags_: List
doc1 = MyDocument(
content='Core content of the document', title='Title', tags_=['python', 'AI']
)
doc2 = MyDocument(content='Core content updated', tags_=['docarray'])
doc1.update(doc2)
assert doc1.content == 'Core content updated'
assert doc1.title == 'Title'
assert doc1.tags_ == ['python', 'AI', 'docarray']
def test_equal_nested_docs():
import numpy as np
from docarray import BaseDoc, DocList
from docarray.typing import NdArray
class SimpleDoc(BaseDoc):
simple_tens: NdArray[10]
class NestedDoc(BaseDoc):
docs: DocList[SimpleDoc]
nested_docs = NestedDoc(
docs=DocList[SimpleDoc]([SimpleDoc(simple_tens=np.ones(10)) for j in range(2)]),
)
assert nested_docs == nested_docs
@pytest.fixture
def nested_docs():
class SimpleDoc(BaseDoc):
simple_tens: NdArray[10]
class NestedDoc(BaseDoc):
docs: DocList[SimpleDoc]
hello: str = 'world'
nested_docs = NestedDoc(
docs=DocList[SimpleDoc]([SimpleDoc(simple_tens=np.ones(10)) for j in range(2)]),
)
return nested_docs
@pytest.fixture
def nested_docs_docvec():
class SimpleDoc(BaseDoc):
simple_tens: NdArray[10]
class NestedDoc(BaseDoc):
docs: DocVec[SimpleDoc]
hello: str = 'world'
nested_docs = NestedDoc(
docs=DocList[SimpleDoc]([SimpleDoc(simple_tens=np.ones(10)) for j in range(2)]),
)
return nested_docs
def test_nested_to_dict(nested_docs):
d = nested_docs.dict()
assert (d['docs'][0]['simple_tens'] == np.ones(10)).all()
assert isinstance(d['docs'], list)
assert not isinstance(d['docs'], DocList)
def test_nested_docvec_to_dict(nested_docs_docvec):
d = nested_docs_docvec.dict()
assert (d['docs'][0]['simple_tens'] == np.ones(10)).all()
def test_nested_to_dict_exclude(nested_docs):
d = nested_docs.dict(exclude={'docs'})
assert 'docs' not in d.keys()
def test_nested_to_dict_exclude_set(nested_docs):
d = nested_docs.dict(exclude={'hello'})
assert 'hello' not in d.keys()
def test_nested_to_dict_exclude_dict(nested_docs):
d = nested_docs.dict(exclude={'hello': True})
assert 'hello' not in d.keys()
def test_nested_to_json(nested_docs):
d = nested_docs.json()
nested_docs.__class__.parse_raw(d)
@pytest.fixture
def nested_none_docs():
class SimpleDoc(BaseDoc):
simple_tens: NdArray[10]
class NestedDoc(BaseDoc):
docs: Optional[DocList[SimpleDoc]] = None
hello: str = 'world'
nested_docs = NestedDoc()
return nested_docs
def test_nested_none_to_dict(nested_none_docs):
d = nested_none_docs.dict()
assert d == {'docs': None, 'hello': 'world', 'id': nested_none_docs.id}
def test_nested_none_to_json(nested_none_docs):
d = nested_none_docs.json()
d = nested_none_docs.__class__.parse_raw(d)
assert d.dict() == {'docs': None, 'hello': 'world', 'id': nested_none_docs.id}
def test_get_get_field_inner_type():
class MyDoc(BaseDoc):
tuple_: Tuple
field_type = MyDoc._get_field_inner_type("tuple_")
assert field_type == Any
|
from typing import Any, List, Optional, Tuple
import numpy as np
import pytest
from docarray import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
from docarray.typing import NdArray
from docarray.utils._internal.pydantic import is_pydantic_v2
def test_base_document_init():
doc = BaseDoc()
assert doc.id is not None
def test_update():
class MyDocument(BaseDoc):
content: str
title: Optional[str] = None
tags_: List
doc1 = MyDocument(
content='Core content of the document', title='Title', tags_=['python', 'AI']
)
doc2 = MyDocument(content='Core content updated', tags_=['docarray'])
doc1.update(doc2)
assert doc1.content == 'Core content updated'
assert doc1.title == 'Title'
assert doc1.tags_ == ['python', 'AI', 'docarray']
def test_equal_nested_docs():
import numpy as np
from docarray import BaseDoc, DocList
from docarray.typing import NdArray
class SimpleDoc(BaseDoc):
simple_tens: NdArray[10]
class NestedDoc(BaseDoc):
docs: DocList[SimpleDoc]
nested_docs = NestedDoc(
docs=DocList[SimpleDoc]([SimpleDoc(simple_tens=np.ones(10)) for j in range(2)]),
)
assert nested_docs == nested_docs
@pytest.fixture
def nested_docs():
class SimpleDoc(BaseDoc):
simple_tens: NdArray[10]
class NestedDoc(BaseDoc):
docs: DocList[SimpleDoc]
hello: str = 'world'
nested_docs = NestedDoc(
docs=DocList[SimpleDoc]([SimpleDoc(simple_tens=np.ones(10)) for j in range(2)]),
)
return nested_docs
@pytest.fixture
def nested_docs_docvec():
class SimpleDoc(BaseDoc):
simple_tens: NdArray[10]
class NestedDoc(BaseDoc):
docs: DocVec[SimpleDoc]
hello: str = 'world'
nested_docs = NestedDoc(
docs=DocList[SimpleDoc]([SimpleDoc(simple_tens=np.ones(10)) for j in range(2)]),
)
return nested_docs
def test_nested_to_dict(nested_docs):
d = nested_docs.dict()
assert (d['docs'][0]['simple_tens'] == np.ones(10)).all()
assert isinstance(d['docs'], list)
assert not isinstance(d['docs'], DocList)
def test_nested_docvec_to_dict(nested_docs_docvec):
d = nested_docs_docvec.dict()
assert (d['docs'][0]['simple_tens'] == np.ones(10)).all()
def test_nested_to_dict_exclude(nested_docs):
d = nested_docs.dict(exclude={'docs'})
assert 'docs' not in d.keys()
def test_nested_to_dict_exclude_set(nested_docs):
d = nested_docs.dict(exclude={'hello'})
assert 'hello' not in d.keys()
def test_nested_to_dict_exclude_dict(nested_docs):
d = nested_docs.dict(exclude={'hello': True})
assert 'hello' not in d.keys()
@pytest.mark.skipif(is_pydantic_v2, reason="Not working with pydantic v2 for now")
def test_nested_to_json(nested_docs):
d = nested_docs.json()
nested_docs.__class__.parse_raw(d)
@pytest.fixture
def nested_none_docs():
class SimpleDoc(BaseDoc):
simple_tens: NdArray[10]
class NestedDoc(BaseDoc):
docs: Optional[DocList[SimpleDoc]] = None
hello: str = 'world'
nested_docs = NestedDoc()
return nested_docs
def test_nested_none_to_dict(nested_none_docs):
d = nested_none_docs.dict()
assert d == {'docs': None, 'hello': 'world', 'id': nested_none_docs.id}
def test_nested_none_to_json(nested_none_docs):
d = nested_none_docs.json()
d = nested_none_docs.__class__.parse_raw(d)
assert d.dict() == {'docs': None, 'hello': 'world', 'id': nested_none_docs.id}
def test_get_get_field_inner_type():
class MyDoc(BaseDoc):
tuple_: Tuple
field_type = MyDoc._get_field_inner_type("tuple_")
assert field_type == Any
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# model settings
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
bbox_head=dict(
_delete_=True,
type='SABLRetinaHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[4],
strides=[8, 16, 32, 64, 128]),
norm_cfg=norm_cfg,
bbox_coder=dict(
type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5),
loss_bbox_reg=dict(
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0.0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 480), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# model settings
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
bbox_head=dict(
_delete_=True,
type='SABLRetinaHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[4],
strides=[8, 16, 32, 64, 128]),
norm_cfg=norm_cfg,
bbox_coder=dict(
type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5),
loss_bbox_reg=dict(
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0.0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 480), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
|
_base_ = './solov2_r50_fpn_ms-3x_coco.py'
# model settings
model = dict(
backbone=dict(
depth=101, init_cfg=dict(checkpoint='torchvision://resnet101')))
|
_base_ = 'solov2_r50_fpn_mstrain_3x_coco.py'
# model settings
model = dict(
backbone=dict(
depth=101, init_cfg=dict(checkpoint='torchvision://resnet101')))
|
# ReAct agent formatter
import logging
from abc import abstractmethod
from typing import List, Optional, Sequence
from llama_index.core.agent.react.prompts import (
CONTEXT_REACT_CHAT_SYSTEM_HEADER,
REACT_CHAT_SYSTEM_HEADER,
)
from llama_index.core.agent.react.types import (
BaseReasoningStep,
ObservationReasoningStep,
)
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.core.bridge.pydantic import BaseModel, ConfigDict, Field
from llama_index.core.tools import BaseTool
logger = logging.getLogger(__name__)
def get_react_tool_descriptions(tools: Sequence[BaseTool]) -> List[str]:
"""Tool."""
tool_descs = []
for tool in tools:
tool_desc = (
f"> Tool Name: {tool.metadata.name}\n"
f"Tool Description: {tool.metadata.description}\n"
f"Tool Args: {tool.metadata.fn_schema_str}\n"
)
tool_descs.append(tool_desc)
return tool_descs
# TODO: come up with better name
class BaseAgentChatFormatter(BaseModel):
"""Base chat formatter."""
model_config = ConfigDict(arbitrary_types_allowed=True)
@abstractmethod
def format(
self,
tools: Sequence[BaseTool],
chat_history: List[ChatMessage],
current_reasoning: Optional[List[BaseReasoningStep]] = None,
) -> List[ChatMessage]:
"""Format chat history into list of ChatMessage."""
class ReActChatFormatter(BaseAgentChatFormatter):
"""ReAct chat formatter."""
system_header: str = REACT_CHAT_SYSTEM_HEADER # default
context: str = "" # not needed w/ default
observation_role: MessageRole = Field(
default=MessageRole.USER,
description=(
"Message role of tool outputs. If the LLM you use supports function/tool "
"calling, you may set it to `MessageRole.TOOL` to avoid the tool outputs "
"being misinterpreted as new user messages."
),
)
def format(
self,
tools: Sequence[BaseTool],
chat_history: List[ChatMessage],
current_reasoning: Optional[List[BaseReasoningStep]] = None,
) -> List[ChatMessage]:
"""Format chat history into list of ChatMessage."""
current_reasoning = current_reasoning or []
format_args = {
"tool_desc": "\n".join(get_react_tool_descriptions(tools)),
"tool_names": ", ".join([tool.metadata.get_name() for tool in tools]),
}
if self.context:
format_args["context"] = self.context
fmt_sys_header = self.system_header.format(**format_args)
# format reasoning history as alternating user and assistant messages
# where the assistant messages are thoughts and actions and the tool
# messages are observations
reasoning_history = []
for reasoning_step in current_reasoning:
if isinstance(reasoning_step, ObservationReasoningStep):
message = ChatMessage(
role=self.observation_role,
content=reasoning_step.get_content(),
)
else:
message = ChatMessage(
role=MessageRole.ASSISTANT,
content=reasoning_step.get_content(),
)
reasoning_history.append(message)
return [
ChatMessage(role=MessageRole.SYSTEM, content=fmt_sys_header),
*chat_history,
*reasoning_history,
]
@classmethod
def from_defaults(
cls,
system_header: Optional[str] = None,
context: Optional[str] = None,
observation_role: MessageRole = MessageRole.USER,
) -> "ReActChatFormatter":
"""Create ReActChatFormatter from defaults."""
if not system_header:
system_header = (
REACT_CHAT_SYSTEM_HEADER
if not context
else CONTEXT_REACT_CHAT_SYSTEM_HEADER
)
return ReActChatFormatter(
system_header=system_header,
context=context or "",
observation_role=observation_role,
)
@classmethod
def from_context(cls, context: str) -> "ReActChatFormatter":
"""
Create ReActChatFormatter from context.
NOTE: deprecated
"""
logger.warning(
"ReActChatFormatter.from_context is deprecated, please use `from_defaults` instead."
)
return ReActChatFormatter.from_defaults(
system_header=CONTEXT_REACT_CHAT_SYSTEM_HEADER, context=context
)
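# Usage sketch (the empty tool list and chat history are placeholders for
# illustration only):
#
#     formatter = ReActChatFormatter.from_defaults()
#     messages = formatter.format(tools=[], chat_history=[])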
|
# ReAct agent formatter
import logging
from abc import abstractmethod
from typing import List, Optional, Sequence
from llama_index.core.agent.react.prompts import (
CONTEXT_REACT_CHAT_SYSTEM_HEADER,
REACT_CHAT_SYSTEM_HEADER,
)
from llama_index.core.agent.react.types import (
BaseReasoningStep,
ObservationReasoningStep,
)
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.core.bridge.pydantic import BaseModel, ConfigDict, Field
from llama_index.core.tools import BaseTool
logger = logging.getLogger(__name__)
def get_react_tool_descriptions(tools: Sequence[BaseTool]) -> List[str]:
"""Tool."""
tool_descs = []
for tool in tools:
tool_desc = (
f"> Tool Name: {tool.metadata.name}\n"
f"Tool Description: {tool.metadata.description}\n"
f"Tool Args: {tool.metadata.fn_schema_str}\n"
)
tool_descs.append(tool_desc)
return tool_descs
# TODO: come up with better name
class BaseAgentChatFormatter(BaseModel):
"""Base chat formatter."""
model_config = ConfigDict(arbitrary_types_allowed=True)
@abstractmethod
def format(
self,
tools: Sequence[BaseTool],
chat_history: List[ChatMessage],
current_reasoning: Optional[List[BaseReasoningStep]] = None,
) -> List[ChatMessage]:
"""Format chat history into list of ChatMessage."""
class ReActChatFormatter(BaseAgentChatFormatter):
"""ReAct chat formatter."""
system_header: str = REACT_CHAT_SYSTEM_HEADER # default
context: str = "" # not needed w/ default
observation_role: MessageRole = Field(
default=MessageRole.USER,
description=(
"Message role of tool outputs. If the LLM you use supports function/tool "
"calling, you may set it to `MessageRole.TOOL` to avoid the tool outputs "
"being misinterpreted as new user messages."
),
)
def format(
self,
tools: Sequence[BaseTool],
chat_history: List[ChatMessage],
current_reasoning: Optional[List[BaseReasoningStep]] = None,
) -> List[ChatMessage]:
"""Format chat history into list of ChatMessage."""
current_reasoning = current_reasoning or []
format_args = {
"tool_desc": "\n".join(get_react_tool_descriptions(tools)),
"tool_names": ", ".join([tool.metadata.get_name() for tool in tools]),
}
if self.context:
format_args["context"] = self.context
fmt_sys_header = self.system_header.format(**format_args)
# format reasoning history as alternating user and assistant messages
# where the assistant messages are thoughts and actions and the tool
# messages are observations
reasoning_history = []
for reasoning_step in current_reasoning:
if isinstance(reasoning_step, ObservationReasoningStep):
message = ChatMessage(
role=self.observation_role,
content=reasoning_step.get_content(),
)
else:
message = ChatMessage(
role=MessageRole.ASSISTANT,
content=reasoning_step.get_content(),
)
reasoning_history.append(message)
return [
ChatMessage(role=MessageRole.SYSTEM, content=fmt_sys_header),
*chat_history,
*reasoning_history,
]
@classmethod
def from_defaults(
cls,
system_header: Optional[str] = None,
context: Optional[str] = None,
observation_role: MessageRole = MessageRole.USER,
) -> "ReActChatFormatter":
"""Create ReActChatFormatter from defaults."""
if not system_header:
system_header = (
REACT_CHAT_SYSTEM_HEADER
if not context
else CONTEXT_REACT_CHAT_SYSTEM_HEADER
)
return ReActChatFormatter(
system_header=system_header,
context=context or "",
observation_role=observation_role,
)
@classmethod
def from_context(cls, context: str) -> "ReActChatFormatter":
"""Create ReActChatFormatter from context.
NOTE: deprecated
"""
logger.warning(
"ReActChatFormatter.from_context is deprecated, please use `from_defaults` instead."
)
return ReActChatFormatter.from_defaults(
system_header=CONTEXT_REACT_CHAT_SYSTEM_HEADER, context=context
)
|
import os
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.typing import ImageBytes, ImageNdArray, ImageTensor, ImageTorchTensor
from docarray.utils._internal.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
from docarray.computation.tensorflow_backend import tnp
from docarray.typing.tensor.image import ImageTensorFlowTensor
@pytest.mark.parametrize(
'cls_tensor,tensor',
[
(ImageTorchTensor, torch.zeros((224, 224, 3))),
(ImageNdArray, np.zeros((224, 224, 3))),
],
)
def test_save_image_tensor_to_file(cls_tensor, tensor, tmpdir):
tmp_file = str(tmpdir / 'tmp.jpg')
image_tensor = parse_obj_as(cls_tensor, tensor)
image_tensor.save(tmp_file)
assert os.path.isfile(tmp_file)
@pytest.mark.tensorflow
def test_save_image_tensorflow_tensor_to_file(tmpdir):
tmp_file = str(tmpdir / 'tmp.jpg')
image_tensor = parse_obj_as(ImageTensorFlowTensor, tf.zeros((224, 224, 3)))
image_tensor.save(tmp_file)
assert os.path.isfile(tmp_file)
@pytest.mark.parametrize(
'image_tensor',
[
parse_obj_as(ImageTorchTensor, torch.zeros(224, 224, 3)),
parse_obj_as(ImageNdArray, np.zeros((224, 224, 3))),
],
)
def test_save_image_tensor_to_bytes(image_tensor):
b = image_tensor.to_bytes()
    assert isinstance(b, bytes)
    assert isinstance(b, ImageBytes)
@pytest.mark.parametrize(
'tensor,cls_audio_tensor,cls_tensor',
[
(torch.zeros(1000, 2), ImageTorchTensor, torch.Tensor),
(np.zeros((1000, 2)), ImageNdArray, np.ndarray),
],
)
def test_torch_ndarray_to_image_tensor(tensor, cls_audio_tensor, cls_tensor):
class MyImageDoc(BaseDoc):
tensor: ImageTensor
doc = MyImageDoc(tensor=tensor)
assert isinstance(doc.tensor, cls_audio_tensor)
assert isinstance(doc.tensor, cls_tensor)
assert (doc.tensor == tensor).all()
@pytest.mark.tensorflow
def test_tensorflow_to_image_tensor():
class MyImageDoc(BaseDoc):
tensor: ImageTensor
doc = MyImageDoc(tensor=tf.zeros((1000, 2)))
assert isinstance(doc.tensor, ImageTensorFlowTensor)
assert isinstance(doc.tensor.tensor, tf.Tensor)
assert tnp.allclose(doc.tensor.tensor, tf.zeros((1000, 2)))
|
import os
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray.typing import ImageBytes, ImageNdArray, ImageTorchTensor
from docarray.utils._internal.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
from docarray.typing.tensor.image import ImageTensorFlowTensor
@pytest.mark.parametrize(
'cls_tensor,tensor',
[
(ImageTorchTensor, torch.zeros((224, 224, 3))),
(ImageNdArray, np.zeros((224, 224, 3))),
],
)
def test_save_image_tensor_to_file(cls_tensor, tensor, tmpdir):
tmp_file = str(tmpdir / 'tmp.jpg')
image_tensor = parse_obj_as(cls_tensor, tensor)
image_tensor.save(tmp_file)
assert os.path.isfile(tmp_file)
@pytest.mark.tensorflow
def test_save_image_tensorflow_tensor_to_file(tmpdir):
tmp_file = str(tmpdir / 'tmp.jpg')
image_tensor = parse_obj_as(ImageTensorFlowTensor, tf.zeros((224, 224, 3)))
image_tensor.save(tmp_file)
assert os.path.isfile(tmp_file)
@pytest.mark.parametrize(
'image_tensor',
[
parse_obj_as(ImageTorchTensor, torch.zeros(224, 224, 3)),
parse_obj_as(ImageNdArray, np.zeros((224, 224, 3))),
],
)
def test_save_image_tensor_to_bytes(image_tensor):
b = image_tensor.to_bytes()
    assert isinstance(b, bytes)
    assert isinstance(b, ImageBytes)
|
_base_ = './yolox_s_8x8_300e_coco.py'
# model settings
model = dict(
random_size_range=(10, 20),
backbone=dict(deepen_factor=0.33, widen_factor=0.375),
neck=dict(in_channels=[96, 192, 384], out_channels=96),
bbox_head=dict(in_channels=96, feat_channels=96))
img_scale = (640, 640) # height, width
train_pipeline = [
dict(type='Mosaic', img_scale=img_scale, pad_val=114.0),
dict(
type='RandomAffine',
scaling_ratio_range=(0.5, 1.5),
border=(-img_scale[0] // 2, -img_scale[1] // 2)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Resize', img_scale=img_scale, keep_ratio=True),
dict(
type='Pad',
pad_to_square=True,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(416, 416),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(
type='Pad',
pad_to_square=True,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img'])
])
]
train_dataset = dict(pipeline=train_pipeline)
data = dict(
train=train_dataset,
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
_base_ = './yolox_s_8x8_300e_coco.py'
# model settings
model = dict(
random_size_range=(10, 20),
backbone=dict(deepen_factor=0.33, widen_factor=0.375),
neck=dict(in_channels=[96, 192, 384], out_channels=96),
bbox_head=dict(in_channels=96, feat_channels=96))
img_scale = (640, 640)
train_pipeline = [
dict(type='Mosaic', img_scale=img_scale, pad_val=114.0),
dict(
type='RandomAffine',
scaling_ratio_range=(0.5, 1.5),
border=(-img_scale[0] // 2, -img_scale[1] // 2)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Resize', img_scale=img_scale, keep_ratio=True),
dict(
type='Pad',
pad_to_square=True,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(416, 416),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(
type='Pad',
pad_to_square=True,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img'])
])
]
train_dataset = dict(pipeline=train_pipeline)
data = dict(
train=train_dataset,
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
from .conv_emformer import ConvEmformer
from .conv_tasnet import conv_tasnet_base
from .rnnt import conformer_rnnt_base, conformer_rnnt_model
__all__ = [
"conformer_rnnt_base",
"conformer_rnnt_model",
"conv_tasnet_base",
"ConvEmformer",
]
|
from .conv_emformer import ConvEmformer
from .conv_tasnet import conv_tasnet_base
from .hdemucs import HDemucs, hdemucs_high, hdemucs_low, hdemucs_medium
from .rnnt import conformer_rnnt_base, conformer_rnnt_model
__all__ = [
"conformer_rnnt_base",
"conformer_rnnt_model",
"conv_tasnet_base",
"ConvEmformer",
"HDemucs",
"hdemucs_high",
"hdemucs_medium",
"hdemucs_low",
]
|
from __future__ import annotations
from typing import TYPE_CHECKING, Any
from langchain_core.callbacks import Callbacks
from langchain_core.callbacks.manager import (
AsyncCallbackManager,
AsyncCallbackManagerForChainGroup,
AsyncCallbackManagerForChainRun,
AsyncCallbackManagerForLLMRun,
AsyncCallbackManagerForRetrieverRun,
AsyncCallbackManagerForToolRun,
AsyncParentRunManager,
AsyncRunManager,
BaseRunManager,
CallbackManager,
CallbackManagerForChainGroup,
CallbackManagerForChainRun,
CallbackManagerForLLMRun,
CallbackManagerForRetrieverRun,
CallbackManagerForToolRun,
ParentRunManager,
RunManager,
ahandle_event,
atrace_as_chain_group,
handle_event,
trace_as_chain_group,
)
from langchain_core.tracers.context import (
collect_runs,
tracing_enabled,
tracing_v2_enabled,
)
from langchain_core.utils.env import env_var_is_set
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.callbacks.manager import (
get_openai_callback,
wandb_tracing_enabled,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"get_openai_callback": "langchain_community.callbacks.manager",
"wandb_tracing_enabled": "langchain_community.callbacks.manager",
}
_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"ahandle_event",
"AsyncCallbackManagerForChainGroup",
"AsyncCallbackManagerForChainRun",
"AsyncCallbackManagerForLLMRun",
"AsyncCallbackManagerForRetrieverRun",
"AsyncCallbackManagerForToolRun",
"AsyncParentRunManager",
"AsyncRunManager",
"atrace_as_chain_group",
"BaseRunManager",
"CallbackManager",
"CallbackManagerForChainGroup",
"CallbackManagerForChainRun",
"CallbackManagerForLLMRun",
"CallbackManagerForRetrieverRun",
"CallbackManagerForToolRun",
"Callbacks",
"AsyncCallbackManager",
"collect_runs",
"env_var_is_set",
"get_openai_callback",
"handle_event",
"ParentRunManager",
"RunManager",
"trace_as_chain_group",
"tracing_enabled",
"tracing_v2_enabled",
"wandb_tracing_enabled",
]
|
from __future__ import annotations
from typing import TYPE_CHECKING, Any
from langchain_core.callbacks.manager import (
AsyncCallbackManager,
AsyncCallbackManagerForChainGroup,
AsyncCallbackManagerForChainRun,
AsyncCallbackManagerForLLMRun,
AsyncCallbackManagerForRetrieverRun,
AsyncCallbackManagerForToolRun,
AsyncParentRunManager,
AsyncRunManager,
BaseRunManager,
CallbackManager,
CallbackManagerForChainGroup,
CallbackManagerForChainRun,
CallbackManagerForLLMRun,
CallbackManagerForRetrieverRun,
CallbackManagerForToolRun,
Callbacks,
ParentRunManager,
RunManager,
ahandle_event,
atrace_as_chain_group,
handle_event,
trace_as_chain_group,
)
from langchain_core.tracers.context import (
collect_runs,
tracing_enabled,
tracing_v2_enabled,
)
from langchain_core.utils.env import env_var_is_set
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.callbacks.manager import (
get_openai_callback,
wandb_tracing_enabled,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"get_openai_callback": "langchain_community.callbacks.manager",
"wandb_tracing_enabled": "langchain_community.callbacks.manager",
}
_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"ahandle_event",
"AsyncCallbackManagerForChainGroup",
"AsyncCallbackManagerForChainRun",
"AsyncCallbackManagerForLLMRun",
"AsyncCallbackManagerForRetrieverRun",
"AsyncCallbackManagerForToolRun",
"AsyncParentRunManager",
"AsyncRunManager",
"atrace_as_chain_group",
"BaseRunManager",
"CallbackManager",
"CallbackManagerForChainGroup",
"CallbackManagerForChainRun",
"CallbackManagerForLLMRun",
"CallbackManagerForRetrieverRun",
"CallbackManagerForToolRun",
"Callbacks",
"AsyncCallbackManager",
"collect_runs",
"env_var_is_set",
"get_openai_callback",
"handle_event",
"ParentRunManager",
"RunManager",
"trace_as_chain_group",
"tracing_enabled",
"tracing_v2_enabled",
"wandb_tracing_enabled",
]
|
from sentence_transformers import SentenceTransformer
from contextlib import nullcontext
from sentence_transformers.evaluation import SentenceEvaluator
import logging
import os
import csv
from typing import Dict, List, Optional
logger = logging.getLogger(__name__)
class MSEEvaluator(SentenceEvaluator):
"""
Computes the mean squared error (x100) between the computed sentence embedding
and some target sentence embedding.
    The MSE is computed as ||teacher.encode(source_sentences) - student.encode(target_sentences)||.
For multilingual knowledge distillation (https://arxiv.org/abs/2004.09813), source_sentences are in English
and target_sentences are in a different language like German, Chinese, Spanish...
:param source_sentences: Source sentences are embedded with the teacher model
    :param target_sentences: Target sentences are embedded with the student model.
:param show_progress_bar: Show progress bar when computing embeddings
:param batch_size: Batch size to compute sentence embeddings
:param name: Name of the evaluator
:param write_csv: Write results to CSV file
:param truncate_dim: The dimension to truncate sentence embeddings to. `None` uses the model's current truncation
dimension. Defaults to None.
"""
def __init__(
self,
source_sentences: List[str],
target_sentences: List[str],
teacher_model=None,
show_progress_bar: bool = False,
batch_size: int = 32,
name: str = "",
write_csv: bool = True,
truncate_dim: Optional[int] = None,
):
super().__init__()
self.truncate_dim = truncate_dim
with nullcontext() if self.truncate_dim is None else teacher_model.truncate_sentence_embeddings(
self.truncate_dim
):
self.source_embeddings = teacher_model.encode(
source_sentences, show_progress_bar=show_progress_bar, batch_size=batch_size, convert_to_numpy=True
)
self.target_sentences = target_sentences
self.show_progress_bar = show_progress_bar
self.batch_size = batch_size
self.name = name
self.csv_file = "mse_evaluation_" + name + "_results.csv"
self.csv_headers = ["epoch", "steps", "MSE"]
self.write_csv = write_csv
self.primary_metric = "negative_mse"
def __call__(self, model: SentenceTransformer, output_path, epoch=-1, steps=-1) -> Dict[str, float]:
if epoch != -1:
if steps == -1:
out_txt = f" after epoch {epoch}"
else:
out_txt = f" in epoch {epoch} after {steps} steps"
else:
out_txt = ""
if self.truncate_dim is not None:
out_txt += f" (truncated to {self.truncate_dim})"
with nullcontext() if self.truncate_dim is None else model.truncate_sentence_embeddings(self.truncate_dim):
target_embeddings = model.encode(
self.target_sentences,
show_progress_bar=self.show_progress_bar,
batch_size=self.batch_size,
convert_to_numpy=True,
)
mse = ((self.source_embeddings - target_embeddings) ** 2).mean()
mse *= 100
logger.info(f"MSE evaluation (lower = better) on the {self.name} dataset{out_txt}:")
logger.info("MSE (*100):\t{:4f}".format(mse))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, newline="", mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, mse])
# Return negative score as SentenceTransformers maximizes the performance
metrics = {"negative_mse": -mse}
metrics = self.prefix_name_to_metrics(metrics, self.name)
self.store_metrics_in_model_card_data(model, metrics)
return metrics
@property
def description(self) -> str:
return "Knowledge Distillation"
|
from sentence_transformers import SentenceTransformer
from contextlib import nullcontext
from sentence_transformers.evaluation import SentenceEvaluator
import logging
import os
import csv
from typing import List, Optional
logger = logging.getLogger(__name__)
class MSEEvaluator(SentenceEvaluator):
"""
Computes the mean squared error (x100) between the computed sentence embedding
and some target sentence embedding.
    The MSE is computed as ||teacher.encode(source_sentences) - student.encode(target_sentences)||.
For multilingual knowledge distillation (https://arxiv.org/abs/2004.09813), source_sentences are in English
and target_sentences are in a different language like German, Chinese, Spanish...
:param source_sentences: Source sentences are embedded with the teacher model
    :param target_sentences: Target sentences are embedded with the student model.
:param show_progress_bar: Show progress bar when computing embeddings
:param batch_size: Batch size to compute sentence embeddings
:param name: Name of the evaluator
:param write_csv: Write results to CSV file
:param truncate_dim: The dimension to truncate sentence embeddings to. `None` uses the model's current truncation
dimension. Defaults to None.
"""
def __init__(
self,
source_sentences: List[str],
target_sentences: List[str],
teacher_model=None,
show_progress_bar: bool = False,
batch_size: int = 32,
name: str = "",
write_csv: bool = True,
truncate_dim: Optional[int] = None,
):
self.truncate_dim = truncate_dim
with nullcontext() if self.truncate_dim is None else teacher_model.truncate_sentence_embeddings(
self.truncate_dim
):
self.source_embeddings = teacher_model.encode(
source_sentences, show_progress_bar=show_progress_bar, batch_size=batch_size, convert_to_numpy=True
)
self.target_sentences = target_sentences
self.show_progress_bar = show_progress_bar
self.batch_size = batch_size
self.name = name
self.csv_file = "mse_evaluation_" + name + "_results.csv"
self.csv_headers = ["epoch", "steps", "MSE"]
self.write_csv = write_csv
def __call__(self, model: SentenceTransformer, output_path, epoch=-1, steps=-1):
if epoch != -1:
if steps == -1:
out_txt = f" after epoch {epoch}"
else:
out_txt = f" in epoch {epoch} after {steps} steps"
else:
out_txt = ""
if self.truncate_dim is not None:
out_txt += f" (truncated to {self.truncate_dim})"
with nullcontext() if self.truncate_dim is None else model.truncate_sentence_embeddings(self.truncate_dim):
target_embeddings = model.encode(
self.target_sentences,
show_progress_bar=self.show_progress_bar,
batch_size=self.batch_size,
convert_to_numpy=True,
)
mse = ((self.source_embeddings - target_embeddings) ** 2).mean()
mse *= 100
logger.info(f"MSE evaluation (lower = better) on the {self.name} dataset{out_txt}:")
logger.info("MSE (*100):\t{:4f}".format(mse))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, newline="", mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, mse])
return -mse # Return negative score as SentenceTransformers maximizes the performance
|
from keras.src import tree
from keras.src.trainers.data_adapters import data_adapter_utils
from keras.src.trainers.data_adapters.data_adapter import DataAdapter
class TFDatasetAdapter(DataAdapter):
"""Adapter that handles `tf.data.Dataset`."""
def __init__(self, dataset, class_weight=None, distribution=None):
"""Initialize the TFDatasetAdapter.
Args:
dataset: The input `tf.data.Dataset` instance.
class_weight: A map where the keys are integer class ids and values
are the class weights, e.g. `{0: 0.2, 1: 0.6, 2: 0.3}`.
distribution: A `keras.distribution.Distribution` instance. Used to
shard the input dataset into per worker/process dataset
instance.
"""
from keras.src.utils.module_utils import tensorflow as tf
if not isinstance(
dataset, (tf.data.Dataset, tf.distribute.DistributedDataset)
):
raise ValueError(
"Expected argument `dataset` to be a tf.data.Dataset. "
f"Received: {dataset}"
)
if class_weight is not None:
dataset = dataset.map(
make_class_weight_map_fn(class_weight)
).prefetch(tf.data.AUTOTUNE)
if distribution is not None:
dataset = distribution.distribute_dataset(dataset)
self._dataset = dataset
def get_numpy_iterator(self):
from keras.src.backend.tensorflow.core import convert_to_numpy
for batch in self._dataset:
yield tree.map_structure(convert_to_numpy, batch)
def get_jax_iterator(self):
from keras.src.backend.tensorflow.core import convert_to_numpy
from keras.src.utils.module_utils import tensorflow as tf
def convert_to_jax(x):
if isinstance(x, tf.SparseTensor):
return data_adapter_utils.tf_sparse_to_jax_sparse(x)
else:
# We use numpy as an intermediary because it is faster.
return convert_to_numpy(x)
for batch in self._dataset:
yield tree.map_structure(convert_to_jax, batch)
def get_tf_dataset(self):
return self._dataset
def get_torch_dataloader(self):
return data_adapter_utils.get_torch_dataloader(self._dataset)
@property
def num_batches(self):
cardinality = self._dataset.cardinality
if callable(cardinality):
# `dataset.cardinality` is normally expected to be a callable.
cardinality = int(self._dataset.cardinality())
else:
# However, in the case of `DistributedDataset`, it's a np.int64.
cardinality = int(cardinality)
# Return None for Unknown and Infinite cardinality datasets
if cardinality < 0:
return None
return cardinality
@property
def batch_size(self):
first_element_spec = tree.flatten(self._dataset.element_spec)[0]
return first_element_spec.shape[0]
@property
def has_partial_batch(self):
return None
@property
def partial_batch_size(self):
return None
def make_class_weight_map_fn(class_weight):
"""Applies class weighting to a `Dataset`.
The `Dataset` is assumed to be in format `(x, y)` or `(x, y, sw)`, where
`y` must be a single `Tensor`.
Args:
class_weight: A map where the keys are integer class ids and values are
the class weights, e.g. `{0: 0.2, 1: 0.6, 2: 0.3}`
Returns:
A function that can be used with `tf.data.Dataset.map` to apply class
weighting.
"""
from keras.src.utils.module_utils import tensorflow as tf
class_weight_tensor = tf.convert_to_tensor(
[
class_weight.get(int(c), 1.0)
for c in range(max(class_weight.keys()) + 1)
]
)
def class_weights_map_fn(*data):
"""Convert `class_weight` to `sample_weight`."""
x, y, sw = data_adapter_utils.unpack_x_y_sample_weight(data)
if sw is not None:
raise ValueError(
"You cannot `class_weight` and `sample_weight` "
"at the same time."
)
if tree.is_nested(y):
raise ValueError(
"`class_weight` is only supported for Models with a single "
"output."
)
if y.shape.rank >= 2:
y_classes = tf.__internal__.smart_cond.smart_cond(
tf.shape(y)[-1] > 1,
lambda: tf.argmax(y, axis=-1, output_type=tf.int32),
lambda: tf.cast(tf.round(tf.squeeze(y, axis=-1)), tf.int32),
)
else:
# Special casing for rank 1, where we can guarantee sparse encoding.
y_classes = tf.cast(tf.round(y), tf.int32)
cw = tf.gather(class_weight_tensor, y_classes)
return x, y, cw
return class_weights_map_fn
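# --- Usage sketch (not part of the original module) ---
# A minimal, hedged demo of `make_class_weight_map_fn`: the mapped dataset
# yields `(x, y, sample_weight)` with per-sample weights gathered from the
# class-weight dict. The toy data below is illustrative only.
if __name__ == "__main__":
    import numpy as np
    import tensorflow as tf

    x = np.random.rand(4, 8).astype("float32")
    y = np.array([0.0, 1.0, 2.0, 1.0], dtype="float32")  # rank-1 sparse labels
    ds = tf.data.Dataset.from_tensor_slices((x, y)).batch(2)
    ds = ds.map(make_class_weight_map_fn({0: 0.2, 1: 0.6, 2: 0.3}))
    for _, _, sw in ds:
        print(sw.numpy())  # per-sample weights gathered from the class map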
|
from keras.src import tree
from keras.src.trainers.data_adapters import data_adapter_utils
from keras.src.trainers.data_adapters.data_adapter import DataAdapter
class TFDatasetAdapter(DataAdapter):
"""Adapter that handles `tf.data.Dataset`."""
def __init__(self, dataset, class_weight=None, distribution=None):
"""Initialize the TFDatasetAdapter.
Args:
dataset: The input `tf.data.Dataset` instance.
class_weight: A map where the keys are integer class ids and values
are the class weights, e.g. `{0: 0.2, 1: 0.6, 2: 0.3}`.
distribution: A `keras.distribution.Distribution` instance. Used to
shard the input dataset into per worker/process dataset
instance.
"""
from keras.src.utils.module_utils import tensorflow as tf
if not isinstance(
dataset, (tf.data.Dataset, tf.distribute.DistributedDataset)
):
raise ValueError(
"Expected argument `dataset` to be a tf.data.Dataset. "
f"Received: {dataset}"
)
if class_weight is not None:
dataset = dataset.map(
make_class_weight_map_fn(class_weight)
).prefetch(tf.data.AUTOTUNE)
if distribution is not None:
dataset = distribution.distribute_dataset(dataset)
self._dataset = dataset
def get_numpy_iterator(self):
from keras.src.backend.tensorflow.core import convert_to_numpy
for batch in self._dataset:
yield tree.map_structure(convert_to_numpy, batch)
def get_jax_iterator(self):
from keras.src.backend.tensorflow.core import convert_to_numpy
from keras.src.utils.module_utils import tensorflow as tf
def convert_to_jax(x):
if isinstance(x, tf.SparseTensor):
return data_adapter_utils.tf_sparse_to_jax_sparse(x)
else:
# We use numpy as an intermediary because it is faster.
return convert_to_numpy(x)
for batch in self._dataset:
yield tree.map_structure(convert_to_jax, batch)
def get_tf_dataset(self):
return self._dataset
def get_torch_dataloader(self):
return data_adapter_utils.get_torch_dataloader(self._dataset)
@property
def num_batches(self):
cardinality = self._dataset.cardinality
if callable(cardinality):
# `dataset.cardinality` is normally expected to be a callable.
cardinality = int(self._dataset.cardinality())
else:
# However, in the case of `DistributedDataset`, it's a np.int64.
cardinality = int(cardinality)
# Return None for Unknown and Infinite cardinality datasets
if cardinality < 0:
return None
return cardinality
@property
def batch_size(self):
first_element_spec = tree.flatten(self._dataset.element_spec)[0]
return first_element_spec.shape[0]
@property
def has_partial_batch(self):
return None
@property
def partial_batch_size(self):
return None
def make_class_weight_map_fn(class_weight):
"""Applies class weighting to a `Dataset`.
The `Dataset` is assumed to be in format `(x, y)` or `(x, y, sw)`, where
`y` must be a single `Tensor`.
Args:
class_weight: A map where the keys are integer class ids and values are
the class weights, e.g. `{0: 0.2, 1: 0.6, 2: 0.3}`
Returns:
A function that can be used with `tf.data.Dataset.map` to apply class
weighting.
"""
from keras.src.utils.module_utils import tensorflow as tf
class_weight_tensor = tf.convert_to_tensor(
[
class_weight.get(int(c), 1.0)
for c in range(max(class_weight.keys()) + 1)
]
)
def class_weights_map_fn(*data):
"""Convert `class_weight` to `sample_weight`."""
x, y, sw = data_adapter_utils.unpack_x_y_sample_weight(data)
if sw is not None:
raise ValueError(
"You cannot `class_weight` and `sample_weight` "
"at the same time."
)
if tree.is_nested(y):
raise ValueError(
"`class_weight` is only supported for Models with a single "
"output."
)
if y.shape.rank >= 2:
y_classes = tf.__internal__.smart_cond.smart_cond(
tf.shape(y)[-1] > 1,
lambda: tf.argmax(y, axis=-1),
lambda: tf.cast(tf.round(tf.squeeze(y, axis=-1)), tf.int32),
)
else:
# Special casing for rank 1, where we can guarantee sparse encoding.
y_classes = tf.cast(tf.round(y), tf.int32)
cw = tf.gather(class_weight_tensor, y_classes)
return x, y, cw
return class_weights_map_fn
|
"""
This example trains a CrossEncoder for the NLI task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it learns to predict the labels: "contradiction": 0, "entailment": 1, "neutral": 2.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_nli.py
"""
import logging
from datetime import datetime
from datasets import load_dataset
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CEClassificationEvaluator
from sentence_transformers.cross_encoder.losses.CrossEntropyLoss import CrossEntropyLoss
from sentence_transformers.cross_encoder.trainer import CrossEncoderTrainer
from sentence_transformers.cross_encoder.training_args import CrossEncoderTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
train_batch_size = 64
num_epochs = 1
output_dir = "output/training_ce_allnli-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Define our CrossEncoder model. We use distilroberta-base as the base model and set it up to predict 3 labels
# You can also use other base models, like bert-base-uncased, microsoft/mpnet-base, etc.
model = CrossEncoder("distilroberta-base", num_labels=3)
# 2. Load the AllNLI dataset: https://huggingface.co/datasets/sentence-transformers/all-nli
# We'll start with 10k training samples, but you can increase this to get a stronger model
logging.info("Read AllNLI train dataset")
train_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="train").select(range(10000))
eval_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="dev").select(range(1000))
test_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="test")
logging.info(train_dataset)
# If you need to remap labels from the dataset, you can do it like so:
mapping = {0: 1, 1: 2, 2: 0}
eval_dataset = eval_dataset.map(lambda x: {"label": mapping[x["label"]]})
test_dataset = test_dataset.map(lambda x: {"label": mapping[x["label"]]})
# 3. Define our training loss:
loss = CrossEntropyLoss(model)
# 4. During training, we use CEClassificationEvaluator to measure the performance on the dev set
dev_cls_evaluator = CEClassificationEvaluator(
list(zip(eval_dataset["premise"], eval_dataset["hypothesis"])), eval_dataset["label"], name="AllNLI-dev"
)
dev_cls_evaluator(model)
# 5. Define the training arguments
args = CrossEncoderTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=500,
save_strategy="steps",
save_steps=500,
save_total_limit=2,
logging_steps=100,
run_name="ce-nli-v1", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = CrossEncoderTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_cls_evaluator,
)
trainer.train()
# 7. Evaluate the final model on test dataset
test_cls_evaluator = CEClassificationEvaluator(
list(zip(test_dataset["premise"], test_dataset["hypothesis"])), test_dataset["label"], name="AllNLI-test"
)
test_cls_evaluator(model)
# 8. Save the final model
final_output_dir = f"{output_dir}/final"
model.save_pretrained(final_output_dir)
|
"""
This example trains a CrossEncoder for the NLI task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it learns to predict the labels: "contradiction": 0, "entailment": 1, "neutral": 2.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_nli.py
"""
import logging
from datetime import datetime
from datasets import load_dataset
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CEClassificationEvaluator
from sentence_transformers.cross_encoder.losses.CrossEntropyLoss import CrossEntropyLoss
from sentence_transformers.cross_encoder.trainer import CrossEncoderTrainer
from sentence_transformers.cross_encoder.training_args import CrossEncoderTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
train_batch_size = 64
num_epochs = 1
output_dir = "output/training_ce_allnli-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Define our CrossEncoder model. We use distilroberta-base as the base model and set it up to predict 3 labels
# You can also use other base models, like bert-base-uncased, microsoft/mpnet-base, etc.
model = CrossEncoder("distilroberta-base", num_labels=3)
# 2. Load the AllNLI dataset: https://huggingface.co/datasets/sentence-transformers/all-nli
# We'll start with 10k training samples, but you can increase this to get a stronger model
logging.info("Read AllNLI train dataset")
train_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="train").select(range(10000))
eval_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="dev").select(range(1000))
test_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="test")
logging.info(train_dataset)
# If you need to remap labels from the dataset, you can do it like so:
mapping = {0: 1, 1: 2, 2: 0}
eval_dataset = eval_dataset.map(lambda x: {"label": mapping[x["label"]]})
test_dataset = test_dataset.map(lambda x: {"label": mapping[x["label"]]})
# 3. Define our training loss:
loss = CrossEntropyLoss(model)
# 4. During training, we use CEClassificationEvaluator to measure the performance on the dev set
dev_cls_evaluator = CEClassificationEvaluator(
list(zip(eval_dataset["premise"], eval_dataset["hypothesis"])), eval_dataset["label"], name="AllNLI-dev"
)
dev_cls_evaluator(model)
# 5. Define the training arguments
args = CrossEncoderTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=500,
save_strategy="steps",
save_steps=500,
save_total_limit=2,
logging_steps=100,
run_name="ce-nli-v1", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = CrossEncoderTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_cls_evaluator,
)
trainer.train()
# 7. Evaluate the final model on test dataset
test_cls_evaluator = CEClassificationEvaluator(
list(zip(test_dataset["premise"], test_dataset["hypothesis"])), test_dataset["label"], name="AllNLI-test"
)
test_cls_evaluator(model)
# 8. Save the final model
final_output_dir = f"{output_dir}/final"
model.save_pretrained(final_output_dir)
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
frozen_stages=0,
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
init_cfg=dict(
type='Pretrained', checkpoint='./mocov2_r50_800ep_pretrain.pth')))
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
frozen_stages=0,
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
init_cfg=dict(
type='Pretrained', checkpoint='./mocov2_r50_800ep_pretrain.pth')))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
import os
import shutil
import subprocess
import sys
def _get_run_args(print_args: bool = True):
from jina.helper import get_rich_console
from jina.parsers import get_main_parser
console = get_rich_console()
silent_print = {'help', 'hub', 'export', 'auth'}
parser = get_main_parser()
if len(sys.argv) > 1:
from argparse import _StoreAction, _StoreTrueAction
from rich import box
from rich.table import Table
args, unknown = parser.parse_known_args()
if unknown:
from jina.helper import warn_unknown_args
unknown = list(filter(lambda x: x.startswith('--'), unknown))
warn_unknown_args(unknown)
if args.cli not in silent_print and print_args:
from jina import __resources_path__
p = parser._actions[-1].choices[sys.argv[1]]
default_args = {
a.dest: a.default
for a in p._actions
if isinstance(a, (_StoreAction, _StoreTrueAction))
}
with open(os.path.join(__resources_path__, 'jina.logo')) as fp:
logo_str = fp.read()
param_str = Table(
title=' '.join(sys.argv),
box=box.ROUNDED,
highlight=True,
title_justify='left',
)
param_str.add_column('Argument', justify='right')
param_str.add_column('Value', justify='left')
for k, v in sorted(vars(args).items()):
param = k.replace('_', '-')
value = str(v)
if not default_args.get(k, None) == v:
value = f'[b]{value}[/]'
param_str.add_row(param, value)
if 'JINA_LOG_NO_COLOR' not in os.environ:
print(f'\n{logo_str}\n')
console.print(param_str)
return args
else:
parser.print_help()
exit()
def _quick_ac_lookup():
from jina_cli.autocomplete import ac_table
if len(sys.argv) > 1:
if sys.argv[1] == 'commands':
for k in ac_table['commands']:
print(k)
exit()
elif sys.argv[1] == 'completions':
# search with the longest shared prefix
for j in range(len(sys.argv), 2, -1):
_input = ' '.join(sys.argv[2:j]).strip()
if _input in ac_table['completions']:
compl = ac_table['completions'][_input]
for k in compl:
if k not in sys.argv:
print(k)
break
exit()
def _try_plugin_command():
"""Tries to call the CLI of an external Jina project.
    :return: whether the plugin has been found (locally or among the known plugins)
"""
argv = sys.argv
if len(argv) < 2: # no command given
return False
from jina_cli.autocomplete import ac_table
if argv[1] in ac_table['commands']: # native command can't be plugin command
return False
def _cmd_exists(cmd):
return shutil.which(cmd) is not None
subcommand = argv[1]
cmd = 'jina-' + subcommand
if _cmd_exists(cmd):
subprocess.run([cmd] + argv[2:])
return True
from jina_cli.known_plugins import plugin_info
if subcommand in plugin_info:
from jina.helper import get_rich_console
cmd_info = plugin_info[subcommand]
project, package = cmd_info['display-name'], cmd_info['pip-package']
console = get_rich_console()
console.print(
f"It seems like [yellow]{project}[/yellow] is not installed in your environment."
f"To use it via the [green]'jina {subcommand}'[/green] command, "
f"install it first: [green]'pip install {package}'[/green]."
)
return True
return False
def main():
"""The main entrypoint of the CLI"""
found_plugin = _try_plugin_command()
if not found_plugin:
_quick_ac_lookup()
from jina_cli import api
args = _get_run_args()
getattr(api, args.cli.replace('-', '_'))(args)
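# --- Behavior sketch (not part of the original module) ---
# Hedged illustration of the plugin resolution rule in `_try_plugin_command`:
# a non-native subcommand `jina foo` is resolved to an executable named
# `jina-foo` on the PATH. The subcommand name below is illustrative.
if __name__ == "__main__":
    candidate = 'jina-' + 'example'
    print(shutil.which(candidate) is not None)  # True only if such a binary exists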
|
import os
import shutil
import subprocess
import sys
def _get_run_args(print_args: bool = True):
from jina.helper import get_rich_console
from jina.parsers import get_main_parser
console = get_rich_console()
silent_print = {'help', 'hub', 'export'}
parser = get_main_parser()
if len(sys.argv) > 1:
from argparse import _StoreAction, _StoreTrueAction
from rich import box
from rich.table import Table
args, unknown = parser.parse_known_args()
if unknown:
from jina.helper import warn_unknown_args
unknown = list(filter(lambda x: x.startswith('--'), unknown))
warn_unknown_args(unknown)
if args.cli not in silent_print and print_args:
from jina import __resources_path__
p = parser._actions[-1].choices[sys.argv[1]]
default_args = {
a.dest: a.default
for a in p._actions
if isinstance(a, (_StoreAction, _StoreTrueAction))
}
with open(os.path.join(__resources_path__, 'jina.logo')) as fp:
logo_str = fp.read()
param_str = Table(
title=' '.join(sys.argv),
box=box.ROUNDED,
highlight=True,
title_justify='left',
)
param_str.add_column('Argument', justify='right')
param_str.add_column('Value', justify='left')
for k, v in sorted(vars(args).items()):
param = k.replace('_', '-')
value = str(v)
if not default_args.get(k, None) == v:
value = f'[b]{value}[/]'
param_str.add_row(param, value)
if 'JINA_LOG_NO_COLOR' not in os.environ:
print(f'\n{logo_str}\n')
console.print(param_str)
return args
else:
parser.print_help()
exit()
def _quick_ac_lookup():
from jina_cli.autocomplete import ac_table
if len(sys.argv) > 1:
if sys.argv[1] == 'commands':
for k in ac_table['commands']:
print(k)
exit()
elif sys.argv[1] == 'completions':
# search with the longest shared prefix
for j in range(len(sys.argv), 2, -1):
_input = ' '.join(sys.argv[2:j]).strip()
if _input in ac_table['completions']:
compl = ac_table['completions'][_input]
for k in compl:
if k not in sys.argv:
print(k)
break
exit()
def _try_plugin_command():
"""Tries to call the CLI of an external Jina project.
    :return: whether the plugin has been found (locally or among the known plugins)
"""
argv = sys.argv
if len(argv) < 2: # no command given
return False
from jina_cli.autocomplete import ac_table
if argv[1] in ac_table['commands']: # native command can't be plugin command
return False
def _cmd_exists(cmd):
return shutil.which(cmd) is not None
subcommand = argv[1]
cmd = 'jina-' + subcommand
if _cmd_exists(cmd):
subprocess.run([cmd] + argv[2:])
return True
from jina_cli.known_plugins import plugin_info
if subcommand in plugin_info:
from jina.helper import get_rich_console
cmd_info = plugin_info[subcommand]
project, package = cmd_info['display-name'], cmd_info['pip-package']
console = get_rich_console()
console.print(
f"It seems like [yellow]{project}[/yellow] is not installed in your environment."
f"To use it via the [green]'jina {subcommand}'[/green] command, "
f"install it first: [green]'pip install {package}'[/green]."
)
return True
return False
def main():
"""The main entrypoint of the CLI"""
found_plugin = _try_plugin_command()
if not found_plugin:
_quick_ac_lookup()
from jina_cli import api
args = _get_run_args()
getattr(api, args.cli.replace('-', '_'))(args)
|
# Copyright (c) OpenMMLab. All rights reserved.
"""copy from
https://github.com/ZwwWayne/K-Net/blob/main/knet/det/mask_pseudo_sampler.py."""
import torch
from torch import Tensor
from mmdet.core.bbox.assigners import AssignResult
from .sampling_result import SamplingResult
class MaskSamplingResult(SamplingResult):
"""Mask sampling result."""
def __init__(self,
pos_inds: Tensor,
neg_inds: Tensor,
masks: Tensor,
gt_masks: Tensor,
assign_result: AssignResult,
gt_flags: Tensor,
avg_factor_with_neg: bool = True) -> None:
self.pos_inds = pos_inds
self.neg_inds = neg_inds
self.num_pos = max(pos_inds.numel(), 1)
self.num_neg = max(neg_inds.numel(), 1)
self.avg_factor = self.num_pos + self.num_neg \
if avg_factor_with_neg else self.num_pos
self.pos_masks = masks[pos_inds]
self.neg_masks = masks[neg_inds]
self.pos_is_gt = gt_flags[pos_inds]
self.num_gts = gt_masks.shape[0]
self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1
if gt_masks.numel() == 0:
# hack for index error case
assert self.pos_assigned_gt_inds.numel() == 0
self.pos_gt_masks = torch.empty_like(gt_masks)
else:
self.pos_gt_masks = gt_masks[self.pos_assigned_gt_inds, :]
@property
def masks(self) -> Tensor:
"""torch.Tensor: concatenated positive and negative masks."""
return torch.cat([self.pos_masks, self.neg_masks])
def __nice__(self) -> str:
data = self.info.copy()
data['pos_masks'] = data.pop('pos_masks').shape
data['neg_masks'] = data.pop('neg_masks').shape
parts = [f"'{k}': {v!r}" for k, v in sorted(data.items())]
body = ' ' + ',\n '.join(parts)
return '{\n' + body + '\n}'
@property
def info(self) -> dict:
"""Returns a dictionary of info about the object."""
return {
'pos_inds': self.pos_inds,
'neg_inds': self.neg_inds,
'pos_masks': self.pos_masks,
'neg_masks': self.neg_masks,
'pos_is_gt': self.pos_is_gt,
'num_gts': self.num_gts,
'pos_assigned_gt_inds': self.pos_assigned_gt_inds,
}
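# --- Usage sketch (not part of the original module) ---
# A hedged, minimal construction of this result object with toy tensors;
# all shapes, indices, and values below are illustrative only.
if __name__ == "__main__":
    num_priors, num_gts, h, w = 4, 2, 8, 8
    assign_result = AssignResult(
        num_gts=num_gts,
        gt_inds=torch.tensor([1, 0, 2, 0]),  # 1-based gt index, 0 = unassigned
        max_overlaps=torch.rand(num_priors),
        labels=None)
    result = MaskSamplingResult(
        pos_inds=torch.tensor([0, 2]),
        neg_inds=torch.tensor([1, 3]),
        masks=torch.rand(num_priors, h, w),
        gt_masks=torch.rand(num_gts, h, w),
        assign_result=assign_result,
        gt_flags=torch.zeros(num_priors, dtype=torch.bool))
    print(result.masks.shape)  # positives and negatives concatenated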
|
# Copyright (c) OpenMMLab. All rights reserved.
"""copy from
https://github.com/ZwwWayne/K-Net/blob/main/knet/det/mask_pseudo_sampler.py."""
import torch
from .sampling_result import SamplingResult
class MaskSamplingResult(SamplingResult):
"""Mask sampling result."""
def __init__(self, pos_inds, neg_inds, masks, gt_masks, assign_result,
gt_flags):
self.pos_inds = pos_inds
self.neg_inds = neg_inds
self.pos_masks = masks[pos_inds]
self.neg_masks = masks[neg_inds]
self.pos_is_gt = gt_flags[pos_inds]
self.num_gts = gt_masks.shape[0]
self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1
if gt_masks.numel() == 0:
# hack for index error case
assert self.pos_assigned_gt_inds.numel() == 0
self.pos_gt_masks = torch.empty_like(gt_masks)
else:
self.pos_gt_masks = gt_masks[self.pos_assigned_gt_inds, :]
if assign_result.labels is not None:
self.pos_gt_labels = assign_result.labels[pos_inds]
else:
self.pos_gt_labels = None
@property
def masks(self):
"""torch.Tensor: concatenated positive and negative boxes"""
return torch.cat([self.pos_masks, self.neg_masks])
def __nice__(self):
data = self.info.copy()
data['pos_masks'] = data.pop('pos_masks').shape
data['neg_masks'] = data.pop('neg_masks').shape
parts = [f"'{k}': {v!r}" for k, v in sorted(data.items())]
body = ' ' + ',\n '.join(parts)
return '{\n' + body + '\n}'
@property
def info(self):
"""Returns a dictionary of info about the object."""
return {
'pos_inds': self.pos_inds,
'neg_inds': self.neg_inds,
'pos_masks': self.pos_masks,
'neg_masks': self.neg_masks,
'pos_is_gt': self.pos_is_gt,
'num_gts': self.num_gts,
'pos_assigned_gt_inds': self.pos_assigned_gt_inds,
}
|
from __future__ import annotations
import json
import os
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import nn
class CNN(nn.Module):
"""CNN-layer with multiple kernel-sizes over the word embeddings"""
def __init__(
self,
in_word_embedding_dimension: int,
out_channels: int = 256,
kernel_sizes: list[int] = [1, 3, 5],
        stride_sizes: list[int] | None = None,
):
nn.Module.__init__(self)
self.config_keys = ["in_word_embedding_dimension", "out_channels", "kernel_sizes"]
self.in_word_embedding_dimension = in_word_embedding_dimension
self.out_channels = out_channels
self.kernel_sizes = kernel_sizes
self.embeddings_dimension = out_channels * len(kernel_sizes)
self.convs = nn.ModuleList()
in_channels = in_word_embedding_dimension
if stride_sizes is None:
stride_sizes = [1] * len(kernel_sizes)
for kernel_size, stride in zip(kernel_sizes, stride_sizes):
padding_size = int((kernel_size - 1) / 2)
conv = nn.Conv1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding_size,
)
self.convs.append(conv)
def forward(self, features):
token_embeddings = features["token_embeddings"]
token_embeddings = token_embeddings.transpose(1, -1)
vectors = [conv(token_embeddings) for conv in self.convs]
out = torch.cat(vectors, 1).transpose(1, -1)
features.update({"token_embeddings": out})
return features
def get_word_embedding_dimension(self) -> int:
return self.embeddings_dimension
def tokenize(self, text: str, **kwargs) -> list[int]:
raise NotImplementedError()
def save(self, output_path: str, safe_serialization: bool = True):
with open(os.path.join(output_path, "cnn_config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
if safe_serialization:
save_safetensors_model(self, os.path.join(output_path, "model.safetensors"))
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "cnn_config.json")) as fIn:
config = json.load(fIn)
model = CNN(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(
os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"), weights_only=True
)
)
return model
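# --- Usage sketch (not part of the original module) ---
# A hedged, minimal forward pass. With the defaults (out_channels=256,
# kernel_sizes=[1, 3, 5]) the output dimension is 3 * 256 = 768; the input
# shape below is illustrative.
if __name__ == "__main__":
    cnn = CNN(in_word_embedding_dimension=300)
    features = {"token_embeddings": torch.rand(2, 10, 300)}  # (batch, tokens, dim)
    out = cnn(features)["token_embeddings"]
    print(out.shape)  # torch.Size([2, 10, 768])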
|
from __future__ import annotations
import json
import os
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import nn
class CNN(nn.Module):
"""CNN-layer with multiple kernel-sizes over the word embeddings"""
def __init__(
self,
in_word_embedding_dimension: int,
out_channels: int = 256,
kernel_sizes: list[int] = [1, 3, 5],
        stride_sizes: list[int] | None = None,
):
nn.Module.__init__(self)
self.config_keys = ["in_word_embedding_dimension", "out_channels", "kernel_sizes"]
self.in_word_embedding_dimension = in_word_embedding_dimension
self.out_channels = out_channels
self.kernel_sizes = kernel_sizes
self.embeddings_dimension = out_channels * len(kernel_sizes)
self.convs = nn.ModuleList()
in_channels = in_word_embedding_dimension
if stride_sizes is None:
stride_sizes = [1] * len(kernel_sizes)
for kernel_size, stride in zip(kernel_sizes, stride_sizes):
padding_size = int((kernel_size - 1) / 2)
conv = nn.Conv1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding_size,
)
self.convs.append(conv)
def forward(self, features):
token_embeddings = features["token_embeddings"]
token_embeddings = token_embeddings.transpose(1, -1)
vectors = [conv(token_embeddings) for conv in self.convs]
out = torch.cat(vectors, 1).transpose(1, -1)
features.update({"token_embeddings": out})
return features
def get_word_embedding_dimension(self) -> int:
return self.embeddings_dimension
def tokenize(self, text: str, **kwargs) -> list[int]:
raise NotImplementedError()
def save(self, output_path: str, safe_serialization: bool = True):
with open(os.path.join(output_path, "cnn_config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
if safe_serialization:
save_safetensors_model(self, os.path.join(output_path, "model.safetensors"))
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "cnn_config.json")) as fIn:
config = json.load(fIn)
model = CNN(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"))
)
return model
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.fashion_mnist import load_data as load_data
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.fashion_mnist import load_data
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core.utils import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class RetinaNet(SingleStageDetector):
"""Implementation of `RetinaNet <https://arxiv.org/abs/1708.02002>`_"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
preprocess_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
init_cfg=init_cfg,
preprocess_cfg=preprocess_cfg)
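# --- Config sketch (not part of the original module) ---
# Hedged illustration of how this detector is typically selected from an
# mmdet-style config; the backbone/neck/head settings are placeholders and
# would normally come from a base config such as retinanet_r50_fpn.
# model = dict(
#     type='RetinaNet',
#     backbone=dict(type='ResNet', depth=50),
#     neck=dict(type='FPN', in_channels=[256, 512, 1024, 2048],
#               out_channels=256, num_outs=5),
#     bbox_head=dict(type='RetinaHead', num_classes=80, in_channels=256))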
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Union
from mmengine.config import ConfigDict
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class RetinaNet(SingleStageDetector):
"""Implementation of `RetinaNet <https://arxiv.org/abs/1708.02002>`_"""
def __init__(self,
backbone: Union[ConfigDict, dict],
neck: Union[ConfigDict, dict],
bbox_head: Union[ConfigDict, dict],
train_cfg: Optional[Union[ConfigDict, dict]] = None,
test_cfg: Optional[Union[ConfigDict, dict]] = None,
preprocess_cfg: Optional[Union[ConfigDict, dict]] = None,
pretrained: Optional[str] = None,
init_cfg: Optional[Union[ConfigDict, dict]] = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg,
preprocess_cfg=preprocess_cfg)
|
"""Pydantic v1 compatibility shim."""
from langchain_core._api import warn_deprecated
try:
from pydantic.v1.dataclasses import * # noqa: F403
except ImportError:
from pydantic.dataclasses import * # type: ignore[no-redef] # noqa: F403
warn_deprecated(
"0.3.0",
removal="1.0.0",
alternative="pydantic.v1 or pydantic",
message=(
"As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. "
"The langchain_core.pydantic_v1 module was a "
"compatibility shim for pydantic v1, and should no longer be used. "
"Please update the code to import from Pydantic directly.\n\n"
"For example, replace imports like: "
"`from langchain_core.pydantic_v1 import BaseModel`\n"
"with: `from pydantic import BaseModel`\n"
"or the v1 compatibility namespace if you are working in a code base "
"that has not been fully upgraded to pydantic 2 yet. "
"\tfrom pydantic.v1 import BaseModel\n"
),
)
|
"""Pydantic v1 compatibility shim."""
from langchain_core._api import warn_deprecated
try:
from pydantic.v1.dataclasses import * # noqa: F403
except ImportError:
from pydantic.dataclasses import * # noqa: F403
warn_deprecated(
"0.3.0",
removal="1.0.0",
alternative="pydantic.v1 or pydantic",
message=(
"As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. "
"The langchain_core.pydantic_v1 module was a "
"compatibility shim for pydantic v1, and should no longer be used. "
"Please update the code to import from Pydantic directly.\n\n"
"For example, replace imports like: "
"`from langchain_core.pydantic_v1 import BaseModel`\n"
"with: `from pydantic import BaseModel`\n"
"or the v1 compatibility namespace if you are working in a code base "
"that has not been fully upgraded to pydantic 2 yet. "
"\tfrom pydantic.v1 import BaseModel\n"
),
)
|
# Copyright (c) OpenMMLab. All rights reserved.
"""Collecting some commonly used type hint in mmdetection."""
from typing import List, Optional, Union
from mmengine.config import ConfigDict
from mmengine.data import InstanceData
from ..bbox.samplers import SamplingResult
from ..data_structures import DetDataSample
# Type hint of config data
ConfigType = Union[ConfigDict, dict]
OptConfigType = Optional[ConfigType]
# Type hint of one or more config data
MultiConfig = Union[ConfigType, List[ConfigType]]
OptMultiConfig = Optional[MultiConfig]
InstanceList = List[InstanceData]
OptInstanceList = Optional[InstanceList]
SampleList = List[DetDataSample]
OptSampleList = Optional[SampleList]
SamplingResultList = List[SamplingResult]
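# --- Usage sketch (not part of the original module) ---
# Hedged illustration of how these aliases read in signatures; the function
# below is a placeholder, not part of mmdetection.
def _example_signature(cfg: ConfigType,
                       samples: SampleList,
                       init_cfg: OptMultiConfig = None) -> OptInstanceList:
    """Placeholder showing the intended annotation style."""
    return None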
|
# Copyright (c) OpenMMLab. All rights reserved.
"""Collecting some commonly used type hint in mmdetection."""
from typing import List, Optional, Union
from mmengine.config import ConfigDict
from mmengine.data import InstanceData
from ..data_structures import DetDataSample
# Type hint of config data
ConfigType = Union[ConfigDict, dict]
OptConfigType = Optional[ConfigType]
# Type hint of one or more config data
MultiConfig = Union[ConfigType, List[ConfigType]]
OptMultiConfig = Optional[MultiConfig]
# List of InstanceData
InstanceList = List[InstanceData]
OptInstanceList = Optional[InstanceList]
# List of DetDataSample
SampleList = List[DetDataSample]
OptSampleList = Optional[SampleList]
|
import numpy as np
from absl.testing import parameterized
from keras.src import backend
from keras.src import testing
from keras.src.utils import backend_utils
class BackendUtilsTest(testing.TestCase):
@parameterized.named_parameters(
("numpy", "numpy"),
("jax", "jax"),
("tensorflow", "tensorflow"),
("torch", "torch"),
)
def test_dynamic_backend(self, name):
dynamic_backend = backend_utils.DynamicBackend()
x = np.random.uniform(size=[1, 2, 3]).astype("float32")
if name == "numpy":
dynamic_backend.set_backend(name)
if backend.backend() != "numpy":
with self.assertRaisesRegex(
NotImplementedError,
"Currently, we cannot dynamically import the numpy backend",
):
y = dynamic_backend.numpy.log10(x)
else:
y = dynamic_backend.numpy.log10(x)
self.assertIsInstance(y, np.ndarray)
elif name == "jax":
import jax
dynamic_backend.set_backend(name)
y = dynamic_backend.numpy.log10(x)
self.assertIsInstance(y, jax.Array)
elif name == "tensorflow":
import tensorflow as tf
dynamic_backend.set_backend(name)
y = dynamic_backend.numpy.log10(x)
self.assertIsInstance(y, tf.Tensor)
elif name == "torch":
import torch
dynamic_backend.set_backend(name)
y = dynamic_backend.numpy.log10(x)
self.assertIsInstance(y, torch.Tensor)
def test_dynamic_backend_invalid_name(self):
dynamic_backend = backend_utils.DynamicBackend()
with self.assertRaisesRegex(ValueError, "Available backends are"):
dynamic_backend.set_backend("abc")
|
import numpy as np
from absl.testing import parameterized
from keras.src import backend
from keras.src import testing
from keras.src.utils import backend_utils
class BackendUtilsTest(testing.TestCase):
@parameterized.named_parameters(
("numpy", "numpy"),
("jax", "jax"),
("tensorflow", "tensorflow"),
("torch", "torch"),
)
def test_dynamic_backend(self, name):
dynamic_backend = backend_utils.DynamicBackend()
x = np.random.uniform(size=[1, 2, 3])
if name == "numpy":
dynamic_backend.set_backend(name)
if backend.backend() != "numpy":
with self.assertRaisesRegex(
NotImplementedError,
"Currently, we cannot dynamically import the numpy backend",
):
y = dynamic_backend.numpy.log10(x)
else:
y = dynamic_backend.numpy.log10(x)
self.assertIsInstance(y, np.ndarray)
elif name == "jax":
import jax
dynamic_backend.set_backend(name)
y = dynamic_backend.numpy.log10(x)
self.assertIsInstance(y, jax.Array)
elif name == "tensorflow":
import tensorflow as tf
dynamic_backend.set_backend(name)
y = dynamic_backend.numpy.log10(x)
self.assertIsInstance(y, tf.Tensor)
elif name == "torch":
import torch
dynamic_backend.set_backend(name)
y = dynamic_backend.numpy.log10(x)
self.assertIsInstance(y, torch.Tensor)
def test_dynamic_backend_invalid_name(self):
dynamic_backend = backend_utils.DynamicBackend()
with self.assertRaisesRegex(ValueError, "Available backends are"):
dynamic_backend.set_backend("abc")
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
import torch.nn.functional as F
from mmcv.cnn import constant_init
from mmdet.models.utils import DyReLU, SELayer
def test_se_layer():
with pytest.raises(AssertionError):
# act_cfg sequence length must equal to 2
SELayer(channels=32, act_cfg=(dict(type='ReLU'), ))
with pytest.raises(AssertionError):
# act_cfg sequence must be a tuple of dict
SELayer(channels=32, act_cfg=[dict(type='ReLU'), dict(type='ReLU')])
# Test SELayer forward
layer = SELayer(channels=32)
layer.init_weights()
layer.train()
x = torch.randn((1, 32, 10, 10))
x_out = layer(x)
assert x_out.shape == torch.Size((1, 32, 10, 10))
def test_dyrelu():
with pytest.raises(AssertionError):
# act_cfg sequence length must equal to 2
DyReLU(channels=32, act_cfg=(dict(type='ReLU'), ))
with pytest.raises(AssertionError):
# act_cfg sequence must be a tuple of dict
DyReLU(channels=32, act_cfg=[dict(type='ReLU'), dict(type='ReLU')])
# Test DyReLU forward
layer = DyReLU(channels=32)
layer.init_weights()
layer.train()
x = torch.randn((1, 32, 10, 10))
x_out = layer(x)
assert x_out.shape == torch.Size((1, 32, 10, 10))
# DyReLU should act as standard (static) ReLU
# when eliminating the effect of SE-like module
layer = DyReLU(channels=32)
constant_init(layer.conv2.conv, 0)
layer.train()
x = torch.randn((1, 32, 10, 10))
x_out = layer(x)
relu_out = F.relu(x)
assert torch.equal(x_out, relu_out)
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.utils import SELayer
def test_se_layer():
with pytest.raises(AssertionError):
# act_cfg sequence length must equal to 2
SELayer(channels=32, act_cfg=(dict(type='ReLU'), ))
with pytest.raises(AssertionError):
# act_cfg sequence must be a tuple of dict
SELayer(channels=32, act_cfg=[dict(type='ReLU'), dict(type='ReLU')])
# Test SELayer forward
layer = SELayer(channels=32)
layer.init_weights()
layer.train()
x = torch.randn((1, 32, 10, 10))
x_out = layer(x)
assert x_out.shape == torch.Size((1, 32, 10, 10))
|
_base_ = './solo_r50_fpn_8xb8-lsj-200e_coco.py'
model = dict(
backbone=dict(
depth=18,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
neck=dict(in_channels=[64, 128, 256, 512]))
|
_base_ = './solo_r50_fpn_lsj_200e_8x8_coco.py'
model = dict(
backbone=dict(
depth=18,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
neck=dict(in_channels=[64, 128, 256, 512]))
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence, Union
from mmengine.data import BaseDataElement
from mmengine.hooks import Hook
from mmengine.runner import Runner
from mmdet.registry import HOOKS
@HOOKS.register_module()
class MemoryProfilerHook(Hook):
"""Memory profiler hook recording memory information including virtual
memory, swap memory, and the memory of the current process.
Args:
interval (int): Checking interval (every k iterations).
Default: 50.
"""
def __init__(self, interval: int = 50) -> None:
try:
from psutil import swap_memory, virtual_memory
self._swap_memory = swap_memory
self._virtual_memory = virtual_memory
except ImportError:
raise ImportError('psutil is not installed, please install it by: '
'pip install psutil')
try:
from memory_profiler import memory_usage
self._memory_usage = memory_usage
except ImportError:
raise ImportError(
'memory_profiler is not installed, please install it by: '
'pip install memory_profiler')
self.interval = interval
def _after_iter(self,
runner: Runner,
batch_idx: int,
data_batch: Optional[Sequence[dict]] = None,
outputs: Optional[Union[Sequence[BaseDataElement],
dict]] = None,
mode: str = 'train') -> None:
"""Regularly record memory information.
Args:
runner (:obj:`Runner`): The runner of the training process.
batch_idx (int): The index of the current batch in the train loop.
data_batch (Sequence[dict], optional): Data from dataloader.
Defaults to None.
outputs (Union[Sequence[:obj:`BaseDataElement`], dict], optional):
Outputs from model. Defaults to None.
"""
if self.every_n_inner_iters(batch_idx, self.interval):
            # in bytes
virtual_memory = self._virtual_memory()
swap_memory = self._swap_memory()
# in MB
process_memory = self._memory_usage()[0]
factor = 1024 * 1024
runner.logger.info(
'Memory information '
'available_memory: '
f'{round(virtual_memory.available / factor)} MB, '
'used_memory: '
f'{round(virtual_memory.used / factor)} MB, '
f'memory_utilization: {virtual_memory.percent} %, '
'available_swap_memory: '
f'{round((swap_memory.total - swap_memory.used) / factor)}'
' MB, '
f'used_swap_memory: {round(swap_memory.used / factor)} MB, '
f'swap_memory_utilization: {swap_memory.percent} %, '
'current_process_memory: '
f'{round(process_memory)} MB')
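# --- Config sketch (not part of the original module) ---
# Hedged illustration of enabling this hook from an mmdet-style config; the
# interval value is a placeholder.
# custom_hooks = [dict(type='MemoryProfilerHook', interval=50)]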
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.runner.hooks import Hook
from mmdet.registry import HOOKS
@HOOKS.register_module()
class MemoryProfilerHook(Hook):
"""Memory profiler hook recording memory information including virtual
memory, swap memory, and the memory of the current process.
Args:
interval (int): Checking interval (every k iterations).
Default: 50.
"""
def __init__(self, interval=50):
try:
from psutil import swap_memory, virtual_memory
self._swap_memory = swap_memory
self._virtual_memory = virtual_memory
except ImportError:
raise ImportError('psutil is not installed, please install it by: '
'pip install psutil')
try:
from memory_profiler import memory_usage
self._memory_usage = memory_usage
except ImportError:
raise ImportError(
'memory_profiler is not installed, please install it by: '
'pip install memory_profiler')
self.interval = interval
def after_iter(self, runner):
if self.every_n_iters(runner, self.interval):
            # in bytes
virtual_memory = self._virtual_memory()
swap_memory = self._swap_memory()
# in MB
process_memory = self._memory_usage()[0]
factor = 1024 * 1024
runner.logger.info(
'Memory information '
'available_memory: '
f'{round(virtual_memory.available / factor)} MB, '
'used_memory: '
f'{round(virtual_memory.used / factor)} MB, '
f'memory_utilization: {virtual_memory.percent} %, '
'available_swap_memory: '
f'{round((swap_memory.total - swap_memory.used) / factor)}'
' MB, '
f'used_swap_memory: {round(swap_memory.used / factor)} MB, '
f'swap_memory_utilization: {swap_memory.percent} %, '
'current_process_memory: '
f'{round(process_memory)} MB')
|
from .filtering import (
allpass_biquad,
band_biquad,
bandpass_biquad,
bandreject_biquad,
bass_biquad,
biquad,
contrast,
dcshift,
deemph_biquad,
dither,
equalizer_biquad,
filtfilt,
flanger,
gain,
highpass_biquad,
lfilter,
lowpass_biquad,
overdrive,
phaser,
riaa_biquad,
treble_biquad,
vad,
)
from .functional import (
add_noise,
amplitude_to_DB,
apply_beamforming,
apply_codec,
compute_deltas,
compute_kaldi_pitch,
convolve,
create_dct,
DB_to_amplitude,
deemphasis,
detect_pitch_frequency,
edit_distance,
fftconvolve,
griffinlim,
inverse_spectrogram,
linear_fbanks,
loudness,
mask_along_axis,
mask_along_axis_iid,
melscale_fbanks,
mu_law_decoding,
mu_law_encoding,
mvdr_weights_rtf,
mvdr_weights_souden,
phase_vocoder,
pitch_shift,
preemphasis,
psd,
resample,
rnnt_loss,
rtf_evd,
rtf_power,
sliding_window_cmn,
spectral_centroid,
spectrogram,
speed,
)
__all__ = [
"amplitude_to_DB",
"compute_deltas",
"compute_kaldi_pitch",
"create_dct",
"melscale_fbanks",
"linear_fbanks",
"DB_to_amplitude",
"loudness",
"detect_pitch_frequency",
"griffinlim",
"mask_along_axis",
"mask_along_axis_iid",
"mu_law_encoding",
"mu_law_decoding",
"phase_vocoder",
"sliding_window_cmn",
"spectrogram",
"inverse_spectrogram",
"spectral_centroid",
"allpass_biquad",
"band_biquad",
"bandpass_biquad",
"bandreject_biquad",
"bass_biquad",
"biquad",
"contrast",
"dither",
"dcshift",
"deemph_biquad",
"equalizer_biquad",
"filtfilt",
"flanger",
"gain",
"highpass_biquad",
"lfilter",
"lowpass_biquad",
"overdrive",
"phaser",
"riaa_biquad",
"treble_biquad",
"vad",
"apply_codec",
"resample",
"edit_distance",
"pitch_shift",
"rnnt_loss",
"psd",
"mvdr_weights_souden",
"mvdr_weights_rtf",
"rtf_evd",
"rtf_power",
"apply_beamforming",
"fftconvolve",
"convolve",
"add_noise",
"speed",
"preemphasis",
"deemphasis",
]
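# --- Usage sketch (not part of the original module) ---
# A hedged round trip with two of the exported functions: a power spectrogram
# followed by Griffin-Lim phase recovery. All parameter values below are
# illustrative.
if __name__ == "__main__":
    import torch
    import torchaudio.functional as F

    wave = torch.randn(1, 16000)
    window = torch.hann_window(400)
    spec = F.spectrogram(
        wave, pad=0, window=window, n_fft=400, hop_length=200,
        win_length=400, power=2.0, normalized=False)
    recovered = F.griffinlim(
        spec, window=window, n_fft=400, hop_length=200, win_length=400,
        power=2.0, n_iter=32, momentum=0.99, length=16000, rand_init=True)
    print(recovered.shape)  # torch.Size([1, 16000])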
|
from .filtering import (
allpass_biquad,
band_biquad,
bandpass_biquad,
bandreject_biquad,
bass_biquad,
biquad,
contrast,
dcshift,
deemph_biquad,
dither,
equalizer_biquad,
filtfilt,
flanger,
gain,
highpass_biquad,
lfilter,
lowpass_biquad,
overdrive,
phaser,
riaa_biquad,
treble_biquad,
vad,
)
from .functional import (
amplitude_to_DB,
apply_beamforming,
apply_codec,
compute_deltas,
compute_kaldi_pitch,
create_dct,
DB_to_amplitude,
detect_pitch_frequency,
edit_distance,
griffinlim,
inverse_spectrogram,
linear_fbanks,
loudness,
mask_along_axis,
mask_along_axis_iid,
melscale_fbanks,
mu_law_decoding,
mu_law_encoding,
mvdr_weights_rtf,
mvdr_weights_souden,
phase_vocoder,
pitch_shift,
psd,
resample,
rnnt_loss,
rtf_evd,
rtf_power,
sliding_window_cmn,
spectral_centroid,
spectrogram,
)
__all__ = [
"amplitude_to_DB",
"compute_deltas",
"compute_kaldi_pitch",
"create_dct",
"melscale_fbanks",
"linear_fbanks",
"DB_to_amplitude",
"loudness",
"detect_pitch_frequency",
"griffinlim",
"mask_along_axis",
"mask_along_axis_iid",
"mu_law_encoding",
"mu_law_decoding",
"phase_vocoder",
"sliding_window_cmn",
"spectrogram",
"inverse_spectrogram",
"spectral_centroid",
"allpass_biquad",
"band_biquad",
"bandpass_biquad",
"bandreject_biquad",
"bass_biquad",
"biquad",
"contrast",
"dither",
"dcshift",
"deemph_biquad",
"equalizer_biquad",
"filtfilt",
"flanger",
"gain",
"highpass_biquad",
"lfilter",
"lowpass_biquad",
"overdrive",
"phaser",
"riaa_biquad",
"treble_biquad",
"vad",
"apply_codec",
"resample",
"edit_distance",
"pitch_shift",
"rnnt_loss",
"psd",
"mvdr_weights_souden",
"mvdr_weights_rtf",
"rtf_evd",
"rtf_power",
"apply_beamforming",
]
|
_base_ = [
'../_base_/models/faster-rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
|
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
|
import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
evaluator = SparseNanoBEIREvaluator(
dataset_names=None, # None means evaluate on all datasets
show_progress_bar=True,
batch_size=16,
)
# Run evaluation
results = evaluator(model)
"""
Average Queries: 49.92307692307692
Average Corpus: 4334.7692307692305
Aggregated for Score Function: dot
Accuracy@1: 58.72%
Accuracy@3: 75.37%
Accuracy@5: 80.76%
Accuracy@10: 87.07%
Precision@1: 58.72%
Recall@1: 35.61%
Precision@3: 36.31%
Recall@3: 50.84%
Precision@5: 27.72%
Recall@5: 56.55%
Precision@10: 19.18%
Recall@10: 64.21%
MRR@10: 0.6822
NDCG@10: 0.6204
Model Query Sparsity: Active Dimensions: 74.9, Sparsity Ratio: 0.9975
Model Corpus Sparsity: Active Dimensions: 174.8, Sparsity Ratio: 0.9943
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: NanoBEIR_mean_dot_ndcg@10
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6204
|
import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
evaluator = SparseNanoBEIREvaluator(
dataset_names=None, # None means evaluate on all datasets
show_progress_bar=True,
batch_size=16,
)
# Run evaluation
results = evaluator(model)
"""
Average Queries: 49.92307692307692
Average Corpus: 4334.7692307692305
Aggregated for Score Function: dot
Accuracy@1: 58.72%
Accuracy@3: 75.37%
Accuracy@5: 80.76%
Accuracy@10: 87.07%
Precision@1: 58.72%
Recall@1: 35.61%
Precision@3: 36.31%
Recall@3: 50.84%
Precision@5: 27.72%
Recall@5: 56.55%
Precision@10: 19.18%
Recall@10: 64.21%
MRR@10: 0.6822
NDCG@10: 0.6204
Model Sparsity Stats Query : Row Non-Zero Mean: 74.93406589214618, Row Sparsity Mean: 0.9975449305314285
Model Sparsity Stats Corpus : Row Non-Zero Mean: 174.8070262028621, Row Sparsity Mean: 0.9942727547425491
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: NanoBEIR_mean_dot_ndcg@10
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6204
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Sequence
from torch.utils.data import BatchSampler, Sampler
from mmdet.datasets.samplers.track_img_sampler import TrackImgSampler
from mmdet.registry import DATA_SAMPLERS
# TODO: maybe replace with a data_loader wrapper
@DATA_SAMPLERS.register_module()
class AspectRatioBatchSampler(BatchSampler):
"""A sampler wrapper for grouping images with similar aspect ratio (< 1 or.
>= 1) into a same batch.
Args:
sampler (Sampler): Base sampler.
batch_size (int): Size of mini-batch.
drop_last (bool): If ``True``, the sampler will drop the last batch if
its size would be less than ``batch_size``.
"""
def __init__(self,
sampler: Sampler,
batch_size: int,
drop_last: bool = False) -> None:
if not isinstance(sampler, Sampler):
raise TypeError('sampler should be an instance of ``Sampler``, '
f'but got {sampler}')
if not isinstance(batch_size, int) or batch_size <= 0:
raise ValueError('batch_size should be a positive integer value, '
f'but got batch_size={batch_size}')
self.sampler = sampler
self.batch_size = batch_size
self.drop_last = drop_last
# two groups for w < h and w >= h
self._aspect_ratio_buckets = [[] for _ in range(2)]
def __iter__(self) -> Sequence[int]:
for idx in self.sampler:
data_info = self.sampler.dataset.get_data_info(idx)
width, height = data_info['width'], data_info['height']
bucket_id = 0 if width < height else 1
bucket = self._aspect_ratio_buckets[bucket_id]
bucket.append(idx)
# yield a batch of indices in the same aspect ratio group
if len(bucket) == self.batch_size:
yield bucket[:]
del bucket[:]
# yield the rest data and reset the bucket
left_data = self._aspect_ratio_buckets[0] + self._aspect_ratio_buckets[
1]
self._aspect_ratio_buckets = [[] for _ in range(2)]
while len(left_data) > 0:
if len(left_data) <= self.batch_size:
if not self.drop_last:
yield left_data[:]
left_data = []
else:
yield left_data[:self.batch_size]
left_data = left_data[self.batch_size:]
def __len__(self) -> int:
if self.drop_last:
return len(self.sampler) // self.batch_size
else:
return (len(self.sampler) + self.batch_size - 1) // self.batch_size
@DATA_SAMPLERS.register_module()
class TrackAspectRatioBatchSampler(AspectRatioBatchSampler):
"""A sampler wrapper for grouping images with similar aspect ratio (< 1 or.
>= 1) into a same batch.
Args:
sampler (Sampler): Base sampler.
batch_size (int): Size of mini-batch.
drop_last (bool): If ``True``, the sampler will drop the last batch if
its size would be less than ``batch_size``.
"""
def __iter__(self) -> Sequence[int]:
for idx in self.sampler:
# hard code to solve TrackImgSampler
if isinstance(self.sampler, TrackImgSampler):
video_idx, _ = idx
else:
video_idx = idx
# video_idx
data_info = self.sampler.dataset.get_data_info(video_idx)
# data_info {video_id, images, video_length}
img_data_info = data_info['images'][0]
width, height = img_data_info['width'], img_data_info['height']
bucket_id = 0 if width < height else 1
bucket = self._aspect_ratio_buckets[bucket_id]
bucket.append(idx)
# yield a batch of indices in the same aspect ratio group
if len(bucket) == self.batch_size:
yield bucket[:]
del bucket[:]
# yield the rest data and reset the bucket
left_data = self._aspect_ratio_buckets[0] + self._aspect_ratio_buckets[
1]
self._aspect_ratio_buckets = [[] for _ in range(2)]
while len(left_data) > 0:
if len(left_data) <= self.batch_size:
if not self.drop_last:
yield left_data[:]
left_data = []
else:
yield left_data[:self.batch_size]
left_data = left_data[self.batch_size:]
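# A minimal sketch of the bucketing behaviour above on a toy dataset
# (``ToyDataset`` and its shapes are illustrative, not part of mmdet):
from torch.utils.data import SequentialSampler
class ToyDataset:
    def __init__(self, shapes):
        self.shapes = shapes
    def __len__(self):
        return len(self.shapes)
    def get_data_info(self, idx):
        width, height = self.shapes[idx]
        return {'width': width, 'height': height}
toy = ToyDataset([(640, 480), (480, 640), (800, 600), (600, 800)])
toy_sampler = SequentialSampler(toy)
toy_sampler.dataset = toy  # the batch sampler reads ``sampler.dataset``
# landscape (w >= h) and portrait (w < h) indices end up in separate batches
assert list(AspectRatioBatchSampler(toy_sampler, batch_size=2)) == [[0, 2], [1, 3]]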
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Sequence
from torch.utils.data import BatchSampler, Sampler
from mmdet.registry import DATA_SAMPLERS
# TODO: maybe replace with a data_loader wrapper
@DATA_SAMPLERS.register_module()
class AspectRatioBatchSampler(BatchSampler):
"""A sampler wrapper for grouping images with similar aspect ratio (< 1 or.
>= 1) into a same batch.
Args:
sampler (Sampler): Base sampler.
batch_size (int): Size of mini-batch.
drop_last (bool): If ``True``, the sampler will drop the last batch if
its size would be less than ``batch_size``.
"""
def __init__(self,
sampler: Sampler,
batch_size: int,
drop_last: bool = False) -> None:
if not isinstance(sampler, Sampler):
raise TypeError('sampler should be an instance of ``Sampler``, '
f'but got {sampler}')
if not isinstance(batch_size, int) or batch_size <= 0:
raise ValueError('batch_size should be a positive integer value, '
f'but got batch_size={batch_size}')
self.sampler = sampler
self.batch_size = batch_size
self.drop_last = drop_last
# two groups for w < h and w >= h
self._aspect_ratio_buckets = [[] for _ in range(2)]
def __iter__(self) -> Sequence[int]:
for idx in self.sampler:
data_info = self.sampler.dataset.get_data_info(idx)
width, height = data_info['width'], data_info['height']
bucket_id = 0 if width < height else 1
bucket = self._aspect_ratio_buckets[bucket_id]
bucket.append(idx)
# yield a batch of indices in the same aspect ratio group
if len(bucket) == self.batch_size:
yield bucket[:]
del bucket[:]
# yield the rest data and reset the bucket
left_data = self._aspect_ratio_buckets[0] + self._aspect_ratio_buckets[
1]
self._aspect_ratio_buckets = [[] for _ in range(2)]
while len(left_data) > 0:
if len(left_data) <= self.batch_size:
if not self.drop_last:
yield left_data[:]
left_data = []
else:
yield left_data[:self.batch_size]
left_data = left_data[self.batch_size:]
def __len__(self) -> int:
if self.drop_last:
return len(self.sampler) // self.batch_size
else:
return (len(self.sampler) + self.batch_size - 1) // self.batch_size
|
from __future__ import annotations
import re
from typing import Optional
from langchain_core.output_parsers import BaseOutputParser
class RegexDictParser(BaseOutputParser[dict[str, str]]):
"""Parse the output of an LLM call into a Dictionary using a regex."""
regex_pattern: str = r"{}:\s?([^.'\n']*)\.?" # : :meta private:
"""The regex pattern to use to parse the output."""
output_key_to_format: dict[str, str]
"""The keys to use for the output."""
no_update_value: Optional[str] = None
"""The default key to use for the output."""
@property
def _type(self) -> str:
"""Return the type key."""
return "regex_dict_parser"
def parse(self, text: str) -> dict[str, str]:
"""Parse the output of an LLM call."""
result = {}
for output_key, expected_format in self.output_key_to_format.items():
specific_regex = self.regex_pattern.format(re.escape(expected_format))
matches = re.findall(specific_regex, text)
if not matches:
raise ValueError(
f"No match found for output key: {output_key} with expected format \
{expected_format} on text {text}"
)
elif len(matches) > 1:
raise ValueError(
f"Multiple matches found for output key: {output_key} with \
expected format {expected_format} on text {text}"
)
elif (
self.no_update_value is not None and matches[0] == self.no_update_value
):
continue
else:
result[output_key] = matches[0]
return result
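# A usage sketch for the parser above (the LLM output string is hypothetical):
parser = RegexDictParser(
    output_key_to_format={"action": "Action", "action_input": "Action Input"},
)
llm_output = "Action: search\nAction Input: latest docs\n"
assert parser.parse(llm_output) == {"action": "search", "action_input": "latest docs"}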
|
from __future__ import annotations
import re
from typing import Dict, Optional
from langchain_core.output_parsers import BaseOutputParser
class RegexDictParser(BaseOutputParser[Dict[str, str]]):
"""Parse the output of an LLM call into a Dictionary using a regex."""
regex_pattern: str = r"{}:\s?([^.'\n']*)\.?" # : :meta private:
"""The regex pattern to use to parse the output."""
output_key_to_format: Dict[str, str]
"""The keys to use for the output."""
no_update_value: Optional[str] = None
"""The default key to use for the output."""
@property
def _type(self) -> str:
"""Return the type key."""
return "regex_dict_parser"
def parse(self, text: str) -> Dict[str, str]:
"""Parse the output of an LLM call."""
result = {}
for output_key, expected_format in self.output_key_to_format.items():
specific_regex = self.regex_pattern.format(re.escape(expected_format))
matches = re.findall(specific_regex, text)
if not matches:
raise ValueError(
f"No match found for output key: {output_key} with expected format \
{expected_format} on text {text}"
)
elif len(matches) > 1:
raise ValueError(
f"Multiple matches found for output key: {output_key} with \
expected format {expected_format} on text {text}"
)
elif (
self.no_update_value is not None and matches[0] == self.no_update_value
):
continue
else:
result[output_key] = matches[0]
return result
|
import os
import subprocess
import sys
import pytest
from xgboost import testing as tm
DEMO_DIR = tm.demo_dir(__file__)
PYTHON_DEMO_DIR = os.path.join(DEMO_DIR, "guide-python")
@pytest.mark.skipif(**tm.no_cupy())
def test_data_iterator():
script = os.path.join(PYTHON_DEMO_DIR, "quantile_data_iterator.py")
cmd = ["python", script]
subprocess.check_call(cmd)
def test_update_process_demo():
script = os.path.join(PYTHON_DEMO_DIR, "update_process.py")
cmd = ["python", script]
subprocess.check_call(cmd)
def test_categorical_demo():
script = os.path.join(PYTHON_DEMO_DIR, "categorical.py")
cmd = ["python", script]
subprocess.check_call(cmd)
@pytest.mark.skipif(**tm.no_rmm())
@pytest.mark.skipif(**tm.no_cupy())
def test_external_memory_demo():
script = os.path.join(PYTHON_DEMO_DIR, "external_memory.py")
cmd = ["python", script, "--device=cuda"]
subprocess.check_call(cmd)
@pytest.mark.skipif(**tm.no_rmm())
@pytest.mark.skipif(**tm.no_cupy())
def test_distributed_extmem_basic_demo():
script = os.path.join(PYTHON_DEMO_DIR, "distributed_extmem_basic.py")
cmd = ["python", script, "--device=cuda"]
subprocess.check_call(cmd)
|
import os
import subprocess
import sys
import pytest
from xgboost import testing as tm
DEMO_DIR = tm.demo_dir(__file__)
PYTHON_DEMO_DIR = os.path.join(DEMO_DIR, "guide-python")
@pytest.mark.skipif(**tm.no_cupy())
def test_data_iterator():
script = os.path.join(PYTHON_DEMO_DIR, "quantile_data_iterator.py")
cmd = ["python", script]
subprocess.check_call(cmd)
def test_update_process_demo():
script = os.path.join(PYTHON_DEMO_DIR, "update_process.py")
cmd = ["python", script]
subprocess.check_call(cmd)
def test_categorical_demo():
script = os.path.join(PYTHON_DEMO_DIR, "categorical.py")
cmd = ["python", script]
subprocess.check_call(cmd)
@pytest.mark.skipif(**tm.no_rmm())
@pytest.mark.skipif(**tm.no_cupy())
def test_external_memory_demo():
script = os.path.join(PYTHON_DEMO_DIR, "external_memory.py")
cmd = ["python", script]
subprocess.check_call(cmd)
|
from __future__ import annotations
import math
import random
class NoDuplicatesDataLoader:
def __init__(self, train_examples, batch_size):
"""
A special data loader to be used with MultipleNegativesRankingLoss.
        The data loader ensures that there are no duplicate sentences within the same batch.
"""
self.batch_size = batch_size
self.data_pointer = 0
self.collate_fn = None
self.train_examples = train_examples
random.shuffle(self.train_examples)
def __iter__(self):
for _ in range(self.__len__()):
batch = []
texts_in_batch = set()
while len(batch) < self.batch_size:
example = self.train_examples[self.data_pointer]
valid_example = True
for text in example.texts:
if text.strip().lower() in texts_in_batch:
valid_example = False
break
if valid_example:
batch.append(example)
for text in example.texts:
texts_in_batch.add(text.strip().lower())
self.data_pointer += 1
if self.data_pointer >= len(self.train_examples):
self.data_pointer = 0
random.shuffle(self.train_examples)
yield self.collate_fn(batch) if self.collate_fn is not None else batch
def __len__(self):
return math.floor(len(self.train_examples) / self.batch_size)
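# A small usage sketch; ``InputExample`` from sentence_transformers exposes the
# ``.texts`` attribute, which is all this loader relies on:
from sentence_transformers import InputExample
train_examples = [
    InputExample(texts=["a cat", "an animal"]),
    InputExample(texts=["a car", "a vehicle"]),
    InputExample(texts=["a cat", "a pet"]),
]
loader = NoDuplicatesDataLoader(train_examples, batch_size=2)
batch = next(iter(loader))
# No two examples in ``batch`` share a sentence, so the two "a cat" examples
# can never be batched together.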
|
import math
import random
class NoDuplicatesDataLoader:
def __init__(self, train_examples, batch_size):
"""
A special data loader to be used with MultipleNegativesRankingLoss.
        The data loader ensures that there are no duplicate sentences within the same batch.
"""
self.batch_size = batch_size
self.data_pointer = 0
self.collate_fn = None
self.train_examples = train_examples
random.shuffle(self.train_examples)
def __iter__(self):
for _ in range(self.__len__()):
batch = []
texts_in_batch = set()
while len(batch) < self.batch_size:
example = self.train_examples[self.data_pointer]
valid_example = True
for text in example.texts:
if text.strip().lower() in texts_in_batch:
valid_example = False
break
if valid_example:
batch.append(example)
for text in example.texts:
texts_in_batch.add(text.strip().lower())
self.data_pointer += 1
if self.data_pointer >= len(self.train_examples):
self.data_pointer = 0
random.shuffle(self.train_examples)
yield self.collate_fn(batch) if self.collate_fn is not None else batch
def __len__(self):
return math.floor(len(self.train_examples) / self.batch_size)
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Optional, Sequence, Tuple, Union
import torch
from mmengine.data import BaseDataSample
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataSample]]]
@HOOKS.register_module()
class EmptyCacheHook(Hook):
"""Releases all unoccupied cached GPU memory during the process of
training.
Args:
before_epoch (bool): Whether to release cache before an epoch. Defaults
to False.
after_epoch (bool): Whether to release cache after an epoch. Defaults
to True.
after_iter (bool): Whether to release cache after an iteration.
Defaults to False.
"""
priority = 'NORMAL'
def __init__(self,
before_epoch: bool = False,
after_epoch: bool = True,
after_iter: bool = False) -> None:
self._do_before_epoch = before_epoch
self._do_after_epoch = after_epoch
self._do_after_iter = after_iter
def _after_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[Union[dict,
Sequence[BaseDataSample]]] = None,
mode: str = 'train') -> None:
"""Empty cache after an iteration.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[Tuple[Any, BaseDataSample]], optional): Data
from dataloader. Defaults to None.
outputs (dict or sequence, optional): Outputs from model.
Defaults to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_after_iter:
torch.cuda.empty_cache()
def _before_epoch(self, runner, mode: str = 'train') -> None:
"""Empty cache before an epoch.
Args:
runner (Runner): The runner of the training process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_before_epoch:
torch.cuda.empty_cache()
def _after_epoch(self, runner, mode: str = 'train') -> None:
"""Empty cache after an epoch.
Args:
runner (Runner): The runner of the training process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_after_epoch:
torch.cuda.empty_cache()
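# A hypothetical registration sketch: with the HOOKS registry above, the hook
# can be enabled from an mmengine-style config, e.g. to also free the cache
# after every iteration:
custom_hooks = [dict(type='EmptyCacheHook', after_epoch=True, after_iter=True)]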
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Optional, Sequence, Tuple, Union
import torch
from mmengine.data import BaseDataSample
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataSample]]]
@HOOKS.register_module()
class EmptyCacheHook(Hook):
"""Releases all unoccupied cached GPU memory during the process of
training.
Args:
before_epoch (bool): Whether to release cache before an epoch. Defaults
to False.
after_epoch (bool): Whether to release cache after an epoch. Defaults
to True.
after_iter (bool): Whether to release cache after an iteration.
Defaults to False.
"""
priority = 'NORMAL'
def __init__(self,
before_epoch: bool = False,
after_epoch: bool = True,
after_iter: bool = False) -> None:
self._do_before_epoch = before_epoch
self._do_after_epoch = after_epoch
self._do_after_iter = after_iter
def _after_iter(self,
runner,
data_batch: DATA_BATCH = None,
outputs: Optional[Union[dict,
Sequence[BaseDataSample]]] = None,
mode: str = 'train') -> None:
"""Empty cache after an iteration.
Args:
runner (Runner): The runner of the training process.
data_batch (Sequence[Tuple[Any, BaseDataSample]], optional): Data
from dataloader. Defaults to None.
outputs (dict or sequence, optional): Outputs from model.
Defaults to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_after_iter:
torch.cuda.empty_cache()
def _before_epoch(self, runner, mode: str = 'train') -> None:
"""Empty cache before an epoch.
Args:
runner (Runner): The runner of the training process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_before_epoch:
torch.cuda.empty_cache()
def _after_epoch(self, runner, mode: str = 'train') -> None:
"""Empty cache after an epoch.
Args:
runner (Runner): The runner of the training process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_after_epoch:
torch.cuda.empty_cache()
|
import pytest
import torch
from docarray.computation.torch_backend import TorchCompBackend
def test_to_device():
t = torch.rand(10, 3)
assert t.device == torch.device('cpu')
t = TorchCompBackend.to_device(t, 'meta')
assert t.device == torch.device('meta')
@pytest.mark.parametrize(
'array,result',
[
(torch.zeros((5)), 1),
(torch.zeros((1, 5)), 2),
(torch.zeros((5, 5)), 2),
(torch.zeros(()), 0),
],
)
def test_n_dim(array, result):
assert TorchCompBackend.n_dim(array) == result
@pytest.mark.parametrize(
'array,result',
[
(torch.zeros((10,)), (10,)),
(torch.zeros((5, 5)), (5, 5)),
(torch.zeros(()), ()),
],
)
def test_shape(array, result):
shape = TorchCompBackend.shape(array)
assert shape == result
assert type(shape) == tuple
def test_empty():
tensor = TorchCompBackend.empty((10, 3))
assert tensor.shape == (10, 3)
def test_empty_dtype():
tensor = TorchCompBackend.empty((10, 3), dtype=torch.int32)
assert tensor.shape == (10, 3)
assert tensor.dtype == torch.int32
def test_empty_device():
tensor = TorchCompBackend.empty((10, 3), device='meta')
assert tensor.shape == (10, 3)
assert tensor.device == torch.device('meta')
def test_squeeze():
tensor = torch.zeros(size=(1, 1, 3, 1))
squeezed = TorchCompBackend.squeeze(tensor)
assert squeezed.shape == (3,)
@pytest.mark.parametrize(
'array,t_range,x_range,result',
[
(
torch.tensor([0, 1, 2, 3, 4, 5]),
(0, 10),
None,
torch.tensor([0, 2, 4, 6, 8, 10]),
),
(
torch.tensor([0, 1, 2, 3, 4, 5]),
(0, 10),
(0, 10),
torch.tensor([0, 1, 2, 3, 4, 5]),
),
(
torch.tensor([[0.0, 1.0], [0.0, 1.0]]),
(0, 10),
None,
torch.tensor([[0.0, 10.0], [0.0, 10.0]]),
),
],
)
def test_minmax_normalize(array, t_range, x_range, result):
output = TorchCompBackend.minmax_normalize(
tensor=array, t_range=t_range, x_range=x_range
)
assert torch.allclose(output, result)
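# For reference, the mapping exercised above is standard min-max rescaling into
# t_range (with x_range overriding the observed min/max when given); this is
# inferred from the test cases, not quoted from the backend's source:
#   out = (x - x_min) / (x_max - x_min) * (t_max - t_min) + t_min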
|
import pytest
import torch
from docarray.computation.torch_backend import TorchCompBackend
def test_to_device():
t = torch.rand(10, 3)
assert t.device == torch.device('cpu')
t = TorchCompBackend.to_device(t, 'meta')
assert t.device == torch.device('meta')
@pytest.mark.parametrize(
'array,result',
[
(torch.zeros((5)), 1),
(torch.zeros((1, 5)), 2),
(torch.zeros((5, 5)), 2),
(torch.zeros(()), 0),
],
)
def test_n_dim(array, result):
assert TorchCompBackend.n_dim(array) == result
@pytest.mark.parametrize(
'array,result',
[
(torch.zeros((10,)), (10,)),
(torch.zeros((5, 5)), (5, 5)),
(torch.zeros(()), ()),
],
)
def test_shape(array, result):
shape = TorchCompBackend.shape(array)
assert shape == result
assert type(shape) == tuple
def test_empty():
tensor = TorchCompBackend.empty((10, 3))
assert tensor.shape == (10, 3)
def test_empty_dtype():
tensor = TorchCompBackend.empty((10, 3), dtype=torch.int32)
assert tensor.shape == (10, 3)
assert tensor.dtype == torch.int32
def test_empty_device():
tensor = TorchCompBackend.empty((10, 3), device='meta')
assert tensor.shape == (10, 3)
assert tensor.device == torch.device('meta')
|
"""Tool for the OpenAI DALLE V1 Image Generation SDK."""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.dalle_image_generator import DallEAPIWrapper
class OpenAIDALLEImageGenerationTool(BaseTool):
"""Tool that generates an image using OpenAI DALLE."""
name: str = "openai_dalle"
description: str = (
"A wrapper around OpenAI DALLE Image Generation. "
"Useful for when you need to generate an image of"
"people, places, paintings, animals, or other subjects. "
"Input should be a text prompt to generate an image."
)
api_wrapper: DallEAPIWrapper
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the OpenAI DALLE Image Generation tool."""
return self.api_wrapper.run(query)
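# Hypothetical usage (assumes OPENAI_API_KEY is set in the environment):
tool = OpenAIDALLEImageGenerationTool(api_wrapper=DallEAPIWrapper())
image_url = tool.run("a watercolor painting of a lighthouse at dawn")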
|
"""Tool for the OpenAI DALLE V1 Image Generation SDK."""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.dalle_image_generator import DallEAPIWrapper
class OpenAIDALLEImageGenerationTool(BaseTool): # type: ignore[override]
"""Tool that generates an image using OpenAI DALLE."""
name: str = "openai_dalle"
description: str = (
"A wrapper around OpenAI DALLE Image Generation. "
"Useful for when you need to generate an image of"
"people, places, paintings, animals, or other subjects. "
"Input should be a text prompt to generate an image."
)
api_wrapper: DallEAPIWrapper
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the OpenAI DALLE Image Generation tool."""
return self.api_wrapper.run(query)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import Mock, patch
from mmdet.engine.hooks import YOLOXModeSwitchHook
class TestYOLOXModeSwitchHook(TestCase):
@patch('mmdet.engine.hooks.yolox_mode_switch_hook.is_model_wrapper')
def test_is_model_wrapper_and_persistent_workers_on(
self, mock_is_model_wrapper):
mock_is_model_wrapper.return_value = True
runner = Mock()
runner.model = Mock()
runner.model.module = Mock()
runner.model.module.detector.bbox_head.use_l1 = False
runner.train_dataloader = Mock()
runner.train_dataloader.persistent_workers = True
runner.train_dataloader._DataLoader__initialized = True
runner.epoch = 284
runner.max_epochs = 300
hook = YOLOXModeSwitchHook(num_last_epochs=15)
hook.before_train_epoch(runner)
self.assertTrue(hook._restart_dataloader)
self.assertTrue(runner.model.module.detector.bbox_head.use_l1)
self.assertFalse(runner.train_dataloader._DataLoader__initialized)
runner.epoch = 285
hook.before_train_epoch(runner)
self.assertTrue(runner.train_dataloader._DataLoader__initialized)
def test_not_model_wrapper_and_persistent_workers_off(self):
runner = Mock()
runner.model = Mock()
runner.model.detector.bbox_head.use_l1 = False
runner.train_dataloader = Mock()
runner.train_dataloader.persistent_workers = False
runner.train_dataloader._DataLoader__initialized = True
runner.epoch = 284
runner.max_epochs = 300
hook = YOLOXModeSwitchHook(num_last_epochs=15)
hook.before_train_epoch(runner)
self.assertFalse(hook._restart_dataloader)
self.assertTrue(runner.model.detector.bbox_head.use_l1)
self.assertTrue(runner.train_dataloader._DataLoader__initialized)
runner.epoch = 285
hook.before_train_epoch(runner)
self.assertFalse(hook._restart_dataloader)
self.assertTrue(runner.train_dataloader._DataLoader__initialized)
@patch('mmdet.engine.hooks.yolox_mode_switch_hook.is_model_wrapper')
def test_initialize_after_switching(self, mock_is_model_wrapper):
# This simulates the resumption after the switching.
mock_is_model_wrapper.return_value = True
runner = Mock()
runner.model = Mock()
runner.model.module = Mock()
        runner.model.module.detector.bbox_head.use_l1 = False
runner.train_dataloader = Mock()
runner.train_dataloader.persistent_workers = True
runner.train_dataloader._DataLoader__initialized = True
runner.epoch = 285
runner.max_epochs = 300
# epoch + 1 > max_epochs - num_last_epochs .
hook = YOLOXModeSwitchHook(num_last_epochs=15)
hook.before_train_epoch(runner)
self.assertTrue(hook._restart_dataloader)
self.assertTrue(runner.model.module.detector.bbox_head.use_l1)
self.assertFalse(runner.train_dataloader._DataLoader__initialized)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import Mock, patch
from mmdet.engine.hooks import YOLOXModeSwitchHook
class TestYOLOXModeSwitchHook(TestCase):
@patch('mmdet.engine.hooks.yolox_mode_switch_hook.is_model_wrapper')
def test_is_model_wrapper_and_persistent_workers_on(
self, mock_is_model_wrapper):
mock_is_model_wrapper.return_value = True
runner = Mock()
runner.model = Mock()
runner.model.module = Mock()
runner.model.module.detector.bbox_head.use_l1 = False
runner.train_dataloader = Mock()
runner.train_dataloader.persistent_workers = True
runner.train_dataloader._DataLoader__initialized = True
runner.epoch = 284
runner.max_epochs = 300
hook = YOLOXModeSwitchHook(num_last_epochs=15)
hook.before_train_epoch(runner)
self.assertTrue(hook._restart_dataloader)
self.assertTrue(runner.model.module.detector.bbox_head.use_l1)
self.assertFalse(runner.train_dataloader._DataLoader__initialized)
runner.epoch = 285
hook.before_train_epoch(runner)
self.assertTrue(runner.train_dataloader._DataLoader__initialized)
def test_not_model_wrapper_and_persistent_workers_off(self):
runner = Mock()
runner.model = Mock()
runner.model.detector.bbox_head.use_l1 = False
runner.train_dataloader = Mock()
runner.train_dataloader.persistent_workers = False
runner.train_dataloader._DataLoader__initialized = True
runner.epoch = 284
runner.max_epochs = 300
hook = YOLOXModeSwitchHook(num_last_epochs=15)
hook.before_train_epoch(runner)
self.assertFalse(hook._restart_dataloader)
self.assertTrue(runner.model.detector.bbox_head.use_l1)
self.assertTrue(runner.train_dataloader._DataLoader__initialized)
runner.epoch = 285
hook.before_train_epoch(runner)
self.assertFalse(hook._restart_dataloader)
self.assertTrue(runner.train_dataloader._DataLoader__initialized)
@patch('mmdet.engine.hooks.yolox_mode_switch_hook.is_model_wrapper')
def test_initialize_after_switching(self, mock_is_model_wrapper):
# This simulates the resumption after the switching.
mock_is_model_wrapper.return_value = True
runner = Mock()
runner.model = Mock()
runner.model.module = Mock()
runner.model.module.bbox_head.use_l1 = False
runner.train_dataloader = Mock()
runner.train_dataloader.persistent_workers = True
runner.train_dataloader._DataLoader__initialized = True
runner.epoch = 285
runner.max_epochs = 300
# epoch + 1 > max_epochs - num_last_epochs .
hook = YOLOXModeSwitchHook(num_last_epochs=15)
hook.before_train_epoch(runner)
self.assertTrue(hook._restart_dataloader)
self.assertTrue(runner.model.module.bbox_head.use_l1)
self.assertFalse(runner.train_dataloader._DataLoader__initialized)
|
_base_ = './retinanet_r50_fpn_8xb8-amp-lsj-200e_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = './retinanet_r50_fpn_lsj_200e_8x8_fp16_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
import pytest
from docarray.utils.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
from docarray.computation.tensorflow_backend import TensorFlowCompBackend
from docarray.typing import TensorFlowTensor
@pytest.mark.tensorflow
def test_top_k_descending_false():
top_k = TensorFlowCompBackend.Retrieval.top_k
a = TensorFlowTensor(tf.constant([1, 4, 2, 7, 4, 9, 2]))
vals, indices = top_k(a, 3, descending=False)
assert vals.tensor.shape == (1, 3)
assert indices.tensor.shape == (1, 3)
assert tnp.allclose(tnp.squeeze(vals.tensor), tf.constant([1, 2, 2]))
    assert tnp.allclose(tnp.squeeze(indices.tensor), tf.constant([0, 2, 6])) or tnp.allclose(
        tnp.squeeze(indices.tensor), tf.constant([0, 6, 2])
    )
a = TensorFlowTensor(tf.constant([[1, 4, 2, 7, 4, 9, 2], [11, 6, 2, 7, 3, 10, 4]]))
vals, indices = top_k(a, 3, descending=False)
assert vals.tensor.shape == (2, 3)
assert indices.tensor.shape == (2, 3)
assert tnp.allclose(vals.tensor[0], tf.constant([1, 2, 2]))
assert tnp.allclose(indices.tensor[0], tf.constant([0, 2, 6])) or tnp.allclose(
indices.tensor[0], tf.constant([0, 6, 2])
)
assert tnp.allclose(vals.tensor[1], tf.constant([2, 3, 4]))
assert tnp.allclose(indices.tensor[1], tf.constant([2, 4, 6]))
@pytest.mark.tensorflow
def test_top_k_descending_true():
top_k = TensorFlowCompBackend.Retrieval.top_k
a = TensorFlowTensor(tf.constant([1, 4, 2, 7, 4, 9, 2]))
vals, indices = top_k(a, 3, descending=True)
assert vals.tensor.shape == (1, 3)
assert indices.tensor.shape == (1, 3)
assert tnp.allclose(tnp.squeeze(vals.tensor), tf.constant([9, 7, 4]))
assert tnp.allclose(tnp.squeeze(indices.tensor), tf.constant([5, 3, 1]))
a = TensorFlowTensor(tf.constant([[1, 4, 2, 7, 4, 9, 2], [11, 6, 2, 7, 3, 10, 4]]))
vals, indices = top_k(a, 3, descending=True)
assert vals.tensor.shape == (2, 3)
assert indices.tensor.shape == (2, 3)
assert tnp.allclose(vals.tensor[0], tf.constant([9, 7, 4]))
assert tnp.allclose(indices.tensor[0], tf.constant([5, 3, 1]))
assert tnp.allclose(vals.tensor[1], tf.constant([11, 10, 7]))
assert tnp.allclose(indices.tensor[1], tf.constant([0, 5, 3]))
|
import pytest
try:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
from docarray.computation.tensorflow_backend import TensorFlowCompBackend
from docarray.typing import TensorFlowTensor
except (ImportError, TypeError):
pass
@pytest.mark.tensorflow
def test_top_k_descending_false():
top_k = TensorFlowCompBackend.Retrieval.top_k
a = TensorFlowTensor(tf.constant([1, 4, 2, 7, 4, 9, 2]))
vals, indices = top_k(a, 3, descending=False)
assert vals.tensor.shape == (1, 3)
assert indices.tensor.shape == (1, 3)
assert tnp.allclose(tnp.squeeze(vals.tensor), tf.constant([1, 2, 2]))
    assert tnp.allclose(tnp.squeeze(indices.tensor), tf.constant([0, 2, 6])) or tnp.allclose(
        tnp.squeeze(indices.tensor), tf.constant([0, 6, 2])
    )
a = TensorFlowTensor(tf.constant([[1, 4, 2, 7, 4, 9, 2], [11, 6, 2, 7, 3, 10, 4]]))
vals, indices = top_k(a, 3, descending=False)
assert vals.tensor.shape == (2, 3)
assert indices.tensor.shape == (2, 3)
assert tnp.allclose(vals.tensor[0], tf.constant([1, 2, 2]))
assert tnp.allclose(indices.tensor[0], tf.constant([0, 2, 6])) or tnp.allclose(
indices.tensor[0], tf.constant([0, 6, 2])
)
assert tnp.allclose(vals.tensor[1], tf.constant([2, 3, 4]))
assert tnp.allclose(indices.tensor[1], tf.constant([2, 4, 6]))
@pytest.mark.tensorflow
def test_top_k_descending_true():
top_k = TensorFlowCompBackend.Retrieval.top_k
a = TensorFlowTensor(tf.constant([1, 4, 2, 7, 4, 9, 2]))
vals, indices = top_k(a, 3, descending=True)
assert vals.tensor.shape == (1, 3)
assert indices.tensor.shape == (1, 3)
assert tnp.allclose(tnp.squeeze(vals.tensor), tf.constant([9, 7, 4]))
assert tnp.allclose(tnp.squeeze(indices.tensor), tf.constant([5, 3, 1]))
a = TensorFlowTensor(tf.constant([[1, 4, 2, 7, 4, 9, 2], [11, 6, 2, 7, 3, 10, 4]]))
vals, indices = top_k(a, 3, descending=True)
assert vals.tensor.shape == (2, 3)
assert indices.tensor.shape == (2, 3)
assert tnp.allclose(vals.tensor[0], tf.constant([9, 7, 4]))
assert tnp.allclose(indices.tensor[0], tf.constant([5, 3, 1]))
assert tnp.allclose(vals.tensor[1], tf.constant([11, 10, 7]))
assert tnp.allclose(indices.tensor[1], tf.constant([0, 5, 3]))
|
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .cornernet import CornerNet
from .deformable_detr import DeformableDETR
from .detr import DETR
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
from .lad import LAD
from .mask2former import Mask2Former
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .maskformer import MaskFormer
from .nasfcos import NASFCOS
from .paa import PAA
from .panoptic_fpn import PanopticFPN
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
from .point_rend import PointRend
from .queryinst import QueryInst
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .scnet import SCNet
from .single_stage import SingleStageDetector
from .solo import SOLO
from .sparse_rcnn import SparseRCNN
from .tood import TOOD
from .trident_faster_rcnn import TridentFasterRCNN
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
from .yolof import YOLOF
from .yolox import YOLOX
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',
'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',
'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',
'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',
'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO',
'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',
'TwoStagePanopticSegmentor', 'PanopticFPN', 'QueryInst', 'LAD', 'TOOD',
'MaskFormer', 'Mask2Former'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .cornernet import CornerNet
from .deformable_detr import DeformableDETR
from .detr import DETR
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
from .lad import LAD
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .maskformer import MaskFormer
from .nasfcos import NASFCOS
from .paa import PAA
from .panoptic_fpn import PanopticFPN
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
from .point_rend import PointRend
from .queryinst import QueryInst
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .scnet import SCNet
from .single_stage import SingleStageDetector
from .solo import SOLO
from .sparse_rcnn import SparseRCNN
from .tood import TOOD
from .trident_faster_rcnn import TridentFasterRCNN
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
from .yolof import YOLOF
from .yolox import YOLOX
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',
'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',
'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',
'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',
'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO',
'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',
'TwoStagePanopticSegmentor', 'PanopticFPN', 'QueryInst', 'LAD', 'TOOD',
'MaskFormer'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
from mmengine.hooks import SyncBuffersHook
class TestSyncBuffersHook:
def test_sync_buffers_hook(self):
        runner = Mock()
        runner.model = Mock()
        hook = SyncBuffersHook()
        hook._after_epoch(runner)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
from mmengine.hooks import SyncBuffersHook
class TestSyncBuffersHook:
def test_sync_buffers_hook(self):
        runner = Mock()
        runner.model = Mock()
        hook = SyncBuffersHook()
        hook.after_epoch(runner)
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
from mmdet.core.mask import BitmapMasks, PolygonMasks
def _check_fields(results, pipeline_results, keys):
"""Check data in fields from two results are same."""
for key in keys:
if isinstance(results[key], (BitmapMasks, PolygonMasks)):
assert np.equal(results[key].to_ndarray(),
pipeline_results[key].to_ndarray()).all()
else:
assert np.equal(results[key], pipeline_results[key]).all()
assert results[key].dtype == pipeline_results[key].dtype
def check_result_same(results, pipeline_results):
"""Check whether the `pipeline_results` is the same with the predefined
`results`.
Args:
results (dict): Predefined results which should be the standard output
of the transform pipeline.
pipeline_results (dict): Results processed by the transform pipeline.
"""
# check image
_check_fields(results, pipeline_results,
results.get('img_fields', ['img']))
# check bboxes
_check_fields(results, pipeline_results, results.get('bbox_fields', []))
# check masks
_check_fields(results, pipeline_results, results.get('mask_fields', []))
# check segmentations
_check_fields(results, pipeline_results, results.get('seg_fields', []))
# check gt_labels
if 'gt_labels' in results:
assert np.equal(results['gt_labels'],
pipeline_results['gt_labels']).all()
def construct_toy_data(poly2mask=True):
img = np.array([[1, 2, 3, 4], [5, 6, 7, 8]], dtype=np.uint8)
img = np.stack([img, img, img], axis=-1)
results = dict()
# image
results['img'] = img
results['img_shape'] = img.shape
results['img_fields'] = ['img']
# bboxes
results['bbox_fields'] = ['gt_bboxes', 'gt_bboxes_ignore']
results['gt_bboxes'] = np.array([[0., 0., 2., 1.]], dtype=np.float32)
results['gt_bboxes_ignore'] = np.array([[2., 0., 3., 1.]],
dtype=np.float32)
# labels
results['gt_labels'] = np.array([1], dtype=np.int64)
# masks
results['mask_fields'] = ['gt_masks']
if poly2mask:
gt_masks = np.array([[0, 1, 1, 0], [0, 1, 0, 0]],
dtype=np.uint8)[None, :, :]
results['gt_masks'] = BitmapMasks(gt_masks, 2, 4)
else:
        raw_masks = [[np.array([0, 0, 2, 0, 2, 1, 0, 1], dtype=np.float64)]]
results['gt_masks'] = PolygonMasks(raw_masks, 2, 4)
# segmentations
results['seg_fields'] = ['gt_semantic_seg']
results['gt_semantic_seg'] = img[..., 0]
return results
def create_random_bboxes(num_bboxes, img_w, img_h):
bboxes_left_top = np.random.uniform(0, 0.5, size=(num_bboxes, 2))
bboxes_right_bottom = np.random.uniform(0.5, 1, size=(num_bboxes, 2))
bboxes = np.concatenate((bboxes_left_top, bboxes_right_bottom), 1)
bboxes = (bboxes * np.array([img_w, img_h, img_w, img_h])).astype(
np.float32)
return bboxes
def create_full_masks(gt_bboxes, img_w, img_h):
xmin, ymin = gt_bboxes[:, 0:1], gt_bboxes[:, 1:2]
xmax, ymax = gt_bboxes[:, 2:3], gt_bboxes[:, 3:4]
gt_masks = np.zeros((len(gt_bboxes), img_h, img_w), dtype=np.uint8)
for i in range(len(gt_bboxes)):
gt_masks[i, int(ymin[i]):int(ymax[i]), int(xmin[i]):int(xmax[i])] = 1
gt_masks = BitmapMasks(gt_masks, img_h, img_w)
return gt_masks
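# A quick sanity check for the helpers above (the 32x32 shape is arbitrary):
bboxes = create_random_bboxes(num_bboxes=2, img_w=32, img_h=32)
full_masks = create_full_masks(bboxes, img_w=32, img_h=32)
assert full_masks.masks.shape == (2, 32, 32)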
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
from mmdet.core.mask import BitmapMasks, PolygonMasks
def _check_fields(results, pipeline_results, keys):
"""Check data in fields from two results are same."""
for key in keys:
if isinstance(results[key], (BitmapMasks, PolygonMasks)):
assert np.equal(results[key].to_ndarray(),
pipeline_results[key].to_ndarray()).all()
else:
assert np.equal(results[key], pipeline_results[key]).all()
assert results[key].dtype == pipeline_results[key].dtype
def check_result_same(results, pipeline_results):
"""Check whether the `pipeline_results` is the same with the predefined
`results`.
Args:
results (dict): Predefined results which should be the standard output
of the transform pipeline.
pipeline_results (dict): Results processed by the transform pipeline.
"""
# check image
_check_fields(results, pipeline_results,
results.get('img_fields', ['img']))
# check bboxes
_check_fields(results, pipeline_results, results.get('bbox_fields', []))
# check masks
_check_fields(results, pipeline_results, results.get('mask_fields', []))
# check segmentations
_check_fields(results, pipeline_results, results.get('seg_fields', []))
# check gt_labels
if 'gt_labels' in results:
assert np.equal(results['gt_labels'],
pipeline_results['gt_labels']).all()
def construct_toy_data(poly2mask=True):
img = np.array([[1, 2, 3, 4], [5, 6, 7, 8]], dtype=np.uint8)
img = np.stack([img, img, img], axis=-1)
results = dict()
# image
results['img'] = img
results['img_shape'] = img.shape
results['img_fields'] = ['img']
# bboxes
results['bbox_fields'] = ['gt_bboxes', 'gt_bboxes_ignore']
results['gt_bboxes'] = np.array([[0., 0., 2., 1.]], dtype=np.float32)
results['gt_bboxes_ignore'] = np.array([[2., 0., 3., 1.]],
dtype=np.float32)
# labels
results['gt_labels'] = np.array([1], dtype=np.int64)
# masks
results['mask_fields'] = ['gt_masks']
if poly2mask:
gt_masks = np.array([[0, 1, 1, 0], [0, 1, 0, 0]],
dtype=np.uint8)[None, :, :]
results['gt_masks'] = BitmapMasks(gt_masks, 2, 4)
else:
        raw_masks = [[np.array([0, 0, 2, 0, 2, 1, 0, 1], dtype=np.float64)]]
results['gt_masks'] = PolygonMasks(raw_masks, 2, 4)
# segmentations
results['seg_fields'] = ['gt_semantic_seg']
results['gt_semantic_seg'] = img[..., 0]
return results
def create_random_bboxes(num_bboxes, img_w, img_h):
bboxes_left_top = np.random.uniform(0, 0.5, size=(num_bboxes, 2))
bboxes_right_bottom = np.random.uniform(0.5, 1, size=(num_bboxes, 2))
bboxes = np.concatenate((bboxes_left_top, bboxes_right_bottom), 1)
bboxes = (bboxes * np.array([img_w, img_h, img_w, img_h])).astype(
np.float32)
return bboxes
|
_base_ = './retinanet_r50_fpn_crop640-50e_coco.py'
# model settings
model = dict(
    # `pad_size_divisor=128` ensures that the feature map sizes
    # in `NAS_FPN` won't mismatch.
data_preprocessor=dict(pad_size_divisor=128),
neck=dict(
_delete_=True,
type='NASFPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5,
stack_times=7,
start_level=1,
norm_cfg=dict(type='BN', requires_grad=True)))
|
_base_ = './retinanet_r50_fpn_crop640_50e_coco.py'
# model settings
model = dict(
    # `pad_size_divisor=128` ensures that the feature map sizes
    # in `NAS_FPN` won't mismatch.
data_preprocessor=dict(pad_size_divisor=128),
neck=dict(
_delete_=True,
type='NASFPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5,
stack_times=7,
start_level=1,
norm_cfg=dict(type='BN', requires_grad=True)))
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.7.1'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
        tuple: version information containing the major, minor, and micro versions.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
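# Example outputs of the parser above:
assert parse_version_info('0.7.1') == (0, 7, 1)
assert parse_version_info('0.8.0rc1') == (0, 8, 0, 'rc1')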
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.7.0'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
        tuple: version information containing the major, minor, and micro versions.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
__version__ = '0.13.1'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
if 'NO_VERSION_CHECK' not in os.environ:
from .helper import is_latest_version
is_latest_version()
|
__version__ = '0.13.0'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
if 'NO_VERSION_CHECK' not in os.environ:
from .helper import is_latest_version
is_latest_version()
|
from typing import TYPE_CHECKING
from ...utils import (
DIFFUSERS_SLOW_IMPORT,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_torch_available,
is_transformers_available,
)
_dummy_objects = {}
_import_structure = {}
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils import dummy_torch_and_transformers_objects # noqa F403
_dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
else:
_import_structure["pipeline_lumina"] = ["LuminaPipeline", "LuminaText2ImgPipeline"]
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import *
else:
from .pipeline_lumina import LuminaPipeline, LuminaText2ImgPipeline
else:
import sys
sys.modules[__name__] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
module_spec=__spec__,
)
for name, value in _dummy_objects.items():
setattr(sys.modules[__name__], name, value)
|
from typing import TYPE_CHECKING
from ...utils import (
DIFFUSERS_SLOW_IMPORT,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_torch_available,
is_transformers_available,
)
_dummy_objects = {}
_import_structure = {}
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils import dummy_torch_and_transformers_objects # noqa F403
_dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
else:
_import_structure["pipeline_lumina"] = ["LuminaText2ImgPipeline"]
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import *
else:
from .pipeline_lumina import LuminaText2ImgPipeline
else:
import sys
sys.modules[__name__] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
module_spec=__spec__,
)
for name, value in _dummy_objects.items():
setattr(sys.modules[__name__], name, value)
|
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration for Backbone models"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class TimmBackboneConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration for a timm backbone [`TimmBackbone`].
It is used to instantiate a timm backbone model according to the specified arguments, defining the model.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
backbone (`str`, *optional*):
The timm checkpoint to load.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
features_only (`bool`, *optional*, defaults to `True`):
Whether to output only the features or also the logits.
use_pretrained_backbone (`bool`, *optional*, defaults to `True`):
Whether to use a pretrained backbone.
out_indices (`list[int]`, *optional*):
If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
many stages the model has). Will default to the last stage if unset.
freeze_batch_norm_2d (`bool`, *optional*, defaults to `False`):
Converts all `BatchNorm2d` and `SyncBatchNorm` layers of provided module into `FrozenBatchNorm2d`.
Example:
```python
>>> from transformers import TimmBackboneConfig, TimmBackbone
>>> # Initializing a timm backbone
>>> configuration = TimmBackboneConfig("resnet50")
>>> # Initializing a model from the configuration
>>> model = TimmBackbone(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
model_type = "timm_backbone"
def __init__(
self,
backbone=None,
num_channels=3,
features_only=True,
use_pretrained_backbone=True,
out_indices=None,
freeze_batch_norm_2d=False,
**kwargs,
):
super().__init__(**kwargs)
self.backbone = backbone
self.num_channels = num_channels
self.features_only = features_only
self.use_pretrained_backbone = use_pretrained_backbone
self.use_timm_backbone = True
self.out_indices = out_indices if out_indices is not None else [-1]
self.freeze_batch_norm_2d = freeze_batch_norm_2d
__all__ = ["TimmBackboneConfig"]
|
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration for Backbone models"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class TimmBackboneConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration for a timm backbone [`TimmBackbone`].
It is used to instantiate a timm backbone model according to the specified arguments, defining the model.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
backbone (`str`, *optional*):
The timm checkpoint to load.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
features_only (`bool`, *optional*, defaults to `True`):
Whether to output only the features or also the logits.
use_pretrained_backbone (`bool`, *optional*, defaults to `True`):
Whether to use a pretrained backbone.
out_indices (`List[int]`, *optional*):
If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
many stages the model has). Will default to the last stage if unset.
freeze_batch_norm_2d (`bool`, *optional*, defaults to `False`):
Converts all `BatchNorm2d` and `SyncBatchNorm` layers of provided module into `FrozenBatchNorm2d`.
Example:
```python
>>> from transformers import TimmBackboneConfig, TimmBackbone
>>> # Initializing a timm backbone
>>> configuration = TimmBackboneConfig("resnet50")
>>> # Initializing a model from the configuration
>>> model = TimmBackbone(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
model_type = "timm_backbone"
def __init__(
self,
backbone=None,
num_channels=3,
features_only=True,
use_pretrained_backbone=True,
out_indices=None,
freeze_batch_norm_2d=False,
**kwargs,
):
super().__init__(**kwargs)
self.backbone = backbone
self.num_channels = num_channels
self.features_only = features_only
self.use_pretrained_backbone = use_pretrained_backbone
self.use_timm_backbone = True
self.out_indices = out_indices if out_indices is not None else [-1]
self.freeze_batch_norm_2d = freeze_batch_norm_2d
__all__ = ["TimmBackboneConfig"]
|
from __future__ import annotations
from pathlib import Path
from unittest.mock import Mock, PropertyMock
import pytest
import torch
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import InformationRetrievalEvaluator
from sentence_transformers.util import cos_sim
@pytest.fixture
def mock_model():
def mock_encode(sentences: str | list[str], **kwargs) -> torch.Tensor:
"""
We simply one-hot encode the sentences; if a sentence contains a keyword, the corresponding one-hot
encoding is added to the sentence embedding.
"""
one_hot_encodings = {
"pokemon": torch.tensor([1.0, 0.0, 0.0, 0.0, 0.0]),
"car": torch.tensor([0.0, 1.0, 0.0, 0.0, 0.0]),
"vehicle": torch.tensor([0.0, 0.0, 1.0, 0.0, 0.0]),
"fruit": torch.tensor([0.0, 0.0, 0.0, 1.0, 0.0]),
"vegetable": torch.tensor([0.0, 0.0, 0.0, 0.0, 1.0]),
}
if isinstance(sentences, str):
sentences = [sentences]
embeddings = []
for sentence in sentences:
encoding = torch.zeros(5)
for keyword, one_hot in one_hot_encodings.items():
if keyword in sentence:
encoding += one_hot
embeddings.append(encoding)
return torch.stack(embeddings)
model = Mock(spec=SentenceTransformer)
model.similarity_fn_name = "cosine"
model.similarity.side_effect = cos_sim
model.encode.side_effect = mock_encode
model.model_card_data = PropertyMock(return_value=Mock())
return model
@pytest.fixture
def test_data():
queries = {
"0": "What is a pokemon?",
"1": "What is a vegetable?",
"2": "What is a fruit?",
"3": "What is a vehicle?",
"4": "What is a car?",
}
corpus = {
"0": "A pokemon is a fictional creature",
"1": "A vegetable is a plant",
"2": "A fruit is a plant",
"3": "A vehicle is a machine",
"4": "A car is a vehicle",
}
relevant_docs = {"0": {"0"}, "1": {"1"}, "2": {"2"}, "3": {"3", "4"}, "4": {"4"}}
return queries, corpus, relevant_docs
def test_simple(test_data, tmp_path: Path):
queries, corpus, relevant_docs = test_data
model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
ir_evaluator = InformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="test",
accuracy_at_k=[1, 3],
precision_recall_at_k=[1, 3],
mrr_at_k=[3],
ndcg_at_k=[3],
map_at_k=[5],
)
results = ir_evaluator(model, output_path=str(tmp_path))
expected_keys = [
"test_cosine_accuracy@1",
"test_cosine_accuracy@3",
"test_cosine_precision@1",
"test_cosine_precision@3",
"test_cosine_recall@1",
"test_cosine_recall@3",
"test_cosine_ndcg@3",
"test_cosine_mrr@3",
"test_cosine_map@5",
]
assert set(results.keys()) == set(expected_keys)
def test_metrics(test_data, mock_model, tmp_path: Path):
queries, corpus, relevant_docs = test_data
ir_evaluator = InformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="test",
accuracy_at_k=[1, 3],
precision_recall_at_k=[1, 3],
mrr_at_k=[3],
ndcg_at_k=[3],
map_at_k=[5],
)
results = ir_evaluator(mock_model, output_path=str(tmp_path))
    # We expect test_cosine_precision@3 to be 0.4, since 6 of the 15 retrieved documents (5 queries * top 3) are true positives
    # We expect test_cosine_recall@1 to be 0.9: the average of a recall of 1.0 for four queries and 0.5 for one query
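    # Derivation from relevant_docs above: queries "0", "1", "2" and "4" each have one relevant document
    # and query "3" has two, all of which land in the top 3, so 1 + 1 + 1 + 2 + 1 = 6 true positives
    # and precision@3 = 6 / 15 = 0.4. At k=1, query "3" recovers only one of its two relevant documents
    # (recall 0.5) while the others recover theirs fully, giving (4 * 1.0 + 0.5) / 5 = 0.9.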
expected_results = {
"test_cosine_accuracy@1": 1.0,
"test_cosine_accuracy@3": 1.0,
"test_cosine_precision@1": 1.0,
"test_cosine_precision@3": 0.4,
"test_cosine_recall@1": 0.9,
"test_cosine_recall@3": 1.0,
"test_cosine_ndcg@3": 1.0,
"test_cosine_mrr@3": 1.0,
"test_cosine_map@5": 1.0,
}
for key, expected_value in expected_results.items():
assert results[key] == pytest.approx(expected_value, abs=1e-9)
|
from __future__ import annotations
from unittest.mock import Mock, PropertyMock
import pytest
import torch
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import InformationRetrievalEvaluator
from sentence_transformers.util import cos_sim
@pytest.fixture
def mock_model():
def mock_encode(sentences: str | list[str], **kwargs) -> torch.Tensor:
"""
We simply one-hot encode the sentences; if a sentence contains a keyword, the corresponding one-hot
encoding is added to the sentence embedding.
"""
one_hot_encodings = {
"pokemon": torch.tensor([1.0, 0.0, 0.0, 0.0, 0.0]),
"car": torch.tensor([0.0, 1.0, 0.0, 0.0, 0.0]),
"vehicle": torch.tensor([0.0, 0.0, 1.0, 0.0, 0.0]),
"fruit": torch.tensor([0.0, 0.0, 0.0, 1.0, 0.0]),
"vegetable": torch.tensor([0.0, 0.0, 0.0, 0.0, 1.0]),
}
if isinstance(sentences, str):
sentences = [sentences]
embeddings = []
for sentence in sentences:
encoding = torch.zeros(5)
for keyword, one_hot in one_hot_encodings.items():
if keyword in sentence:
encoding += one_hot
embeddings.append(encoding)
return torch.stack(embeddings)
model = Mock(spec=SentenceTransformer)
model.similarity_fn_name = "cosine"
model.similarity.side_effect = cos_sim
model.encode.side_effect = mock_encode
model.model_card_data = PropertyMock(return_value=Mock())
return model
@pytest.fixture
def test_data():
queries = {
"0": "What is a pokemon?",
"1": "What is a vegetable?",
"2": "What is a fruit?",
"3": "What is a vehicle?",
"4": "What is a car?",
}
corpus = {
"0": "A pokemon is a fictional creature",
"1": "A vegetable is a plant",
"2": "A fruit is a plant",
"3": "A vehicle is a machine",
"4": "A car is a vehicle",
}
relevant_docs = {"0": {"0"}, "1": {"1"}, "2": {"2"}, "3": {"3", "4"}, "4": {"4"}}
return queries, corpus, relevant_docs
def test_simple(test_data):
queries, corpus, relevant_docs = test_data
model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
ir_evaluator = InformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="test",
accuracy_at_k=[1, 3],
precision_recall_at_k=[1, 3],
mrr_at_k=[3],
ndcg_at_k=[3],
map_at_k=[5],
)
results = ir_evaluator(model)
expected_keys = [
"test_cosine_accuracy@1",
"test_cosine_accuracy@3",
"test_cosine_precision@1",
"test_cosine_precision@3",
"test_cosine_recall@1",
"test_cosine_recall@3",
"test_cosine_ndcg@3",
"test_cosine_mrr@3",
"test_cosine_map@5",
]
assert set(results.keys()) == set(expected_keys)
def test_metrics(test_data, mock_model):
queries, corpus, relevant_docs = test_data
ir_evaluator = InformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="test",
accuracy_at_k=[1, 3],
precision_recall_at_k=[1, 3],
mrr_at_k=[3],
ndcg_at_k=[3],
map_at_k=[5],
)
results = ir_evaluator(mock_model)
    # We expect test_cosine_precision@3 to be 0.4, since 6 of the 15 retrieved documents (5 queries * top 3) are true positives
    # We expect test_cosine_recall@1 to be 0.9: the average of a recall of 1.0 for four queries and 0.5 for one query
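    # Derivation: 5 queries * top 3 = 15 retrieved documents, of which 1 + 1 + 1 + 2 + 1 = 6 are relevant,
    # so precision@3 = 6 / 15 = 0.4; recall@1 averages four perfect recalls with query "3"'s 0.5: (4 * 1.0 + 0.5) / 5 = 0.9.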
expected_results = {
"test_cosine_accuracy@1": 1.0,
"test_cosine_accuracy@3": 1.0,
"test_cosine_precision@1": 1.0,
"test_cosine_precision@3": 0.4,
"test_cosine_recall@1": 0.9,
"test_cosine_recall@3": 1.0,
"test_cosine_ndcg@3": 1.0,
"test_cosine_mrr@3": 1.0,
"test_cosine_map@5": 1.0,
}
for key, expected_value in expected_results.items():
assert results[key] == pytest.approx(expected_value, abs=1e-9)
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from docarray import BaseDoc
from docarray.typing import ImageUrl
def test_set_image_url():
class MyDocument(BaseDoc):
image_url: ImageUrl
d = MyDocument(image_url="https://jina.ai/img.png")
assert isinstance(d.image_url, ImageUrl)
assert d.image_url == "https://jina.ai/img.png"
|
from docarray import BaseDoc
from docarray.typing import ImageUrl
def test_set_image_url():
class MyDocument(BaseDoc):
image_url: ImageUrl
d = MyDocument(image_url="https://jina.ai/img.png")
assert isinstance(d.image_url, ImageUrl)
assert d.image_url == "https://jina.ai/img.png"
|
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from docarray import Document
def image_getter(doc: 'Document'):
if doc._metadata['image_type'] == 'uri':
return doc.uri
elif doc._metadata['image_type'] == 'PIL':
from PIL import Image
return Image.fromarray(doc.tensor)
elif doc._metadata['image_type'] == 'ndarray':
return doc.tensor
def text_getter(doc: 'Document'):
return doc.text
def uri_getter(doc: 'Document'):
return doc.uri
def audio_getter(doc: 'Document'):
return doc.uri or doc.tensor
def video_getter(doc: 'Document'):
return doc.uri or doc.tensor
def mesh_getter(doc: 'Document'):
return doc.uri or doc.tensor
def tabular_getter(doc: 'Document'):
return doc.uri
def blob_getter(doc: 'Document'):
return doc.uri or doc.blob
def json_getter(doc: 'Document'):
return doc.tags
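# A minimal dispatch sketch (illustrative; assumes docarray v1's `Document`, whose
# `_metadata` dict is what `image_getter` above consults):
#
#   from docarray import Document
#   doc = Document(uri='https://example.com/img.png')
#   doc._metadata['image_type'] = 'uri'
#   assert image_getter(doc) == doc.uri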
|
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from docarray import Document
def image_getter(doc: 'Document'):
if doc._metadata['image_type'] == 'uri':
return doc.uri
elif doc._metadata['image_type'] == 'PIL':
from PIL import Image
return Image.fromarray(doc.tensor)
elif doc._metadata['image_type'] == 'ndarray':
return doc.tensor
def text_getter(doc: 'Document'):
return doc.text
def audio_getter(doc: 'Document'):
return doc.uri or doc.tensor
def video_getter(doc: 'Document'):
return doc.uri or doc.tensor
def mesh_getter(doc: 'Document'):
return doc.uri or doc.tensor
def tabular_getter(doc: 'Document'):
return doc.uri
def blob_getter(doc: 'Document'):
return doc.uri or doc.blob
def json_getter(doc: 'Document'):
return doc.tags
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# model settings
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
bbox_head=dict(
_delete_=True,
type='SABLRetinaHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[4],
strides=[8, 16, 32, 64, 128]),
norm_cfg=norm_cfg,
bbox_coder=dict(
type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5),
loss_bbox_reg=dict(
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0.0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 480), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# model settings
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
bbox_head=dict(
_delete_=True,
type='SABLRetinaHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[4],
strides=[8, 16, 32, 64, 128]),
norm_cfg=norm_cfg,
bbox_coder=dict(
type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5),
loss_bbox_reg=dict(
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0.0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=[(1333, 480), (1333, 800)],
resize_cfg=dict(type='Resize', keep_ratio=True)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import CogVideoXTransformer3DModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin
enable_full_determinism()
class CogVideoXTransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = CogVideoXTransformer3DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
model_split_percents = [0.7, 0.7, 0.8]
@property
def dummy_input(self):
batch_size = 2
num_channels = 4
num_frames = 1
height = 8
width = 8
embedding_dim = 8
sequence_length = 8
hidden_states = torch.randn((batch_size, num_frames, num_channels, height, width)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
}
@property
def input_shape(self):
return (1, 4, 8, 8)
@property
def output_shape(self):
return (1, 4, 8, 8)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
# Product of num_attention_heads * attention_head_dim must be divisible by 16 for 3D positional embeddings.
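            # With the values here, 2 * 8 = 16, so the divisibility constraint is satisfied.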
"num_attention_heads": 2,
"attention_head_dim": 8,
"in_channels": 4,
"out_channels": 4,
"time_embed_dim": 2,
"text_embed_dim": 8,
"num_layers": 2,
"sample_width": 8,
"sample_height": 8,
"sample_frames": 8,
"patch_size": 2,
"patch_size_t": None,
"temporal_compression_ratio": 4,
"max_text_seq_length": 8,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"CogVideoXTransformer3DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
class CogVideoX1_5TransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = CogVideoXTransformer3DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
@property
def dummy_input(self):
batch_size = 2
num_channels = 4
num_frames = 2
height = 8
width = 8
embedding_dim = 8
sequence_length = 8
hidden_states = torch.randn((batch_size, num_frames, num_channels, height, width)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
}
@property
def input_shape(self):
return (1, 4, 8, 8)
@property
def output_shape(self):
return (1, 4, 8, 8)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
# Product of num_attention_heads * attention_head_dim must be divisible by 16 for 3D positional embeddings.
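            # With the values here, 2 * 8 = 16, so the divisibility constraint is satisfied.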
"num_attention_heads": 2,
"attention_head_dim": 8,
"in_channels": 4,
"out_channels": 4,
"time_embed_dim": 2,
"text_embed_dim": 8,
"num_layers": 2,
"sample_width": 8,
"sample_height": 8,
"sample_frames": 8,
"patch_size": 2,
"patch_size_t": 2,
"temporal_compression_ratio": 4,
"max_text_seq_length": 8,
"use_rotary_positional_embeddings": True,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"CogVideoXTransformer3DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import CogVideoXTransformer3DModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin
enable_full_determinism()
class CogVideoXTransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = CogVideoXTransformer3DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
model_split_percents = [0.7, 0.7, 0.8]
@property
def dummy_input(self):
batch_size = 2
num_channels = 4
num_frames = 1
height = 8
width = 8
embedding_dim = 8
sequence_length = 8
hidden_states = torch.randn((batch_size, num_frames, num_channels, height, width)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
}
@property
def input_shape(self):
return (1, 4, 8, 8)
@property
def output_shape(self):
return (1, 4, 8, 8)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
# Product of num_attention_heads * attention_head_dim must be divisible by 16 for 3D positional embeddings.
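            # With the values here, 2 * 8 = 16, so the divisibility constraint is satisfied.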
"num_attention_heads": 2,
"attention_head_dim": 8,
"in_channels": 4,
"out_channels": 4,
"time_embed_dim": 2,
"text_embed_dim": 8,
"num_layers": 2,
"sample_width": 8,
"sample_height": 8,
"sample_frames": 8,
"patch_size": 2,
"patch_size_t": None,
"temporal_compression_ratio": 4,
"max_text_seq_length": 8,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"CogVideoXTransformer3DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
class CogVideoX1_5TransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = CogVideoXTransformer3DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
@property
def dummy_input(self):
batch_size = 2
num_channels = 4
num_frames = 2
height = 8
width = 8
embedding_dim = 8
sequence_length = 8
hidden_states = torch.randn((batch_size, num_frames, num_channels, height, width)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
}
@property
def input_shape(self):
return (1, 4, 8, 8)
@property
def output_shape(self):
return (1, 4, 8, 8)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
# Product of num_attention_heads * attention_head_dim must be divisible by 16 for 3D positional embeddings.
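            # With the values here, 2 * 8 = 16, so the divisibility constraint is satisfied.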
"num_attention_heads": 2,
"attention_head_dim": 8,
"in_channels": 4,
"out_channels": 4,
"time_embed_dim": 2,
"text_embed_dim": 8,
"num_layers": 2,
"sample_width": 8,
"sample_height": 8,
"sample_frames": 8,
"patch_size": 2,
"patch_size_t": 2,
"temporal_compression_ratio": 4,
"max_text_seq_length": 8,
"use_rotary_positional_embeddings": True,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"CogVideoXTransformer3DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
|