input | output |
---|---|
"""Schemas for tracers."""
from __future__ import annotations
import warnings
from datetime import datetime, timezone
from typing import Any, Optional
from uuid import UUID
from langsmith import RunTree
from langsmith.schemas import RunTypeEnum as RunTypeEnumDep
from pydantic import PydanticDeprecationWarning
from pydantic.v1 import BaseModel as BaseModelV1
from pydantic.v1 import Field as FieldV1
from langchain_core._api import deprecated
@deprecated("0.1.0", alternative="Use string instead.", removal="1.0")
def RunTypeEnum() -> type[RunTypeEnumDep]: # noqa: N802
"""RunTypeEnum."""
warnings.warn(
"RunTypeEnum is deprecated. Please directly use a string instead"
" (e.g. 'llm', 'chain', 'tool').",
DeprecationWarning,
stacklevel=2,
)
return RunTypeEnumDep
@deprecated("0.1.0", removal="1.0")
class TracerSessionV1Base(BaseModelV1):
"""Base class for TracerSessionV1."""
start_time: datetime = FieldV1(default_factory=lambda: datetime.now(timezone.utc))
name: Optional[str] = None
extra: Optional[dict[str, Any]] = None
@deprecated("0.1.0", removal="1.0")
class TracerSessionV1Create(TracerSessionV1Base):
"""Create class for TracerSessionV1."""
@deprecated("0.1.0", removal="1.0")
class TracerSessionV1(TracerSessionV1Base):
"""TracerSessionV1 schema."""
id: int
@deprecated("0.1.0", removal="1.0")
class TracerSessionBase(TracerSessionV1Base):
"""Base class for TracerSession."""
tenant_id: UUID
@deprecated("0.1.0", removal="1.0")
class TracerSession(TracerSessionBase):
"""TracerSessionV1 schema for the V2 API."""
id: UUID
@deprecated("0.1.0", alternative="Run", removal="1.0")
class BaseRun(BaseModelV1):
"""Base class for Run."""
uuid: str
parent_uuid: Optional[str] = None
start_time: datetime = FieldV1(default_factory=lambda: datetime.now(timezone.utc))
end_time: datetime = FieldV1(default_factory=lambda: datetime.now(timezone.utc))
extra: Optional[dict[str, Any]] = None
execution_order: int
child_execution_order: int
serialized: dict[str, Any]
session_id: int
error: Optional[str] = None
@deprecated("0.1.0", alternative="Run", removal="1.0")
class LLMRun(BaseRun):
"""Class for LLMRun."""
prompts: list[str]
# Temporarily removed; LLMRun itself will be removed completely later
# response: Optional[LLMResult] = None
@deprecated("0.1.0", alternative="Run", removal="1.0")
class ChainRun(BaseRun):
"""Class for ChainRun."""
inputs: dict[str, Any]
outputs: Optional[dict[str, Any]] = None
child_llm_runs: list[LLMRun] = FieldV1(default_factory=list)
child_chain_runs: list[ChainRun] = FieldV1(default_factory=list)
child_tool_runs: list[ToolRun] = FieldV1(default_factory=list)
@deprecated("0.1.0", alternative="Run", removal="1.0")
class ToolRun(BaseRun):
"""Class for ToolRun."""
tool_input: str
output: Optional[str] = None
action: str
child_llm_runs: list[LLMRun] = FieldV1(default_factory=list)
child_chain_runs: list[ChainRun] = FieldV1(default_factory=list)
child_tool_runs: list[ToolRun] = FieldV1(default_factory=list)
# Begin V2 API Schemas
Run = RunTree # For backwards compatibility
# TODO: Update once langsmith moves to Pydantic V2 and we can swap Run.model_rebuild
# for Run.update_forward_refs
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=PydanticDeprecationWarning)
ChainRun.update_forward_refs()
ToolRun.update_forward_refs()
__all__ = [
"BaseRun",
"ChainRun",
"LLMRun",
"Run",
"RunTypeEnum",
"ToolRun",
"TracerSession",
"TracerSessionBase",
"TracerSessionV1",
"TracerSessionV1Base",
"TracerSessionV1Create",
]
|
"""Schemas for tracers."""
from __future__ import annotations
import datetime
import warnings
from typing import Any, Optional
from uuid import UUID
from langsmith import RunTree
from langsmith.schemas import RunTypeEnum as RunTypeEnumDep
from pydantic import PydanticDeprecationWarning
from pydantic.v1 import BaseModel as BaseModelV1
from pydantic.v1 import Field as FieldV1
from langchain_core._api import deprecated
@deprecated("0.1.0", alternative="Use string instead.", removal="1.0")
def RunTypeEnum() -> type[RunTypeEnumDep]: # noqa: N802
"""RunTypeEnum."""
warnings.warn(
"RunTypeEnum is deprecated. Please directly use a string instead"
" (e.g. 'llm', 'chain', 'tool').",
DeprecationWarning,
stacklevel=2,
)
return RunTypeEnumDep
@deprecated("0.1.0", removal="1.0")
class TracerSessionV1Base(BaseModelV1):
"""Base class for TracerSessionV1."""
start_time: datetime.datetime = FieldV1(default_factory=datetime.datetime.utcnow)
name: Optional[str] = None
extra: Optional[dict[str, Any]] = None
@deprecated("0.1.0", removal="1.0")
class TracerSessionV1Create(TracerSessionV1Base):
"""Create class for TracerSessionV1."""
@deprecated("0.1.0", removal="1.0")
class TracerSessionV1(TracerSessionV1Base):
"""TracerSessionV1 schema."""
id: int
@deprecated("0.1.0", removal="1.0")
class TracerSessionBase(TracerSessionV1Base):
"""Base class for TracerSession."""
tenant_id: UUID
@deprecated("0.1.0", removal="1.0")
class TracerSession(TracerSessionBase):
"""TracerSessionV1 schema for the V2 API."""
id: UUID
@deprecated("0.1.0", alternative="Run", removal="1.0")
class BaseRun(BaseModelV1):
"""Base class for Run."""
uuid: str
parent_uuid: Optional[str] = None
start_time: datetime.datetime = FieldV1(default_factory=datetime.datetime.utcnow)
end_time: datetime.datetime = FieldV1(default_factory=datetime.datetime.utcnow)
extra: Optional[dict[str, Any]] = None
execution_order: int
child_execution_order: int
serialized: dict[str, Any]
session_id: int
error: Optional[str] = None
@deprecated("0.1.0", alternative="Run", removal="1.0")
class LLMRun(BaseRun):
"""Class for LLMRun."""
prompts: list[str]
# Temporarily removed; LLMRun itself will be removed completely later
# response: Optional[LLMResult] = None
@deprecated("0.1.0", alternative="Run", removal="1.0")
class ChainRun(BaseRun):
"""Class for ChainRun."""
inputs: dict[str, Any]
outputs: Optional[dict[str, Any]] = None
child_llm_runs: list[LLMRun] = FieldV1(default_factory=list)
child_chain_runs: list[ChainRun] = FieldV1(default_factory=list)
child_tool_runs: list[ToolRun] = FieldV1(default_factory=list)
@deprecated("0.1.0", alternative="Run", removal="1.0")
class ToolRun(BaseRun):
"""Class for ToolRun."""
tool_input: str
output: Optional[str] = None
action: str
child_llm_runs: list[LLMRun] = FieldV1(default_factory=list)
child_chain_runs: list[ChainRun] = FieldV1(default_factory=list)
child_tool_runs: list[ToolRun] = FieldV1(default_factory=list)
# Begin V2 API Schemas
Run = RunTree # For backwards compatibility
# TODO: Update once langsmith moves to Pydantic V2 and we can swap Run.model_rebuild
# for Run.update_forward_refs
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=PydanticDeprecationWarning)
ChainRun.update_forward_refs()
ToolRun.update_forward_refs()
__all__ = [
"BaseRun",
"ChainRun",
"LLMRun",
"Run",
"RunTypeEnum",
"ToolRun",
"TracerSession",
"TracerSessionBase",
"TracerSessionV1",
"TracerSessionV1Base",
"TracerSessionV1Create",
]
|
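The two versions of this schema module differ only in the default timestamp factory: the older file uses `datetime.datetime.utcnow`, which returns a naive datetime (and is deprecated as of Python 3.12), while the newer one uses `datetime.now(timezone.utc)`, which is timezone-aware. A minimal, standard-library-only sketch of the distinction:

from datetime import datetime, timezone

naive = datetime.utcnow()           # no tzinfo attached; deprecated in Python 3.12+
aware = datetime.now(timezone.utc)  # carries tzinfo=timezone.utc

print(naive.tzinfo)       # None
print(aware.tzinfo)       # UTC
print(aware.isoformat())  # e.g. '2024-01-01T12:00:00+00:00'

# Ordering or subtracting naive and aware datetimes raises TypeError, which is
# why the newer schema switched the FieldV1 default_factory to the aware form.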
from abc import abstractmethod
from typing import TYPE_CHECKING, Dict, Iterable, Type
from pydantic.fields import ModelField
if TYPE_CHECKING:
from docarray.document.mixins.proto import ProtoMixin
class AbstractDocument(Iterable):
__fields__: Dict[str, ModelField]
@classmethod
@abstractmethod
def _get_nested_document_class(cls, field: str) -> Type['ProtoMixin']:
...
|
from typing import Dict, Iterable
from pydantic.fields import ModelField
class AbstractDocument(Iterable):
__fields__: Dict[str, ModelField]
|
import json
from json import JSONDecodeError
from typing import Union
from langchain_core.agents import AgentAction, AgentActionMessageLog, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.messages import (
AIMessage,
BaseMessage,
ToolCall,
)
from langchain_core.outputs import ChatGeneration, Generation
from langchain.agents.agent import MultiActionAgentOutputParser
class ToolAgentAction(AgentActionMessageLog):
tool_call_id: str
"""Tool call that this message is responding to."""
def parse_ai_message_to_tool_action(
message: BaseMessage,
) -> Union[list[AgentAction], AgentFinish]:
"""Parse an AI message potentially containing tool_calls."""
if not isinstance(message, AIMessage):
msg = f"Expected an AI message got {type(message)}"
raise TypeError(msg)
actions: list = []
if message.tool_calls:
tool_calls = message.tool_calls
else:
if not message.additional_kwargs.get("tool_calls"):
return AgentFinish(
return_values={"output": message.content}, log=str(message.content)
)
# Best-effort parsing
tool_calls = []
for tool_call in message.additional_kwargs["tool_calls"]:
function = tool_call["function"]
function_name = function["name"]
try:
args = json.loads(function["arguments"] or "{}")
tool_calls.append(
ToolCall(name=function_name, args=args, id=tool_call["id"])
)
except JSONDecodeError:
msg = (
f"Could not parse tool input: {function} because "
f"the `arguments` is not valid JSON."
)
raise OutputParserException(msg)
for tool_call in tool_calls:
# HACK HACK HACK:
# The code that encodes tool input into Open AI uses a special variable
# name called `__arg1` to handle old style tools that do not expose a
# schema and expect a single string argument as an input.
# We unpack the argument here if it exists.
# Open AI does not support passing in a JSON array as an argument.
function_name = tool_call["name"]
_tool_input = tool_call["args"]
tool_input = _tool_input.get("__arg1", _tool_input)
content_msg = f"responded: {message.content}\n" if message.content else "\n"
log = f"\nInvoking: `{function_name}` with `{tool_input}`\n{content_msg}\n"
actions.append(
ToolAgentAction(
tool=function_name,
tool_input=tool_input,
log=log,
message_log=[message],
tool_call_id=tool_call["id"],
)
)
return actions
class ToolsAgentOutputParser(MultiActionAgentOutputParser):
"""Parses a message into agent actions/finish.
If a tool_calls parameter is passed, then that is used to get
the tool names and tool inputs.
If one is not passed, then the AIMessage is assumed to be the final output.
"""
@property
def _type(self) -> str:
return "tools-agent-output-parser"
def parse_result(
self, result: list[Generation], *, partial: bool = False
) -> Union[list[AgentAction], AgentFinish]:
if not isinstance(result[0], ChatGeneration):
msg = "This output parser only works on ChatGeneration output"
raise ValueError(msg)
message = result[0].message
return parse_ai_message_to_tool_action(message)
def parse(self, text: str) -> Union[list[AgentAction], AgentFinish]:
msg = "Can only parse messages"
raise ValueError(msg)
|
import json
from json import JSONDecodeError
from typing import Union
from langchain_core.agents import AgentAction, AgentActionMessageLog, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.messages import (
AIMessage,
BaseMessage,
ToolCall,
)
from langchain_core.outputs import ChatGeneration, Generation
from langchain.agents.agent import MultiActionAgentOutputParser
class ToolAgentAction(AgentActionMessageLog):
tool_call_id: str
"""Tool call that this message is responding to."""
def parse_ai_message_to_tool_action(
message: BaseMessage,
) -> Union[list[AgentAction], AgentFinish]:
"""Parse an AI message potentially containing tool_calls."""
if not isinstance(message, AIMessage):
msg = f"Expected an AI message got {type(message)}"
raise TypeError(msg)
actions: list = []
if message.tool_calls:
tool_calls = message.tool_calls
else:
if not message.additional_kwargs.get("tool_calls"):
return AgentFinish(
return_values={"output": message.content}, log=str(message.content)
)
# Best-effort parsing
tool_calls = []
for tool_call in message.additional_kwargs["tool_calls"]:
function = tool_call["function"]
function_name = function["name"]
try:
args = json.loads(function["arguments"] or "{}")
tool_calls.append(
ToolCall(name=function_name, args=args, id=tool_call["id"])
)
except JSONDecodeError:
msg = (
f"Could not parse tool input: {function} because "
f"the `arguments` is not valid JSON."
)
raise OutputParserException(msg)
for tool_call in tool_calls:
# HACK HACK HACK:
# The code that encodes tool input into Open AI uses a special variable
# name called `__arg1` to handle old style tools that do not expose a
# schema and expect a single string argument as an input.
# We unpack the argument here if it exists.
# Open AI does not support passing in a JSON array as an argument.
function_name = tool_call["name"]
_tool_input = tool_call["args"]
if "__arg1" in _tool_input:
tool_input = _tool_input["__arg1"]
else:
tool_input = _tool_input
content_msg = f"responded: {message.content}\n" if message.content else "\n"
log = f"\nInvoking: `{function_name}` with `{tool_input}`\n{content_msg}\n"
actions.append(
ToolAgentAction(
tool=function_name,
tool_input=tool_input,
log=log,
message_log=[message],
tool_call_id=tool_call["id"],
)
)
return actions
class ToolsAgentOutputParser(MultiActionAgentOutputParser):
"""Parses a message into agent actions/finish.
If a tool_calls parameter is passed, then that is used to get
the tool names and tool inputs.
If one is not passed, then the AIMessage is assumed to be the final output.
"""
@property
def _type(self) -> str:
return "tools-agent-output-parser"
def parse_result(
self, result: list[Generation], *, partial: bool = False
) -> Union[list[AgentAction], AgentFinish]:
if not isinstance(result[0], ChatGeneration):
msg = "This output parser only works on ChatGeneration output"
raise ValueError(msg)
message = result[0].message
return parse_ai_message_to_tool_action(message)
def parse(self, text: str) -> Union[list[AgentAction], AgentFinish]:
msg = "Can only parse messages"
raise ValueError(msg)
|
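Both parser versions are exercised the same way. A hedged sketch of a direct call to `parse_ai_message_to_tool_action`; the `AIMessage` below is hypothetical and mirrors the name/args/id shape of `tool_calls` that the function expects, whereas in practice the message would come from a chat model:

from langchain_core.messages import AIMessage

msg = AIMessage(
    content="",
    tool_calls=[
        {"name": "search", "args": {"query": "weather in Paris"}, "id": "call_1"},
    ],
)

actions = parse_ai_message_to_tool_action(msg)
for action in actions:
    # Each entry is a ToolAgentAction carrying the tool name, the parsed input,
    # the originating message, and the tool call id.
    print(action.tool, action.tool_input, action.tool_call_id)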
import numpy as np
import pytest
from numpy.testing import assert_allclose
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import DetCurveDisplay, det_curve
@pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"])
@pytest.mark.parametrize("response_method", ["predict_proba", "decision_function"])
@pytest.mark.parametrize("with_sample_weight", [True, False])
@pytest.mark.parametrize("drop_intermediate", [True, False])
@pytest.mark.parametrize("with_strings", [True, False])
def test_det_curve_display(
pyplot,
constructor_name,
response_method,
with_sample_weight,
drop_intermediate,
with_strings,
):
X, y = load_iris(return_X_y=True)
# Binarize the data with only the two first classes
X, y = X[y < 2], y[y < 2]
pos_label = None
if with_strings:
y = np.array(["c", "b"])[y]
pos_label = "c"
if with_sample_weight:
rng = np.random.RandomState(42)
sample_weight = rng.randint(1, 4, size=(X.shape[0]))
else:
sample_weight = None
lr = LogisticRegression()
lr.fit(X, y)
y_pred = getattr(lr, response_method)(X)
if y_pred.ndim == 2:
y_pred = y_pred[:, 1]
# safeguard for the binary if/else construction
assert constructor_name in ("from_estimator", "from_predictions")
common_kwargs = {
"name": lr.__class__.__name__,
"alpha": 0.8,
"sample_weight": sample_weight,
"drop_intermediate": drop_intermediate,
"pos_label": pos_label,
}
if constructor_name == "from_estimator":
disp = DetCurveDisplay.from_estimator(lr, X, y, **common_kwargs)
else:
disp = DetCurveDisplay.from_predictions(y, y_pred, **common_kwargs)
fpr, fnr, _ = det_curve(
y,
y_pred,
sample_weight=sample_weight,
drop_intermediate=drop_intermediate,
pos_label=pos_label,
)
assert_allclose(disp.fpr, fpr, atol=1e-7)
assert_allclose(disp.fnr, fnr, atol=1e-7)
assert disp.estimator_name == "LogisticRegression"
# cannot fail thanks to pyplot fixture
import matplotlib as mpl
assert isinstance(disp.line_, mpl.lines.Line2D)
assert disp.line_.get_alpha() == 0.8
assert isinstance(disp.ax_, mpl.axes.Axes)
assert isinstance(disp.figure_, mpl.figure.Figure)
assert disp.line_.get_label() == "LogisticRegression"
expected_pos_label = 1 if pos_label is None else pos_label
expected_ylabel = f"False Negative Rate (Positive label: {expected_pos_label})"
expected_xlabel = f"False Positive Rate (Positive label: {expected_pos_label})"
assert disp.ax_.get_ylabel() == expected_ylabel
assert disp.ax_.get_xlabel() == expected_xlabel
@pytest.mark.parametrize(
"constructor_name, expected_clf_name",
[
("from_estimator", "LogisticRegression"),
("from_predictions", "Classifier"),
],
)
def test_det_curve_display_default_name(
pyplot,
constructor_name,
expected_clf_name,
):
# Check the default name display in the figure when `name` is not provided
X, y = load_iris(return_X_y=True)
# Binarize the data with only the two first classes
X, y = X[y < 2], y[y < 2]
lr = LogisticRegression().fit(X, y)
y_pred = lr.predict_proba(X)[:, 1]
if constructor_name == "from_estimator":
disp = DetCurveDisplay.from_estimator(lr, X, y)
else:
disp = DetCurveDisplay.from_predictions(y, y_pred)
assert disp.estimator_name == expected_clf_name
assert disp.line_.get_label() == expected_clf_name
|
import numpy as np
import pytest
from numpy.testing import assert_allclose
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import DetCurveDisplay, det_curve
@pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"])
@pytest.mark.parametrize("response_method", ["predict_proba", "decision_function"])
@pytest.mark.parametrize("with_sample_weight", [True, False])
@pytest.mark.parametrize("with_strings", [True, False])
def test_det_curve_display(
pyplot, constructor_name, response_method, with_sample_weight, with_strings
):
X, y = load_iris(return_X_y=True)
# Binarize the data with only the two first classes
X, y = X[y < 2], y[y < 2]
pos_label = None
if with_strings:
y = np.array(["c", "b"])[y]
pos_label = "c"
if with_sample_weight:
rng = np.random.RandomState(42)
sample_weight = rng.randint(1, 4, size=(X.shape[0]))
else:
sample_weight = None
lr = LogisticRegression()
lr.fit(X, y)
y_pred = getattr(lr, response_method)(X)
if y_pred.ndim == 2:
y_pred = y_pred[:, 1]
# safeguard for the binary if/else construction
assert constructor_name in ("from_estimator", "from_predictions")
common_kwargs = {
"name": lr.__class__.__name__,
"alpha": 0.8,
"sample_weight": sample_weight,
"pos_label": pos_label,
}
if constructor_name == "from_estimator":
disp = DetCurveDisplay.from_estimator(lr, X, y, **common_kwargs)
else:
disp = DetCurveDisplay.from_predictions(y, y_pred, **common_kwargs)
fpr, fnr, _ = det_curve(
y,
y_pred,
sample_weight=sample_weight,
pos_label=pos_label,
)
assert_allclose(disp.fpr, fpr)
assert_allclose(disp.fnr, fnr)
assert disp.estimator_name == "LogisticRegression"
# cannot fail thanks to pyplot fixture
import matplotlib as mpl
assert isinstance(disp.line_, mpl.lines.Line2D)
assert disp.line_.get_alpha() == 0.8
assert isinstance(disp.ax_, mpl.axes.Axes)
assert isinstance(disp.figure_, mpl.figure.Figure)
assert disp.line_.get_label() == "LogisticRegression"
expected_pos_label = 1 if pos_label is None else pos_label
expected_ylabel = f"False Negative Rate (Positive label: {expected_pos_label})"
expected_xlabel = f"False Positive Rate (Positive label: {expected_pos_label})"
assert disp.ax_.get_ylabel() == expected_ylabel
assert disp.ax_.get_xlabel() == expected_xlabel
@pytest.mark.parametrize(
"constructor_name, expected_clf_name",
[
("from_estimator", "LogisticRegression"),
("from_predictions", "Classifier"),
],
)
def test_det_curve_display_default_name(
pyplot,
constructor_name,
expected_clf_name,
):
# Check the default name display in the figure when `name` is not provided
X, y = load_iris(return_X_y=True)
# Binarize the data with only the two first classes
X, y = X[y < 2], y[y < 2]
lr = LogisticRegression().fit(X, y)
y_pred = lr.predict_proba(X)[:, 1]
if constructor_name == "from_estimator":
disp = DetCurveDisplay.from_estimator(lr, X, y)
else:
disp = DetCurveDisplay.from_predictions(y, y_pred)
assert disp.estimator_name == expected_clf_name
assert disp.line_.get_label() == expected_clf_name
|
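Stripped of the parametrization, the display API that both test versions cover comes down to two constructors. A minimal sketch on the same binarized iris subset (assumes matplotlib is installed, since the display draws onto the current axes):

import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import DetCurveDisplay

X, y = load_iris(return_X_y=True)
X, y = X[y < 2], y[y < 2]  # keep only the first two classes (binary problem)
lr = LogisticRegression().fit(X, y)

# Build the DET curve either from the fitted estimator...
disp = DetCurveDisplay.from_estimator(lr, X, y, name="LogisticRegression")
# ...or from precomputed scores for the positive class.
y_score = lr.predict_proba(X)[:, 1]
disp = DetCurveDisplay.from_predictions(y, y_score, name="LogisticRegression")
plt.show()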
from __future__ import annotations
import os
from copy import deepcopy
import numpy as np
import pytest
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models import Pooling, StaticEmbedding, Transformer
from sentence_transformers.util import is_datasets_available
from tests.utils import SafeTemporaryDirectory
if is_datasets_available():
from datasets import DatasetDict, load_dataset
@pytest.fixture(scope="session")
def _stsb_bert_tiny_model() -> SentenceTransformer:
model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
model.model_card_data.generate_widget_examples = False # Disable widget examples generation for testing
return model
@pytest.fixture()
def stsb_bert_tiny_model(_stsb_bert_tiny_model: SentenceTransformer) -> SentenceTransformer:
return deepcopy(_stsb_bert_tiny_model)
@pytest.fixture(scope="session")
def _avg_word_embeddings_levy() -> SentenceTransformer:
model = SentenceTransformer("sentence-transformers/average_word_embeddings_levy_dependency")
model.model_card_data.generate_widget_examples = False # Disable widget examples generation for testing
return model
@pytest.fixture()
def avg_word_embeddings_levy(_avg_word_embeddings_levy: SentenceTransformer) -> SentenceTransformer:
return deepcopy(_avg_word_embeddings_levy)
@pytest.fixture()
def stsb_bert_tiny_model_onnx() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-onnx")
@pytest.fixture()
def stsb_bert_tiny_model_openvino() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-openvino")
@pytest.fixture()
def paraphrase_distilroberta_base_v1_model() -> SentenceTransformer:
return SentenceTransformer("paraphrase-distilroberta-base-v1")
@pytest.fixture(scope="session")
def _static_retrieval_mrl_en_v1_model() -> SentenceTransformer:
model = SentenceTransformer("sentence-transformers/static-retrieval-mrl-en-v1")
return model
@pytest.fixture()
def static_retrieval_mrl_en_v1_model(_static_retrieval_mrl_en_v1_model: SentenceTransformer) -> SentenceTransformer:
return deepcopy(_static_retrieval_mrl_en_v1_model)
@pytest.fixture()
def clip_vit_b_32_model() -> SentenceTransformer:
return SentenceTransformer("clip-ViT-B-32")
@pytest.fixture(scope="session")
def tokenizer() -> Tokenizer:
return Tokenizer.from_pretrained("bert-base-uncased")
@pytest.fixture
def embedding_weights():
return np.random.rand(30522, 768)
@pytest.fixture
def static_embedding_model(tokenizer: Tokenizer, embedding_weights) -> StaticEmbedding:
return StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
@pytest.fixture()
def distilbert_base_uncased_model() -> SentenceTransformer:
word_embedding_model = Transformer("distilbert-base-uncased")
pooling_model = Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
return model
@pytest.fixture(scope="session")
def stsb_dataset_dict() -> DatasetDict:
return load_dataset("sentence-transformers/stsb")
@pytest.fixture()
def cache_dir():
"""
In the CI environment, we use a temporary directory as `cache_dir`
to avoid keeping the downloaded models on disk after the test.
"""
if os.environ.get("CI", None):
# Note: `ignore_cleanup_errors=True` is used to avoid NotADirectoryError in Windows on GitHub Actions.
# See https://github.com/python/cpython/issues/107408, https://www.scivision.dev/python-tempfile-permission-error-windows/
with SafeTemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield None
|
from __future__ import annotations
import os
from copy import deepcopy
import numpy as np
import pytest
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models import Pooling, StaticEmbedding, Transformer
from sentence_transformers.util import is_datasets_available
from tests.utils import SafeTemporaryDirectory
if is_datasets_available():
from datasets import DatasetDict, load_dataset
@pytest.fixture(scope="session")
def _stsb_bert_tiny_model() -> SentenceTransformer:
model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
model.model_card_data.generate_widget_examples = False # Disable widget examples generation for testing
return model
@pytest.fixture()
def stsb_bert_tiny_model(_stsb_bert_tiny_model: SentenceTransformer) -> SentenceTransformer:
return deepcopy(_stsb_bert_tiny_model)
@pytest.fixture(scope="session")
def _avg_word_embeddings_levy() -> SentenceTransformer:
model = SentenceTransformer("sentence-transformers/average_word_embeddings_levy_dependency")
model.model_card_data.generate_widget_examples = False # Disable widget examples generation for testing
return model
@pytest.fixture()
def avg_word_embeddings_levy(_avg_word_embeddings_levy: SentenceTransformer) -> SentenceTransformer:
return deepcopy(_avg_word_embeddings_levy)
@pytest.fixture()
def stsb_bert_tiny_model_onnx() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-onnx")
@pytest.fixture()
def stsb_bert_tiny_model_openvino() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-openvino")
@pytest.fixture()
def paraphrase_distilroberta_base_v1_model() -> SentenceTransformer:
return SentenceTransformer("paraphrase-distilroberta-base-v1")
@pytest.fixture()
def clip_vit_b_32_model() -> SentenceTransformer:
return SentenceTransformer("clip-ViT-B-32")
@pytest.fixture(scope="session")
def tokenizer() -> Tokenizer:
return Tokenizer.from_pretrained("bert-base-uncased")
@pytest.fixture
def embedding_weights():
return np.random.rand(30522, 768)
@pytest.fixture
def static_embedding_model(tokenizer: Tokenizer, embedding_weights) -> StaticEmbedding:
return StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
@pytest.fixture()
def distilbert_base_uncased_model() -> SentenceTransformer:
word_embedding_model = Transformer("distilbert-base-uncased")
pooling_model = Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
return model
@pytest.fixture(scope="session")
def stsb_dataset_dict() -> DatasetDict:
return load_dataset("sentence-transformers/stsb")
@pytest.fixture()
def cache_dir():
"""
In the CI environment, we use a temporary directory as `cache_dir`
to avoid keeping the downloaded models on disk after the test.
"""
if os.environ.get("CI", None):
# Note: `ignore_cleanup_errors=True` is used to avoid NotADirectoryError in Windows on GitHub Actions.
# See https://github.com/python/cpython/issues/107408, https://www.scivision.dev/python-tempfile-permission-error-windows/
with SafeTemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield None
|
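A hedged sketch of how a test module next to this conftest would consume the fixtures; pytest injects them by argument name, the session-scoped model is handed out as a deepcopy per test, and `cache_dir` resolves to a temporary directory on CI and None locally. The test file name and assertions are illustrative only:

# test_encode.py (hypothetical)
from sentence_transformers import SentenceTransformer


def test_encode_shape(stsb_bert_tiny_model: SentenceTransformer, cache_dir) -> None:
    # stsb_bert_tiny_model is a fresh copy of the tiny test model for this test only.
    embeddings = stsb_bert_tiny_model.encode(["a sentence", "another sentence"])
    assert embeddings.shape[0] == 2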
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '2.21.0'
short_version = __version__
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '2.20.0'
short_version = __version__
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
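The two files differ only in the version string; `parse_version_info` behaves identically in both. For illustration, its output on plain and release-candidate versions:

assert parse_version_info('2.21.0') == (2, 21, 0)
assert parse_version_info('2.20.0') == (2, 20, 0)
# A pre-release segment is split into its numeric part and an 'rc...' suffix.
assert parse_version_info('2.25.0rc1') == (2, 25, 0, 'rc1')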
from ._vggish import VGGISH, VGGishBundle
from .hifigan_pipeline import HIFIGAN_VOCODER_V3_LJSPEECH as _HIFIGAN_VOCODER_V3_LJSPEECH, HiFiGANVocoderBundle
from .rnnt_pipeline import (
EMFORMER_RNNT_BASE_MUSTC as _EMFORMER_RNNT_BASE_MUSTC,
EMFORMER_RNNT_BASE_TEDLIUM3 as _EMFORMER_RNNT_BASE_TEDLIUM3
)
from torchaudio._internal.module_utils import dropping_const_support
EMFORMER_RNNT_BASE_MUSTC = dropping_const_support(_EMFORMER_RNNT_BASE_MUSTC)
EMFORMER_RNNT_BASE_TEDLIUM3 = dropping_const_support(_EMFORMER_RNNT_BASE_TEDLIUM3)
HIFIGAN_VOCODER_V3_LJSPEECH = dropping_const_support(_HIFIGAN_VOCODER_V3_LJSPEECH)
__all__ = [
"EMFORMER_RNNT_BASE_MUSTC",
"EMFORMER_RNNT_BASE_TEDLIUM3",
"HIFIGAN_VOCODER_V3_LJSPEECH",
"HiFiGANVocoderBundle",
"VGGISH",
"VGGishBundle",
]
|
from ._vggish import VGGISH, VGGishBundle
from .hifigan_pipeline import HIFIGAN_VOCODER_V3_LJSPEECH, HiFiGANVocoderBundle
from .rnnt_pipeline import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3
__all__ = [
"EMFORMER_RNNT_BASE_MUSTC",
"EMFORMER_RNNT_BASE_TEDLIUM3",
"HIFIGAN_VOCODER_V3_LJSPEECH",
"HiFiGANVocoderBundle",
"VGGISH",
"VGGishBundle",
]
|
_base_ = './cascade-rcnn_r50_fpn_8xb8-amp-lsj-200e_coco.py'
model = dict(
backbone=dict(
depth=18,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
neck=dict(in_channels=[64, 128, 256, 512]))
|
_base_ = './cascade_rcnn_r50_fpn_lsj_200e_8x8_fp16_coco.py'
model = dict(
backbone=dict(
depth=18,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
neck=dict(in_channels=[64, 128, 256, 512]))
|
import numpy as np
from docarray.proto import DocumentProto, NdArrayProto, NodeProto
from docarray.typing import Tensor
def test_nested_item_proto():
NodeProto(text='hello')
NodeProto(nested=DocumentProto())
def test_nested_optional_item_proto():
NodeProto()
def test_ndarray():
nd_proto = NdArrayProto()
original_tensor = np.zeros((3, 224, 224))
Tensor.flush_ndarray(nd_proto, value=original_tensor)
nested_item = NodeProto(tensor=nd_proto)
tensor = Tensor.read_ndarray(nested_item.tensor)
assert (tensor == original_tensor).all()
def test_document_proto_set():
data = {}
nested_item1 = NodeProto(text='hello')
nd_proto = NdArrayProto()
original_tensor = np.zeros((3, 224, 224))
Tensor.flush_ndarray(nd_proto, value=original_tensor)
nested_item2 = NodeProto(tensor=nd_proto)
data['a'] = nested_item1
data['b'] = nested_item2
DocumentProto(data=data)
|
import numpy as np
from docarray.proto import DocumentProto, NdArrayProto, NodeProto
from docarray.proto.io import flush_ndarray, read_ndarray
def test_nested_item_proto():
NodeProto(text='hello')
NodeProto(nested=DocumentProto())
def test_nested_optional_item_proto():
NodeProto()
def test_ndarray():
nd_proto = NdArrayProto()
original_tensor = np.zeros((3, 224, 224))
flush_ndarray(nd_proto, value=original_tensor)
nested_item = NodeProto(tensor=nd_proto)
tensor = read_ndarray(nested_item.tensor)
assert (tensor == original_tensor).all()
def test_document_proto_set():
data = {}
nested_item1 = NodeProto(text='hello')
nd_proto = NdArrayProto()
original_tensor = np.zeros((3, 224, 224))
flush_ndarray(nd_proto, value=original_tensor)
nested_item2 = NodeProto(tensor=nd_proto)
data['a'] = nested_item1
data['b'] = nested_item2
DocumentProto(data=data)
|
from typing import Any, Optional
from llama_index.core.base.base_query_engine import BaseQueryEngine
from llama_index.core.evaluation import (
AnswerRelevancyEvaluator,
BaseEvaluator,
EvaluationResult,
)
from llama_index.core.tools import QueryEngineTool
from llama_index.core.tools.types import ToolMetadata, ToolOutput
DEFAULT_NAME = "query_engine_tool"
DEFAULT_DESCRIPTION = """Useful for running a natural language query
against a knowledge base and getting back a natural language response.
"""
FAILED_TOOL_OUTPUT_TEMPLATE = (
"Could not use tool {tool_name} because it failed evaluation.\n" "Reason: {reason}"
)
class EvalQueryEngineTool(QueryEngineTool):
"""
Evaluating query engine tool.
A tool that makes use of a query engine and an evaluator, where the
evaluation of the query engine response will determine the tool output.
Args:
evaluator (BaseEvaluator): An evaluator for the query engine response.
query_engine (BaseQueryEngine): A query engine.
metadata (ToolMetadata): The associated metadata of the query engine.
"""
_evaluator: BaseEvaluator
_failed_tool_output_template: str
def __init__(
self,
evaluator: BaseEvaluator,
*args: Any,
failed_tool_output_template: str = FAILED_TOOL_OUTPUT_TEMPLATE,
**kwargs: Any,
):
super().__init__(*args, **kwargs)
self._evaluator = evaluator
self._failed_tool_output_template = failed_tool_output_template
def _process_tool_output(
self,
tool_output: ToolOutput,
evaluation_result: EvaluationResult,
) -> ToolOutput:
if evaluation_result.passing:
return tool_output
tool_output.content = self._failed_tool_output_template.format(
tool_name=self.metadata.name,
reason=evaluation_result.feedback,
)
return tool_output
@classmethod
def from_defaults(
cls,
query_engine: BaseQueryEngine,
name: Optional[str] = None,
description: Optional[str] = None,
return_direct: bool = False,
resolve_input_errors: bool = True,
evaluator: Optional[BaseEvaluator] = None,
) -> "EvalQueryEngineTool":
return cls(
evaluator=evaluator or AnswerRelevancyEvaluator(),
query_engine=query_engine,
metadata=ToolMetadata(
name=name or DEFAULT_NAME,
description=description or DEFAULT_DESCRIPTION,
return_direct=return_direct,
),
resolve_input_errors=resolve_input_errors,
)
def call(self, *args: Any, **kwargs: Any) -> ToolOutput:
tool_output = super().call(*args, **kwargs)
evaluation_results = self._evaluator.evaluate_response(
tool_output.raw_input["input"], tool_output.raw_output
)
return self._process_tool_output(tool_output, evaluation_results)
async def acall(self, *args: Any, **kwargs: Any) -> ToolOutput:
tool_output = await super().acall(*args, **kwargs)
evaluation_results = await self._evaluator.aevaluate_response(
tool_output.raw_input["input"], tool_output.raw_output
)
return self._process_tool_output(tool_output, evaluation_results)
|
from typing import Any, Optional
from llama_index.core.base.base_query_engine import BaseQueryEngine
from llama_index.core.evaluation import (
AnswerRelevancyEvaluator,
BaseEvaluator,
EvaluationResult,
)
from llama_index.core.tools import QueryEngineTool
from llama_index.core.tools.types import ToolMetadata, ToolOutput
DEFAULT_NAME = "query_engine_tool"
DEFAULT_DESCRIPTION = """Useful for running a natural language query
against a knowledge base and getting back a natural language response.
"""
FAILED_TOOL_OUTPUT_TEMPLATE = (
"Could not use tool {tool_name} because it failed evaluation.\n" "Reason: {reason}"
)
class EvalQueryEngineTool(QueryEngineTool):
"""Evaluating query engine tool.
A tool that makes use of a query engine and an evaluator, where the
evaluation of the query engine response will determine the tool output.
Args:
evaluator (BaseEvaluator): An evaluator for the query engine response.
query_engine (BaseQueryEngine): A query engine.
metadata (ToolMetadata): The associated metadata of the query engine.
"""
_evaluator: BaseEvaluator
_failed_tool_output_template: str
def __init__(
self,
evaluator: BaseEvaluator,
*args: Any,
failed_tool_output_template: str = FAILED_TOOL_OUTPUT_TEMPLATE,
**kwargs: Any,
):
super().__init__(*args, **kwargs)
self._evaluator = evaluator
self._failed_tool_output_template = failed_tool_output_template
def _process_tool_output(
self,
tool_output: ToolOutput,
evaluation_result: EvaluationResult,
) -> ToolOutput:
if evaluation_result.passing:
return tool_output
tool_output.content = self._failed_tool_output_template.format(
tool_name=self.metadata.name,
reason=evaluation_result.feedback,
)
return tool_output
@classmethod
def from_defaults(
cls,
query_engine: BaseQueryEngine,
name: Optional[str] = None,
description: Optional[str] = None,
return_direct: bool = False,
resolve_input_errors: bool = True,
evaluator: Optional[BaseEvaluator] = None,
) -> "EvalQueryEngineTool":
return cls(
evaluator=evaluator or AnswerRelevancyEvaluator(),
query_engine=query_engine,
metadata=ToolMetadata(
name=name or DEFAULT_NAME,
description=description or DEFAULT_DESCRIPTION,
return_direct=return_direct,
),
resolve_input_errors=resolve_input_errors,
)
def call(self, *args: Any, **kwargs: Any) -> ToolOutput:
tool_output = super().call(*args, **kwargs)
evaluation_results = self._evaluator.evaluate_response(
tool_output.raw_input["input"], tool_output.raw_output
)
return self._process_tool_output(tool_output, evaluation_results)
async def acall(self, *args: Any, **kwargs: Any) -> ToolOutput:
tool_output = await super().acall(*args, **kwargs)
evaluation_results = await self._evaluator.aevaluate_response(
tool_output.raw_input["input"], tool_output.raw_output
)
return self._process_tool_output(tool_output, evaluation_results)
|
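A hedged usage sketch for the tool defined above. The `index` object and the tool name/description are assumptions for illustration; `EvalQueryEngineTool.from_defaults` and `AnswerRelevancyEvaluator` come from the snippet itself, and the evaluator still needs an LLM configured in the usual LlamaIndex way:

from llama_index.core.evaluation import AnswerRelevancyEvaluator

# Hypothetical: `index` is an already-built index (e.g. a VectorStoreIndex).
query_engine = index.as_query_engine()

tool = EvalQueryEngineTool.from_defaults(
    query_engine=query_engine,
    evaluator=AnswerRelevancyEvaluator(),
    name="docs_query_tool",
    description="Answers questions about the indexed documents.",
)

output = tool.call(input="What does the document say about pricing?")
# If the evaluator marks the response as failing, output.content holds the
# templated failure message instead of the raw query engine answer.
print(output.content)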
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (LMDB and Memcache are not yet supported)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox',
backend_args=backend_args)
test_evaluator = val_evaluator
# training schedule for 3x with `RepeatDataset`
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
# Experiments show that using milestones=[9, 11] has higher performance
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[9, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline)))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox')
test_evaluator = val_evaluator
# training schedule for 3x with `RepeatDataset`
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
# Experiments show that using milestones=[9, 11] has higher performance
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[9, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
|
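The first of the two configs realizes the "3x" schedule mentioned in its comments by wrapping the training set in `RepeatDataset(times=3)` while keeping `max_epochs=12`. A short sanity check of the arithmetic, assuming the conventional 12-epoch "1x" baseline:

times = 3
max_epochs = 12
effective_epochs = times * max_epochs  # 36, i.e. 3x the standard 12-epoch schedule
# The MultiStepLR milestones [9, 11] on the repeated dataset correspond to
# roughly epochs 27 and 33 in plain (un-repeated) epoch terms.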
from typing import TYPE_CHECKING, Any, Dict, Optional, TypeVar
import numpy as np
from pydantic import parse_obj_as
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.url.url_3d.url_3d import Url3D
if TYPE_CHECKING:
from docarray.documents.mesh.vertices_and_faces import VerticesAndFaces
T = TypeVar('T', bound='Mesh3DUrl')
@_register_proto(proto_type_name='mesh_url')
class Mesh3DUrl(Url3D):
"""
URL to a .obj, .glb, or .ply file containing 3D mesh information.
Can be remote (web) URL, or a local file path.
"""
def load(
self: T,
skip_materials: bool = True,
trimesh_args: Optional[Dict[str, Any]] = None,
) -> 'VerticesAndFaces':
"""
Load the data from the url into a VerticesAndFaces object containing
vertices and faces information.
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDoc
import numpy as np
from docarray.typing import Mesh3DUrl, NdArray
class MyDoc(BaseDoc):
mesh_url: Mesh3DUrl
doc = MyDoc(mesh_url="toydata/tetrahedron.obj")
tensors = doc.mesh_url.load()
assert isinstance(tensors.vertices, NdArray)
assert isinstance(tensors.faces, NdArray)
:param skip_materials: Skip materials if True, else load them.
:param trimesh_args: dictionary of additional arguments for `trimesh.load()`
or `trimesh.load_remote()`.
:return: VerticesAndFaces object containing vertices and faces information.
"""
from docarray.documents.mesh.vertices_and_faces import VerticesAndFaces
if not trimesh_args:
trimesh_args = {}
mesh = self._load_trimesh_instance(
force='mesh', skip_materials=skip_materials, **trimesh_args
)
vertices = parse_obj_as(NdArray, mesh.vertices.view(np.ndarray))
faces = parse_obj_as(NdArray, mesh.faces.view(np.ndarray))
return VerticesAndFaces(vertices=vertices, faces=faces)
def display(self) -> None:
"""
Plot mesh from url.
This loads the Trimesh instance of the 3D mesh, and then displays it.
To use this you need to install trimesh[easy]: `pip install 'trimesh[easy]'`.
"""
from IPython.display import display
mesh = self._load_trimesh_instance(skip_materials=False)
display(mesh.show())
|
from typing import TYPE_CHECKING, Any, Dict, Optional, TypeVar
import numpy as np
from pydantic import parse_obj_as
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.url.url_3d.url_3d import Url3D
if TYPE_CHECKING:
from docarray.documents.mesh.vertices_and_faces import VerticesAndFaces
T = TypeVar('T', bound='Mesh3DUrl')
@_register_proto(proto_type_name='mesh_url')
class Mesh3DUrl(Url3D):
"""
URL to a .obj, .glb, or .ply file containing 3D mesh information.
Can be remote (web) URL, or a local file path.
"""
def load(
self: T,
skip_materials: bool = True,
trimesh_args: Optional[Dict[str, Any]] = None,
) -> 'VerticesAndFaces':
"""
Load the data from the url into a VerticesAndFaces object containing
vertices and faces information.
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDocument
import numpy as np
from docarray.typing import Mesh3DUrl, NdArray
class MyDoc(BaseDocument):
mesh_url: Mesh3DUrl
doc = MyDoc(mesh_url="toydata/tetrahedron.obj")
tensors = doc.mesh_url.load()
assert isinstance(tensors.vertices, NdArray)
assert isinstance(tensors.faces, NdArray)
:param skip_materials: Skip materials if True, else load them.
:param trimesh_args: dictionary of additional arguments for `trimesh.load()`
or `trimesh.load_remote()`.
:return: VerticesAndFaces object containing vertices and faces information.
"""
from docarray.documents.mesh.vertices_and_faces import VerticesAndFaces
if not trimesh_args:
trimesh_args = {}
mesh = self._load_trimesh_instance(
force='mesh', skip_materials=skip_materials, **trimesh_args
)
vertices = parse_obj_as(NdArray, mesh.vertices.view(np.ndarray))
faces = parse_obj_as(NdArray, mesh.faces.view(np.ndarray))
return VerticesAndFaces(vertices=vertices, faces=faces)
def display(self) -> None:
"""
Plot mesh from url.
This loads the Trimesh instance of the 3D mesh, and then displays it.
To use this you need to install trimesh[easy]: `pip install 'trimesh[easy]'`.
"""
from IPython.display import display
mesh = self._load_trimesh_instance(skip_materials=False)
display(mesh.show())
|
from .base import ElevenLabsVoiceAgent, ElevenLabsVoiceAgentInterface
__all__ = ["ElevenLabsVoiceAgent", "ElevenLabsVoiceAgentInterface"]
|
from .base import ElevenLabsConversation
__all__ = ["ElevenLabsConversation"]
|
import mimetypes
from typing import TYPE_CHECKING, Optional
from docarray.document.mixins._property import _PropertyMixin
if TYPE_CHECKING:
from docarray.typing import DocumentContentType, ArrayType
from docarray import DocumentArray
_all_mime_types = set(mimetypes.types_map.values())
class PropertyMixin(_PropertyMixin):
def _clear_content(self):
self._data.content = None
self._data.text = None
self._data.tensor = None
self._data.blob = None
@property
def content(self) -> Optional['DocumentContentType']:
ct = self.content_type
if ct:
return getattr(self, ct)
@_PropertyMixin.text.setter
def text(self, value: str):
if value is not None:
self._clear_content()
self._data.text = value
@_PropertyMixin.blob.setter
def blob(self, value: bytes):
if value is not None:
self._clear_content()
self._data.blob = value
@_PropertyMixin.tensor.setter
def tensor(self, value: 'ArrayType'):
if value is not None:
self._clear_content()
self._data.tensor = value
@content.setter
def content(self, value: 'DocumentContentType'):
self._clear_content()
if isinstance(value, bytes):
self._data.blob = value
elif isinstance(value, str):
self._data.text = value
elif value is not None:
self._data.tensor = value
@_PropertyMixin.uri.setter
def uri(self, value: str):
if value:
mime_type = mimetypes.guess_type(value)[0]
if mime_type:
self._data.mime_type = mime_type
self._data.uri = value
@_PropertyMixin.mime_type.setter
def mime_type(self, value: str):
if value and value not in _all_mime_types:
# given but not recognized, make a best guess
r = mimetypes.guess_type(f'*.{value}')[0]
value = r or value
self._data.mime_type = value
@_PropertyMixin.chunks.setter
def chunks(self, value: 'DocumentArray'):
from docarray.array.chunk import ChunkArray
if not isinstance(value, ChunkArray):
value = ChunkArray(value, reference_doc=self._data._reference_doc)
self._data.chunks = value
@_PropertyMixin.matches.setter
def matches(self, value: 'DocumentArray'):
from docarray.array.match import MatchArray
if not isinstance(value, MatchArray):
value = MatchArray(value, reference_doc=self._data._reference_doc)
self._data.matches = value
@property
def content_type(self) -> Optional[str]:
nf = self.non_empty_fields
if 'text' in nf:
return 'text'
elif 'tensor' in nf:
return 'tensor'
elif 'blob' in nf:
return 'blob'
|
import mimetypes
from typing import TYPE_CHECKING, Optional
from ._property import _PropertyMixin
if TYPE_CHECKING:
from ...typing import DocumentContentType, ArrayType
from ... import DocumentArray
_all_mime_types = set(mimetypes.types_map.values())
class PropertyMixin(_PropertyMixin):
def _clear_content(self):
self._data.content = None
self._data.text = None
self._data.tensor = None
self._data.blob = None
@property
def content(self) -> Optional['DocumentContentType']:
ct = self.content_type
if ct:
return getattr(self, ct)
@_PropertyMixin.text.setter
def text(self, value: str):
if value is not None:
self._clear_content()
self._data.text = value
@_PropertyMixin.blob.setter
def blob(self, value: bytes):
if value is not None:
self._clear_content()
self._data.blob = value
@_PropertyMixin.tensor.setter
def tensor(self, value: 'ArrayType'):
if value is not None:
self._clear_content()
self._data.tensor = value
@content.setter
def content(self, value: 'DocumentContentType'):
self._clear_content()
if isinstance(value, bytes):
self._data.blob = value
elif isinstance(value, str):
self._data.text = value
elif value is not None:
self._data.tensor = value
@_PropertyMixin.uri.setter
def uri(self, value: str):
if value:
mime_type = mimetypes.guess_type(value)[0]
if mime_type:
self._data.mime_type = mime_type
self._data.uri = value
@_PropertyMixin.mime_type.setter
def mime_type(self, value: str):
if value and value not in _all_mime_types:
# given but not recognized, make a best guess
r = mimetypes.guess_type(f'*.{value}')[0]
value = r or value
self._data.mime_type = value
@_PropertyMixin.chunks.setter
def chunks(self, value: 'DocumentArray'):
from ...array.chunk import ChunkArray
if not isinstance(value, ChunkArray):
value = ChunkArray(value, reference_doc=self._data._reference_doc)
self._data.chunks = value
@_PropertyMixin.matches.setter
def matches(self, value: 'DocumentArray'):
from ...array.match import MatchArray
if not isinstance(value, MatchArray):
value = MatchArray(value, reference_doc=self._data._reference_doc)
self._data.matches = value
@property
def content_type(self) -> Optional[str]:
nf = self.non_empty_fields
if 'text' in nf:
return 'text'
elif 'tensor' in nf:
return 'tensor'
elif 'blob' in nf:
return 'blob'
|
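The setters in both versions enforce a single content modality per document: assigning text clears tensor and blob, and vice versa, while the uri setter also guesses a mime type. A hedged sketch against the docarray v1 `Document` class that consumes this mixin (the class name is assumed here, not shown above):

import numpy as np
from docarray import Document

doc = Document(text='hello world')
print(doc.content_type)   # 'text'

# Assigning a tensor clears the previously set text.
doc.tensor = np.zeros((3, 224, 224))
print(doc.text)           # None
print(doc.content_type)   # 'tensor'

# Setting a uri also stores the guessed mime type.
doc.uri = 'toydata/cat.png'
print(doc.mime_type)      # 'image/png'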
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
from pathlib import Path
from .misc import is_str
def is_filepath(x):
return is_str(x) or isinstance(x, Path)
def fopen(filepath, *args, **kwargs):
if is_str(filepath):
return open(filepath, *args, **kwargs)
elif isinstance(filepath, Path):
return filepath.open(*args, **kwargs)
raise ValueError('`filepath` should be a string or a Path')
def check_file_exist(filename, msg_tmpl='file "{}" does not exist'):
if not osp.isfile(filename):
raise FileNotFoundError(msg_tmpl.format(filename))
def mkdir_or_exist(dir_name, mode=0o777):
if dir_name == '':
return
dir_name = osp.expanduser(dir_name)
os.makedirs(dir_name, mode=mode, exist_ok=True)
def symlink(src, dst, overwrite=True, **kwargs):
if os.path.lexists(dst) and overwrite:
os.remove(dst)
os.symlink(src, dst, **kwargs)
def scandir(dir_path, suffix=None, recursive=False, case_sensitive=True):
"""Scan a directory to find the interested files.
Args:
dir_path (str | :obj:`Path`): Path of the directory.
suffix (str | tuple(str), optional): File suffix that we are
interested in. Defaults to None.
recursive (bool, optional): If set to True, recursively scan the
directory. Defaults to False.
case_sensitive (bool, optional) : If set to False, ignore the case of
suffix. Defaults to True.
Returns:
A generator for all the files of interest with relative paths.
"""
if isinstance(dir_path, (str, Path)):
dir_path = str(dir_path)
else:
raise TypeError('"dir_path" must be a string or Path object')
if (suffix is not None) and not isinstance(suffix, (str, tuple)):
raise TypeError('"suffix" must be a string or tuple of strings')
if suffix is not None and not case_sensitive:
suffix = suffix.lower() if isinstance(suffix, str) else tuple(
item.lower() for item in suffix)
root = dir_path
def _scandir(dir_path, suffix, recursive, case_sensitive):
for entry in os.scandir(dir_path):
if not entry.name.startswith('.') and entry.is_file():
rel_path = osp.relpath(entry.path, root)
_rel_path = rel_path if case_sensitive else rel_path.lower()
if suffix is None or _rel_path.endswith(suffix):
yield rel_path
elif recursive and os.path.isdir(entry.path):
# scan recursively if entry.path is a directory
yield from _scandir(entry.path, suffix, recursive,
case_sensitive)
return _scandir(dir_path, suffix, recursive, case_sensitive)
def find_vcs_root(path, markers=('.git', )):
"""Finds the root directory (including itself) of specified markers.
Args:
path (str): Path of directory or file.
markers (list[str], optional): List of file or directory names.
Returns:
The directory that contains one of the markers, or None if not found.
"""
if osp.isfile(path):
path = osp.dirname(path)
prev, cur = None, osp.abspath(osp.expanduser(path))
while cur != prev:
if any(osp.exists(osp.join(cur, marker)) for marker in markers):
return cur
prev, cur = cur, osp.split(cur)[0]
return None
def is_abs(path: str) -> bool:
"""Check if path is an absolute path in different backends.
Args:
path (str): path of directory or file.
Returns:
bool: whether path is an absolute path.
"""
if osp.isabs(path) or path.startswith(('http://', 'https://', 's3://')):
return True
else:
return False
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
from pathlib import Path
from .misc import is_str
def is_filepath(x):
return is_str(x) or isinstance(x, Path)
def fopen(filepath, *args, **kwargs):
if is_str(filepath):
return open(filepath, *args, **kwargs)
elif isinstance(filepath, Path):
return filepath.open(*args, **kwargs)
raise ValueError('`filepath` should be a string or a Path')
def check_file_exist(filename, msg_tmpl='file "{}" does not exist'):
if not osp.isfile(filename):
raise FileNotFoundError(msg_tmpl.format(filename))
def mkdir_or_exist(dir_name, mode=0o777):
if dir_name == '':
return
dir_name = osp.expanduser(dir_name)
os.makedirs(dir_name, mode=mode, exist_ok=True)
def symlink(src, dst, overwrite=True, **kwargs):
if os.path.lexists(dst) and overwrite:
os.remove(dst)
os.symlink(src, dst, **kwargs)
def scandir(dir_path, suffix=None, recursive=False, case_sensitive=True):
"""Scan a directory to find the interested files.
Args:
dir_path (str | :obj:`Path`): Path of the directory.
suffix (str | tuple(str), optional): File suffix that we are
interested in. Default: None.
recursive (bool, optional): If set to True, recursively scan the
directory. Default: False.
case_sensitive (bool, optional) : If set to False, ignore the case of
suffix. Default: True.
Returns:
A generator for all the files of interest with relative paths.
"""
if isinstance(dir_path, (str, Path)):
dir_path = str(dir_path)
else:
raise TypeError('"dir_path" must be a string or Path object')
if (suffix is not None) and not isinstance(suffix, (str, tuple)):
raise TypeError('"suffix" must be a string or tuple of strings')
if suffix is not None and not case_sensitive:
suffix = suffix.lower() if isinstance(suffix, str) else tuple(
item.lower() for item in suffix)
root = dir_path
def _scandir(dir_path, suffix, recursive, case_sensitive):
for entry in os.scandir(dir_path):
if not entry.name.startswith('.') and entry.is_file():
rel_path = osp.relpath(entry.path, root)
_rel_path = rel_path if case_sensitive else rel_path.lower()
if suffix is None or _rel_path.endswith(suffix):
yield rel_path
elif recursive and os.path.isdir(entry.path):
# scan recursively if entry.path is a directory
yield from _scandir(entry.path, suffix, recursive,
case_sensitive)
return _scandir(dir_path, suffix, recursive, case_sensitive)
def find_vcs_root(path, markers=('.git', )):
"""Finds the root directory (including itself) of specified markers.
Args:
path (str): Path of directory or file.
markers (list[str], optional): List of file or directory names.
Returns:
        The directory containing one of the markers, or None if not found.
"""
if osp.isfile(path):
path = osp.dirname(path)
prev, cur = None, osp.abspath(osp.expanduser(path))
while cur != prev:
if any(osp.exists(osp.join(cur, marker)) for marker in markers):
return cur
prev, cur = cur, osp.split(cur)[0]
return None
def is_abs(path: str) -> bool:
"""Check if path is an absolute path in different backends.
Args:
path (str): path of directory or file.
Returns:
bool: whether path is an absolute path.
"""
    return osp.isabs(path) or path.startswith(('http://', 'https://', 's3://'))
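# Hedged usage sketch for the helpers above; the paths are illustrative and the
# __main__ guard is only here so the snippet can be appended without side effects.
if __name__ == '__main__':
    # list all .py files under the current directory, recursively and case-insensitively
    for rel_path in scandir('.', suffix='.py', recursive=True, case_sensitive=False):
        print(rel_path)
    # walk upwards from this file until a directory containing .git is found
    print('vcs root:', find_vcs_root(__file__))
    # is_abs also treats http/https/s3 URLs as absolute
    assert is_abs('s3://bucket/key')
    assert not is_abs('relative/path.txt')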
|
from docarray.typing.id import ID
from docarray.typing.tensor.audio import AudioNdArray
from docarray.typing.tensor.embedding.embedding import AnyEmbedding
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.typing.tensor.video import VideoNdArray
from docarray.typing.url import (
AnyUrl,
AudioUrl,
ImageUrl,
Mesh3DUrl,
PointCloud3DUrl,
TextUrl,
VideoUrl,
)
__all__ = [
'NdArray',
'AudioNdArray',
'VideoNdArray',
'AnyEmbedding',
'ImageUrl',
'AudioUrl',
'TextUrl',
'Mesh3DUrl',
'PointCloud3DUrl',
'VideoUrl',
'AnyUrl',
'ID',
'AnyTensor',
]
try:
import torch # noqa: F401
except ImportError:
pass
else:
from docarray.typing.tensor import TorchEmbedding, TorchTensor # noqa: F401
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor # noqa
from docarray.typing.tensor.video.video_torch_tensor import VideoTorchTensor # noqa
__all__.extend(
['AudioTorchTensor', 'TorchEmbedding', 'TorchTensor', 'VideoTorchTensor']
)
|
from docarray.typing.id import ID
from docarray.typing.tensor.audio import AudioNdArray
from docarray.typing.tensor.embedding.embedding import AnyEmbedding
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.typing.url import (
AnyUrl,
AudioUrl,
ImageUrl,
Mesh3DUrl,
PointCloud3DUrl,
TextUrl,
)
__all__ = [
'AudioNdArray',
'NdArray',
'AnyEmbedding',
'ImageUrl',
'AudioUrl',
'TextUrl',
'Mesh3DUrl',
'PointCloud3DUrl',
'AnyUrl',
'ID',
'AnyTensor',
]
try:
import torch # noqa: F401
except ImportError:
pass
else:
from docarray.typing.tensor import TorchEmbedding, TorchTensor # noqa: F401
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor # noqa
__all__.extend(['AudioTorchTensor', 'TorchEmbedding', 'TorchTensor'])
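# A minimal consumer-side sketch (an assumption about downstream code, not part of
# docarray itself) mirroring the guarded import above: prefer the torch-backed tensor
# type when torch is installed and fall back to the NumPy-backed NdArray otherwise.
try:
    from docarray.typing import TorchTensor
    preferred_tensor_type = TorchTensor
except ImportError:  # torch is missing, so TorchTensor was never exported
    from docarray.typing import NdArray
    preferred_tensor_type = NdArray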
|
# Copyright (c) OpenMMLab. All rights reserved.
from .checkloss_hook import CheckInvalidLossHook
from .ema import ExpMomentumEMAHook, LinearMomentumEMAHook
from .set_epoch_info_hook import SetEpochInfoHook
from .sync_norm_hook import SyncNormHook
from .sync_random_size_hook import SyncRandomSizeHook
from .yolox_lrupdater_hook import YOLOXLrUpdaterHook
from .yolox_mode_switch_hook import YOLOXModeSwitchHook
__all__ = [
'SyncRandomSizeHook', 'YOLOXModeSwitchHook', 'SyncNormHook',
'ExpMomentumEMAHook', 'LinearMomentumEMAHook', 'YOLOXLrUpdaterHook',
'CheckInvalidLossHook', 'SetEpochInfoHook'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .checkloss_hook import CheckInvalidLossHook
from .ema import ExpMomentumEMAHook, LinearMomentumEMAHook
from .sync_norm_hook import SyncNormHook
from .sync_random_size_hook import SyncRandomSizeHook
from .yolox_lrupdater_hook import YOLOXLrUpdaterHook
from .yolox_mode_switch_hook import YOLOXModeSwitchHook
__all__ = [
'SyncRandomSizeHook', 'YOLOXModeSwitchHook', 'SyncNormHook',
'ExpMomentumEMAHook', 'LinearMomentumEMAHook', 'YOLOXLrUpdaterHook',
'CheckInvalidLossHook'
]
|
"""
Implements the Generalized R-CNN framework
"""
import warnings
from collections import OrderedDict
from typing import Optional, Union
import torch
from torch import nn
from ...utils import _log_api_usage_once
class GeneralizedRCNN(nn.Module):
"""
Main class for Generalized R-CNN.
Args:
backbone (nn.Module):
rpn (nn.Module):
roi_heads (nn.Module): takes the features + the proposals from the RPN and computes
detections / masks from it.
transform (nn.Module): performs the data transformation from the inputs to feed into
the model
"""
def __init__(
self,
backbone: nn.Module,
rpn: nn.Module,
roi_heads: nn.Module,
transform: nn.Module,
) -> None:
super().__init__()
_log_api_usage_once(self)
self.transform = transform
self.backbone = backbone
self.rpn = rpn
self.roi_heads = roi_heads
# used only on torchscript mode
self._has_warned = False
@torch.jit.unused
def eager_outputs(
self, losses: dict[str, torch.Tensor], detections: list[dict[str, torch.Tensor]]
) -> Union[dict[str, torch.Tensor], list[dict[str, torch.Tensor]]]:
if self.training:
return losses
return detections
def forward(
self,
images: list[torch.Tensor],
targets: Optional[list[dict[str, torch.Tensor]]] = None,
) -> tuple[dict[str, torch.Tensor], list[dict[str, torch.Tensor]]]:
"""
Args:
images (list[Tensor]): images to be processed
targets (list[dict[str, tensor]]): ground-truth boxes present in the image (optional)
Returns:
result (list[BoxList] or dict[Tensor]): the output from the model.
During training, it returns a dict[Tensor] which contains the losses.
                During testing, it returns a list[BoxList] that contains additional fields
like `scores`, `labels` and `mask` (for Mask R-CNN models).
"""
if self.training:
if targets is None:
torch._assert(False, "targets should not be none when in training mode")
else:
for target in targets:
boxes = target["boxes"]
if isinstance(boxes, torch.Tensor):
torch._assert(
len(boxes.shape) == 2 and boxes.shape[-1] == 4,
f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.",
)
else:
torch._assert(
False,
f"Expected target boxes to be of type Tensor, got {type(boxes)}.",
)
original_image_sizes: list[tuple[int, int]] = []
for img in images:
val = img.shape[-2:]
torch._assert(
len(val) == 2,
f"expecting the last two dimensions of the Tensor to be H and W instead got {img.shape[-2:]}",
)
original_image_sizes.append((val[0], val[1]))
images, targets = self.transform(images, targets)
# Check for degenerate boxes
# TODO: Move this to a function
if targets is not None:
for target_idx, target in enumerate(targets):
boxes = target["boxes"]
degenerate_boxes = boxes[:, 2:] <= boxes[:, :2]
if degenerate_boxes.any():
# print the first degenerate box
bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0]
degen_bb: list[float] = boxes[bb_idx].tolist()
torch._assert(
False,
"All bounding boxes should have positive height and width."
f" Found invalid box {degen_bb} for target at index {target_idx}.",
)
features = self.backbone(images.tensors)
if isinstance(features, torch.Tensor):
features = OrderedDict([("0", features)])
proposals, proposal_losses = self.rpn(images, features, targets)
detections, detector_losses = self.roi_heads(features, proposals, images.image_sizes, targets)
detections = self.transform.postprocess(
detections, images.image_sizes, original_image_sizes
) # type: ignore[operator]
losses = {}
losses.update(detector_losses)
losses.update(proposal_losses)
if torch.jit.is_scripting():
if not self._has_warned:
warnings.warn("RCNN always returns a (Losses, Detections) tuple in scripting")
self._has_warned = True
return losses, detections
else:
return self.eager_outputs(losses, detections)
|
"""
Implements the Generalized R-CNN framework
"""
import warnings
from collections import OrderedDict
from typing import Dict, List, Optional, Tuple, Union
import torch
from torch import nn, Tensor
from ...utils import _log_api_usage_once
class GeneralizedRCNN(nn.Module):
"""
Main class for Generalized R-CNN.
Args:
backbone (nn.Module):
rpn (nn.Module):
roi_heads (nn.Module): takes the features + the proposals from the RPN and computes
detections / masks from it.
transform (nn.Module): performs the data transformation from the inputs to feed into
the model
"""
def __init__(self, backbone: nn.Module, rpn: nn.Module, roi_heads: nn.Module, transform: nn.Module) -> None:
super().__init__()
_log_api_usage_once(self)
self.transform = transform
self.backbone = backbone
self.rpn = rpn
self.roi_heads = roi_heads
# used only on torchscript mode
self._has_warned = False
@torch.jit.unused
def eager_outputs(self, losses, detections):
# type: (Dict[str, Tensor], List[Dict[str, Tensor]]) -> Union[Dict[str, Tensor], List[Dict[str, Tensor]]]
if self.training:
return losses
return detections
def forward(self, images, targets=None):
# type: (List[Tensor], Optional[List[Dict[str, Tensor]]]) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]]
"""
Args:
images (list[Tensor]): images to be processed
targets (list[Dict[str, Tensor]]): ground-truth boxes present in the image (optional)
Returns:
result (list[BoxList] or dict[Tensor]): the output from the model.
During training, it returns a dict[Tensor] which contains the losses.
                During testing, it returns a list[BoxList] that contains additional fields
like `scores`, `labels` and `mask` (for Mask R-CNN models).
"""
if self.training:
if targets is None:
torch._assert(False, "targets should not be none when in training mode")
else:
for target in targets:
boxes = target["boxes"]
if isinstance(boxes, torch.Tensor):
torch._assert(
len(boxes.shape) == 2 and boxes.shape[-1] == 4,
f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.",
)
else:
torch._assert(False, f"Expected target boxes to be of type Tensor, got {type(boxes)}.")
original_image_sizes: List[Tuple[int, int]] = []
for img in images:
val = img.shape[-2:]
torch._assert(
len(val) == 2,
f"expecting the last two dimensions of the Tensor to be H and W instead got {img.shape[-2:]}",
)
original_image_sizes.append((val[0], val[1]))
images, targets = self.transform(images, targets)
# Check for degenerate boxes
# TODO: Move this to a function
if targets is not None:
for target_idx, target in enumerate(targets):
boxes = target["boxes"]
degenerate_boxes = boxes[:, 2:] <= boxes[:, :2]
if degenerate_boxes.any():
# print the first degenerate box
bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0]
degen_bb: List[float] = boxes[bb_idx].tolist()
torch._assert(
False,
"All bounding boxes should have positive height and width."
f" Found invalid box {degen_bb} for target at index {target_idx}.",
)
features = self.backbone(images.tensors)
if isinstance(features, torch.Tensor):
features = OrderedDict([("0", features)])
proposals, proposal_losses = self.rpn(images, features, targets)
detections, detector_losses = self.roi_heads(features, proposals, images.image_sizes, targets)
detections = self.transform.postprocess(detections, images.image_sizes, original_image_sizes) # type: ignore[operator]
losses = {}
losses.update(detector_losses)
losses.update(proposal_losses)
if torch.jit.is_scripting():
if not self._has_warned:
warnings.warn("RCNN always returns a (Losses, Detections) tuple in scripting")
self._has_warned = True
return losses, detections
else:
return self.eager_outputs(losses, detections)
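# Hedged usage sketch of the forward contract above: a dict of losses in training
# mode, a list of per-image detection dicts in eval mode. Using torchvision's
# fasterrcnn_resnet50_fpn (a GeneralizedRCNN subclass) and the `weights=None`
# keyword of recent torchvision releases is an illustrative assumption.
import torch
from torchvision.models.detection import fasterrcnn_resnet50_fpn
model = fasterrcnn_resnet50_fpn(weights=None)
images = [torch.rand(3, 480, 640)]
targets = [{"boxes": torch.tensor([[10.0, 20.0, 100.0, 200.0]]),
            "labels": torch.tensor([1])}]
model.train()
losses = model(images, targets)  # dict[str, Tensor] of loss terms
model.eval()
with torch.no_grad():
    detections = model(images)  # list of {"boxes", "labels", "scores"} dicts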
|
# pants requires this import to recognize the dep
import pytest_asyncio # noqa: F401
import pytest
import os
from llama_index.embeddings.nvidia import NVIDIAEmbedding as Interface
from llama_index.embeddings.nvidia.base import DEFAULT_MODEL
from typing import Generator
# this fixture is used to mask the NVIDIA_API_KEY environment variable and restore it
# after the test. It also returns the value of the NVIDIA_API_KEY environment variable
# before it was masked so that it can be used in the test.
@pytest.fixture()
def masked_env_var() -> Generator[str, None, None]:
var = "NVIDIA_API_KEY"
try:
if val := os.environ.get(var, None):
del os.environ[var]
yield val
finally:
if val:
os.environ[var] = val
@pytest.fixture(params=[Interface])
def public_class(request: pytest.FixtureRequest) -> type:
return request.param
def pytest_collection_modifyitems(config, items):
if "NVIDIA_API_KEY" not in os.environ:
skip_marker = pytest.mark.skip(
reason="requires NVIDIA_API_KEY environment variable or --nim-endpoint option"
)
for item in items:
if "integration" in item.keywords and not config.getoption(
"--nim-endpoint"
):
item.add_marker(skip_marker)
def pytest_addoption(parser: pytest.Parser) -> None:
parser.addoption(
"--all-models",
action="store_true",
help="Run tests across all models",
)
parser.addoption(
"--model-id",
action="store",
help="Run tests for a specific chat model",
)
parser.addoption(
"--nim-endpoint",
type=str,
help="Run tests using NIM mode",
)
def get_mode(config: pytest.Config) -> dict:
nim_endpoint = config.getoption("--nim-endpoint")
if nim_endpoint:
return {"base_url": nim_endpoint}
return {}
def pytest_generate_tests(metafunc: pytest.Metafunc) -> None:
mode = get_mode(metafunc.config)
if "model" in metafunc.fixturenames:
# Default models to test - include both default and custom endpoint models
models = [DEFAULT_MODEL, "NV-Embed-QA"]
if model := metafunc.config.getoption("--model-id"):
models = [model]
elif metafunc.config.getoption("--all-models"):
models = [model.id for model in Interface(**mode).available_models]
metafunc.parametrize("model", models, ids=models)
@pytest.fixture()
def mode(request: pytest.FixtureRequest) -> dict:
return get_mode(request.config)
|
# pants requires this import to recognize the dep
import pytest_asyncio # noqa: F401
import pytest
import os
from llama_index.embeddings.nvidia import NVIDIAEmbedding as Interface
from llama_index.embeddings.nvidia.base import DEFAULT_MODEL
from typing import Generator
# this fixture is used to mask the NVIDIA_API_KEY environment variable and restore it
# after the test. It also returns the value of the NVIDIA_API_KEY environment variable
# before it was masked so that it can be used in the test.
@pytest.fixture()
def masked_env_var() -> Generator[str, None, None]:
var = "NVIDIA_API_KEY"
try:
if val := os.environ.get(var, None):
del os.environ[var]
yield val
finally:
if val:
os.environ[var] = val
@pytest.fixture(params=[Interface])
def public_class(request: pytest.FixtureRequest) -> type:
return request.param
def pytest_collection_modifyitems(config, items):
if "NVIDIA_API_KEY" not in os.environ:
skip_marker = pytest.mark.skip(
reason="requires NVIDIA_API_KEY environment variable or --nim-endpoint option"
)
for item in items:
if "integration" in item.keywords and not config.getoption(
"--nim-endpoint"
):
item.add_marker(skip_marker)
def pytest_addoption(parser: pytest.Parser) -> None:
parser.addoption(
"--all-models",
action="store_true",
help="Run tests across all models",
)
parser.addoption(
"--model-id",
action="store",
help="Run tests for a specific chat model",
)
parser.addoption(
"--nim-endpoint",
type=str,
help="Run tests using NIM mode",
)
def get_mode(config: pytest.Config) -> dict:
nim_endpoint = config.getoption("--nim-endpoint")
if nim_endpoint:
return {"base_url": nim_endpoint}
return {}
def pytest_generate_tests(metafunc: pytest.Metafunc) -> None:
mode = get_mode(metafunc.config)
if "model" in metafunc.fixturenames:
models = [DEFAULT_MODEL]
if model := metafunc.config.getoption("--model-id"):
models = [model]
elif metafunc.config.getoption("--all-models"):
models = [model.id for model in Interface(**mode).available_models]
metafunc.parametrize("model", models, ids=models)
@pytest.fixture()
def mode(request: pytest.FixtureRequest) -> dict:
return get_mode(request.config)
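# Hedged note on how the options above are typically exercised from the command line;
# the endpoint URL and test paths below are placeholders, not values from this file.
#
#   pytest tests/                                  # integration tests skipped without a key
#   NVIDIA_API_KEY=... pytest tests/               # run against the hosted API
#   pytest tests/ --nim-endpoint http://localhost:8000/v1   # NIM mode, no key required
#   pytest tests/ --all-models                     # parametrize over all available models
#   pytest tests/ --model-id NV-Embed-QA           # test a single, explicitly chosen model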
|
_base_ = ['co_dino_5scale_r50_8xb2_1x_coco.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa
load_from = 'https://download.openmmlab.com/mmdetection/v3.0/codetr/co_dino_5scale_swin_large_16e_o365tococo-614254c9.pth' # noqa
# model settings
model = dict(
backbone=dict(
_delete_=True,
type='SwinTransformer',
pretrain_img_size=384,
embed_dims=192,
depths=[2, 2, 18, 2],
num_heads=[6, 12, 24, 48],
window_size=12,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.3,
patch_norm=True,
out_indices=(0, 1, 2, 3),
# Please only add indices that would be used
        # in FPN, otherwise some parameters will not be used
with_cp=True,
convert_weights=True,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
neck=dict(in_channels=[192, 384, 768, 1536]),
query_head=dict(
dn_cfg=dict(box_noise_scale=0.4, group_cfg=dict(num_dn_queries=500)),
transformer=dict(encoder=dict(with_cp=6))))
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[
[
dict(
type='RandomChoiceResize',
scales=[(480, 2048), (512, 2048), (544, 2048), (576, 2048),
(608, 2048), (640, 2048), (672, 2048), (704, 2048),
(736, 2048), (768, 2048), (800, 2048), (832, 2048),
(864, 2048), (896, 2048), (928, 2048), (960, 2048),
(992, 2048), (1024, 2048), (1056, 2048),
(1088, 2048), (1120, 2048), (1152, 2048),
(1184, 2048), (1216, 2048), (1248, 2048),
(1280, 2048), (1312, 2048), (1344, 2048),
(1376, 2048), (1408, 2048), (1440, 2048),
(1472, 2048), (1504, 2048), (1536, 2048)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
                    # The ratio of all images in the train dataset is < 7,
                    # following the original implementation
scales=[(400, 4200), (500, 4200), (600, 4200)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 2048), (512, 2048), (544, 2048), (576, 2048),
(608, 2048), (640, 2048), (672, 2048), (704, 2048),
(736, 2048), (768, 2048), (800, 2048), (832, 2048),
(864, 2048), (896, 2048), (928, 2048), (960, 2048),
(992, 2048), (1024, 2048), (1056, 2048),
(1088, 2048), (1120, 2048), (1152, 2048),
(1184, 2048), (1216, 2048), (1248, 2048),
(1280, 2048), (1312, 2048), (1344, 2048),
(1376, 2048), (1408, 2048), (1440, 2048),
(1472, 2048), (1504, 2048), (1536, 2048)],
keep_ratio=True)
]
]),
dict(type='PackDetInputs')
]
train_dataloader = dict(
batch_size=1, num_workers=1, dataset=dict(pipeline=train_pipeline))
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(2048, 1280), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
optim_wrapper = dict(optimizer=dict(lr=1e-4))
max_epochs = 16
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[8],
gamma=0.1)
]
|
_base_ = ['co_dino_5scale_r50_8xb2_1x_coco.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa
load_from = 'https://download.openmmlab.com/mmdetection/v3.0/codetr/co_dino_5scale_swin_large_22e_o365-0a33e247.pth' # noqa
# model settings
model = dict(
backbone=dict(
_delete_=True,
type='SwinTransformer',
pretrain_img_size=384,
embed_dims=192,
depths=[2, 2, 18, 2],
num_heads=[6, 12, 24, 48],
window_size=12,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.3,
patch_norm=True,
out_indices=(0, 1, 2, 3),
# Please only add indices that would be used
        # in FPN, otherwise some parameters will not be used
with_cp=True,
convert_weights=True,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
neck=dict(in_channels=[192, 384, 768, 1536]),
query_head=dict(
dn_cfg=dict(box_noise_scale=0.4, group_cfg=dict(num_dn_queries=500)),
transformer=dict(encoder=dict(with_cp=6))))
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[
[
dict(
type='RandomChoiceResize',
scales=[(480, 2048), (512, 2048), (544, 2048), (576, 2048),
(608, 2048), (640, 2048), (672, 2048), (704, 2048),
(736, 2048), (768, 2048), (800, 2048), (832, 2048),
(864, 2048), (896, 2048), (928, 2048), (960, 2048),
(992, 2048), (1024, 2048), (1056, 2048),
(1088, 2048), (1120, 2048), (1152, 2048),
(1184, 2048), (1216, 2048), (1248, 2048),
(1280, 2048), (1312, 2048), (1344, 2048),
(1376, 2048), (1408, 2048), (1440, 2048),
(1472, 2048), (1504, 2048), (1536, 2048)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
                    # The ratio of all images in the train dataset is < 7,
                    # following the original implementation
scales=[(400, 4200), (500, 4200), (600, 4200)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 2048), (512, 2048), (544, 2048), (576, 2048),
(608, 2048), (640, 2048), (672, 2048), (704, 2048),
(736, 2048), (768, 2048), (800, 2048), (832, 2048),
(864, 2048), (896, 2048), (928, 2048), (960, 2048),
(992, 2048), (1024, 2048), (1056, 2048),
(1088, 2048), (1120, 2048), (1152, 2048),
(1184, 2048), (1216, 2048), (1248, 2048),
(1280, 2048), (1312, 2048), (1344, 2048),
(1376, 2048), (1408, 2048), (1440, 2048),
(1472, 2048), (1504, 2048), (1536, 2048)],
keep_ratio=True)
]
]),
dict(type='PackDetInputs')
]
train_dataloader = dict(
batch_size=1, num_workers=1, dataset=dict(pipeline=train_pipeline))
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(2048, 1280), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
optim_wrapper = dict(optimizer=dict(lr=1e-4))
max_epochs = 16
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[8],
gamma=0.1)
]
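# Hedged usage note: a config like the one above is normally launched through the
# standard MMDetection entry points; the config path and GPU count below are
# placeholders, not values taken from this file.
#
#   python tools/train.py path/to/this_config.py
#   bash tools/dist_train.sh path/to/this_config.py 8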
|
# Copyright (c) Meta Platforms, Inc. and affiliates
# Owner(s): ["oncall: distributed"]
from model_registry import MLPModule, ModelWithParamAlias
import torch
from torch.distributed.pipelining import pipe_split, pipeline
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
TestCase,
)
d_hid = 512
microbatch_size = 16
torch.manual_seed(0)
# Basic example
class ExampleCode(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.mm_param1 = torch.nn.Parameter(torch.randn(d_hid, d_hid))
self.mm_param2 = torch.nn.Parameter(torch.randn(d_hid, d_hid))
self.lin1 = torch.nn.Linear(d_hid, d_hid)
self.lin2 = torch.nn.Linear(d_hid, d_hid)
def forward(self, x, y):
        x = torch.mm(x, self.mm_param1) # multi-use param
skip_connection = x
x = x + y
x = torch.relu(x)
pipe_split()
        x = torch.mm(x, self.mm_param1) # multi-use param
x = self.lin1(x)
pipe_split()
x = torch.relu(x)
x = x + skip_connection
x = torch.mm(x, self.mm_param2)
pipe_split()
x = self.lin2(x)
x = torch.relu(x)
return x
class MultiMLP(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.mlp0 = MLPModule(d_hid)
self.mlp1 = MLPModule(d_hid)
self.mlp2 = MLPModule(d_hid)
self.mlp3 = MLPModule(d_hid)
def forward(self, x, y):
x = self.mlp0(x)
pipe_split()
x = self.mlp1(x)
pipe_split()
x = self.mlp2(x)
pipe_split()
x = self.mlp3(x)
return x - y
EXPECTED_N_STAGES = {
ExampleCode: 4,
MultiMLP: 4,
ModelWithParamAlias: 2,
}
# Currently, we don't enforce full set equality on the FQNs between the original
# and pipelined models, because in the multi-use param case, PP will deduplicate
# the FQNs from the state_dict.
# TODO
CHECK_FQN_SET_EQUALITY = False
class PipeTests(TestCase):
@parametrize("ModelClass", [ExampleCode, MultiMLP, ModelWithParamAlias])
def test_model_split(self, ModelClass):
mod = ModelClass()
x = torch.randn(microbatch_size, d_hid)
y = torch.randn(microbatch_size, d_hid)
pipe = pipeline(
mod,
mb_args=(x, y),
)
assert pipe.num_stages == EXPECTED_N_STAGES[ModelClass], (
f"nstages = {pipe.num_stages}, expect {EXPECTED_N_STAGES[ModelClass]}"
)
ref_out = mod(x, y)
out = pipe(x, y)[0]
torch.testing.assert_close(out, ref_out)
print(f"equivalence test passed {torch.sum(out)} ref {torch.sum(ref_out)}")
# Check qualname
# state_dict.keys include both parameters and persistent buffers
old_names = set(mod.state_dict().keys())
new_names = set()
for idx in range(pipe.num_stages):
stage_mod = pipe.get_stage_module(idx)
stage_fqns = set(stage_mod.state_dict().keys())
assert stage_fqns.issubset(old_names)
new_names.update(stage_fqns)
if CHECK_FQN_SET_EQUALITY:
assert old_names == new_names, f"""
old names {old_names}
new names {new_names}
"""
print("Qualname check passed")
instantiate_parametrized_tests(PipeTests)
if __name__ == "__main__":
run_tests()
|
# Copyright (c) Meta Platforms, Inc. and affiliates
# Owner(s): ["oncall: distributed"]
from model_registry import MLPModule, ModelWithParamAlias
import torch
from torch.distributed.pipelining import pipe_split, pipeline
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
TestCase,
)
d_hid = 512
microbatch_size = 16
torch.manual_seed(0)
# Basic example
class ExampleCode(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.mm_param1 = torch.nn.Parameter(torch.randn(d_hid, d_hid))
self.mm_param2 = torch.nn.Parameter(torch.randn(d_hid, d_hid))
self.lin1 = torch.nn.Linear(d_hid, d_hid)
self.lin2 = torch.nn.Linear(d_hid, d_hid)
def forward(self, x, y):
        x = torch.mm(x, self.mm_param1) # multi-use param
skip_connection = x
x = x + y
x = torch.relu(x)
pipe_split()
        x = torch.mm(x, self.mm_param1) # multi-use param
x = self.lin1(x)
pipe_split()
x = torch.relu(x)
x = x + skip_connection
x = torch.mm(x, self.mm_param2)
pipe_split()
x = self.lin2(x)
x = torch.relu(x)
return x
class MultiMLP(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.mlp0 = MLPModule(d_hid)
self.mlp1 = MLPModule(d_hid)
self.mlp2 = MLPModule(d_hid)
self.mlp3 = MLPModule(d_hid)
def forward(self, x, y):
x = self.mlp0(x)
pipe_split()
x = self.mlp1(x)
pipe_split()
x = self.mlp2(x)
pipe_split()
x = self.mlp3(x)
return x - y
EXPECTED_N_STAGES = {
ExampleCode: 4,
MultiMLP: 4,
ModelWithParamAlias: 2,
}
# Currently, we don't enforce full set equality on the FQNs between the original
# and pipelined models, because in the multi-use param case, PP will deduplicate
# the FQNs from the state_dict.
# TODO
CHECK_FQN_SET_EQUALITY = False
class PipeTests(TestCase):
@parametrize("ModelClass", [ExampleCode, MultiMLP, ModelWithParamAlias])
def test_model_split(self, ModelClass):
mod = ModelClass()
x = torch.randn(microbatch_size, d_hid)
y = torch.randn(microbatch_size, d_hid)
pipe = pipeline(
mod,
mb_args=(x, y),
)
assert (
pipe.num_stages == EXPECTED_N_STAGES[ModelClass]
), f"nstages = {pipe.num_stages}, expect {EXPECTED_N_STAGES[ModelClass]}"
ref_out = mod(x, y)
out = pipe(x, y)[0]
torch.testing.assert_close(out, ref_out)
print(f"equivalence test passed {torch.sum(out)} ref {torch.sum(ref_out)}")
# Check qualname
# state_dict.keys include both parameters and persistent buffers
old_names = set(mod.state_dict().keys())
new_names = set()
for idx in range(pipe.num_stages):
stage_mod = pipe.get_stage_module(idx)
stage_fqns = set(stage_mod.state_dict().keys())
assert stage_fqns.issubset(old_names)
new_names.update(stage_fqns)
if CHECK_FQN_SET_EQUALITY:
assert (
old_names == new_names
), f"""
old names {old_names}
new names {new_names}
"""
print("Qualname check passed")
instantiate_parametrized_tests(PipeTests)
if __name__ == "__main__":
run_tests()
|
"""Init file."""
from llama_index.readers.web.agentql_web.base import (
AgentQLWebReader,
)
from llama_index.readers.web.async_web.base import (
AsyncWebPageReader,
)
from llama_index.readers.web.beautiful_soup_web.base import (
BeautifulSoupWebReader,
)
from llama_index.readers.web.browserbase_web.base import BrowserbaseWebReader
from llama_index.readers.web.firecrawl_web.base import FireCrawlWebReader
from llama_index.readers.web.hyperbrowser_web.base import HyperbrowserWebReader
from llama_index.readers.web.knowledge_base.base import (
KnowledgeBaseWebReader,
)
from llama_index.readers.web.main_content_extractor.base import (
MainContentExtractorReader,
)
from llama_index.readers.web.news.base import NewsArticleReader
from llama_index.readers.web.oxylabs_web.base import OxylabsWebReader
from llama_index.readers.web.readability_web.base import (
ReadabilityWebPageReader,
)
from llama_index.readers.web.rss.base import (
RssReader,
)
from llama_index.readers.web.rss_news.base import (
RssNewsReader,
)
from llama_index.readers.web.scrapfly_web.base import (
ScrapflyReader,
)
from llama_index.readers.web.simple_web.base import (
SimpleWebPageReader,
)
from llama_index.readers.web.sitemap.base import (
SitemapReader,
)
from llama_index.readers.web.spider_web.base import (
SpiderWebReader,
)
from llama_index.readers.web.trafilatura_web.base import (
TrafilaturaWebReader,
)
from llama_index.readers.web.unstructured_web.base import (
UnstructuredURLLoader,
)
from llama_index.readers.web.whole_site.base import (
WholeSiteReader,
)
from llama_index.readers.web.zyte_web.base import (
ZyteWebReader,
)
__all__ = [
"AgentQLWebReader",
"AsyncWebPageReader",
"BeautifulSoupWebReader",
"BrowserbaseWebReader",
"FireCrawlWebReader",
"HyperbrowserWebReader",
"KnowledgeBaseWebReader",
"MainContentExtractorReader",
"NewsArticleReader",
"OxylabsWebReader",
"ReadabilityWebPageReader",
"RssReader",
"RssNewsReader",
"ScrapflyReader",
"SimpleWebPageReader",
"SitemapReader",
"SpiderWebReader",
"TrafilaturaWebReader",
"UnstructuredURLLoader",
"WholeSiteReader",
"ZyteWebReader",
]
|
"""Init file."""
from llama_index.readers.web.agentql_web.base import (
AgentQLWebReader,
)
from llama_index.readers.web.async_web.base import (
AsyncWebPageReader,
)
from llama_index.readers.web.beautiful_soup_web.base import (
BeautifulSoupWebReader,
)
from llama_index.readers.web.browserbase_web.base import BrowserbaseWebReader
from llama_index.readers.web.firecrawl_web.base import FireCrawlWebReader
from llama_index.readers.web.hyperbrowser_web.base import HyperbrowserWebReader
from llama_index.readers.web.knowledge_base.base import (
KnowledgeBaseWebReader,
)
from llama_index.readers.web.main_content_extractor.base import (
MainContentExtractorReader,
)
from llama_index.readers.web.news.base import NewsArticleReader
from llama_index.readers.web.oxylabs_web.base import OxylabsWebReader
from llama_index.readers.web.readability_web.base import (
ReadabilityWebPageReader,
)
from llama_index.readers.web.rss.base import (
RssReader,
)
from llama_index.readers.web.rss_news.base import (
RssNewsReader,
)
from llama_index.readers.web.scrapfly_web.base import (
ScrapflyReader,
)
from llama_index.readers.web.simple_web.base import (
SimpleWebPageReader,
)
from llama_index.readers.web.sitemap.base import (
SitemapReader,
)
from llama_index.readers.web.spider_web.base import (
SpiderWebReader,
)
from llama_index.readers.web.trafilatura_web.base import (
TrafilaturaWebReader,
)
from llama_index.readers.web.unstructured_web.base import (
UnstructuredURLLoader,
)
from llama_index.readers.web.whole_site.base import (
WholeSiteReader,
)
from llama_index.readers.web.zyte_web.base import (
ZyteWebReader,
)
__all__ = [
"AgentQLWebReader",
"AsyncWebPageReader",
"BeautifulSoupWebReader",
"BrowserbaseWebReader",
"FireCrawlWebReader",
"HyperbrowserWebReader",
"KnowledgeBaseWebReader",
"MainContentExtractorReader",
"NewsArticleReader",
"OxylabsWebReader",
"ReadabilityWebPageReader",
"RssReader",
"RssNewsReader",
"ScrapflyReader",
"SimpleWebPageReader",
"SitemapReader",
"SpiderWebReader",
"TrafilaturaWebReader",
"UnstructuredURLLoader",
"WholeSiteReader",
"ZyteWebReader",
]
|
import argparse
import urllib
from abc import ABC
from http import HTTPStatus
from typing import TYPE_CHECKING, Optional, Union
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
if TYPE_CHECKING:
import asyncio
import multiprocessing
import threading
class GatewayRuntime(AsyncNewLoopRuntime, ABC):
"""
The Runtime from which the GatewayRuntimes need to inherit
"""
def __init__(
self,
args: argparse.Namespace,
cancel_event: Optional[
Union['asyncio.Event', 'multiprocessing.Event', 'threading.Event']
] = None,
**kwargs,
):
# this order is intentional: The timeout is needed in _create_topology_graph(), called by super
self.timeout_send = args.timeout_send
if self.timeout_send:
self.timeout_send /= 1e3 # convert ms to seconds
super().__init__(args, cancel_event, **kwargs)
@staticmethod
def is_ready(ctrl_address: str, protocol: Optional[str] = 'grpc', **kwargs) -> bool:
"""
Check if status is ready.
:param ctrl_address: the address where the control request needs to be sent
:param protocol: protocol of the gateway runtime
:param kwargs: extra keyword arguments
:return: True if status is ready else False.
"""
if protocol is None or protocol == 'grpc':
res = super().is_ready(ctrl_address)
else:
try:
conn = urllib.request.urlopen(url=f'http://{ctrl_address}')
res = conn.code == HTTPStatus.OK
            except Exception:
res = False
return res
@classmethod
def wait_for_ready_or_shutdown(
cls,
timeout: Optional[float],
ready_or_shutdown_event: Union['multiprocessing.Event', 'threading.Event'],
ctrl_address: str,
protocol: Optional[str] = 'grpc',
**kwargs,
):
"""
Check if the runtime has successfully started
:param timeout: The time to wait before readiness or failure is determined
:param ctrl_address: the address where the control message needs to be sent
:param ready_or_shutdown_event: the multiprocessing event to detect if the process failed or is ready
:param protocol: protocol of the gateway runtime
:param kwargs: extra keyword arguments
        :return: True if the runtime is ready or needs to be shut down
"""
return super().wait_for_ready_or_shutdown(
timeout=timeout,
ready_or_shutdown_event=ready_or_shutdown_event,
ctrl_address=ctrl_address,
protocol=protocol,
)
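# Hedged usage sketch for the readiness check above; the address is a placeholder.
# gRPC gateways are probed via the parent class, while other protocols fall back to a
# plain HTTP GET against the control address.
if GatewayRuntime.is_ready('localhost:54321', protocol='http'):
    print('gateway is up')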
|
import argparse
from abc import ABC
from typing import TYPE_CHECKING, Optional, Union
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
if TYPE_CHECKING:
import asyncio
import multiprocessing
import threading
class GatewayRuntime(AsyncNewLoopRuntime, ABC):
"""
The Runtime from which the GatewayRuntimes need to inherit
"""
def __init__(
self,
args: argparse.Namespace,
cancel_event: Optional[
Union['asyncio.Event', 'multiprocessing.Event', 'threading.Event']
] = None,
**kwargs,
):
# this order is intentional: The timeout is needed in _create_topology_graph(), called by super
self.timeout_send = args.timeout_send
if self.timeout_send:
self.timeout_send /= 1e3 # convert ms to seconds
super().__init__(args, cancel_event, **kwargs)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.quantizers import deserialize as deserialize
from keras.src.quantizers import get as get
from keras.src.quantizers import serialize as serialize
from keras.src.quantizers.quantizers import AbsMaxQuantizer as AbsMaxQuantizer
from keras.src.quantizers.quantizers import Quantizer as Quantizer
from keras.src.quantizers.quantizers import abs_max_quantize as abs_max_quantize
from keras.src.quantizers.quantizers import (
compute_float8_amax_history as compute_float8_amax_history,
)
from keras.src.quantizers.quantizers import (
compute_float8_scale as compute_float8_scale,
)
from keras.src.quantizers.quantizers import (
fake_quant_with_min_max_vars as fake_quant_with_min_max_vars,
)
from keras.src.quantizers.quantizers import pack_int4 as pack_int4
from keras.src.quantizers.quantizers import (
quantize_and_dequantize as quantize_and_dequantize,
)
from keras.src.quantizers.quantizers import unpack_int4 as unpack_int4
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.quantizers import deserialize as deserialize
from keras.src.quantizers import get as get
from keras.src.quantizers import serialize as serialize
from keras.src.quantizers.quantizers import AbsMaxQuantizer as AbsMaxQuantizer
from keras.src.quantizers.quantizers import Quantizer as Quantizer
from keras.src.quantizers.quantizers import abs_max_quantize as abs_max_quantize
from keras.src.quantizers.quantizers import (
compute_float8_amax_history as compute_float8_amax_history,
)
from keras.src.quantizers.quantizers import (
compute_float8_scale as compute_float8_scale,
)
from keras.src.quantizers.quantizers import (
fake_quant_with_min_max_vars as fake_quant_with_min_max_vars,
)
from keras.src.quantizers.quantizers import (
quantize_and_dequantize as quantize_and_dequantize,
)
|
from backend.blocks.jina._auth import (
JinaCredentials,
JinaCredentialsField,
JinaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
class JinaChunkingBlock(Block):
class Input(BlockSchema):
texts: list = SchemaField(description="List of texts to chunk")
credentials: JinaCredentialsInput = JinaCredentialsField()
max_chunk_length: int = SchemaField(
description="Maximum length of each chunk", default=1000
)
return_tokens: bool = SchemaField(
description="Whether to return token information", default=False
)
class Output(BlockSchema):
chunks: list = SchemaField(description="List of chunked texts")
tokens: list = SchemaField(
description="List of token information for each chunk", optional=True
)
def __init__(self):
super().__init__(
id="806fb15e-830f-4796-8692-557d300ff43c",
description="Chunks texts using Jina AI's segmentation service",
categories={BlockCategory.AI, BlockCategory.TEXT},
input_schema=JinaChunkingBlock.Input,
output_schema=JinaChunkingBlock.Output,
)
def run(
self, input_data: Input, *, credentials: JinaCredentials, **kwargs
) -> BlockOutput:
url = "https://segment.jina.ai/"
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
}
all_chunks = []
all_tokens = []
for text in input_data.texts:
data = {
"content": text,
"return_tokens": str(input_data.return_tokens).lower(),
"return_chunks": "true",
"max_chunk_length": str(input_data.max_chunk_length),
}
response = requests.post(url, headers=headers, json=data)
result = response.json()
all_chunks.extend(result.get("chunks", []))
if input_data.return_tokens:
all_tokens.extend(result.get("tokens", []))
yield "chunks", all_chunks
if input_data.return_tokens:
yield "tokens", all_tokens
|
import requests
from backend.blocks.jina._auth import (
JinaCredentials,
JinaCredentialsField,
JinaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class JinaChunkingBlock(Block):
class Input(BlockSchema):
texts: list = SchemaField(description="List of texts to chunk")
credentials: JinaCredentialsInput = JinaCredentialsField()
max_chunk_length: int = SchemaField(
description="Maximum length of each chunk", default=1000
)
return_tokens: bool = SchemaField(
description="Whether to return token information", default=False
)
class Output(BlockSchema):
chunks: list = SchemaField(description="List of chunked texts")
tokens: list = SchemaField(
description="List of token information for each chunk", optional=True
)
def __init__(self):
super().__init__(
id="806fb15e-830f-4796-8692-557d300ff43c",
description="Chunks texts using Jina AI's segmentation service",
categories={BlockCategory.AI, BlockCategory.TEXT},
input_schema=JinaChunkingBlock.Input,
output_schema=JinaChunkingBlock.Output,
)
def run(
self, input_data: Input, *, credentials: JinaCredentials, **kwargs
) -> BlockOutput:
url = "https://segment.jina.ai/"
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
}
all_chunks = []
all_tokens = []
for text in input_data.texts:
data = {
"content": text,
"return_tokens": str(input_data.return_tokens).lower(),
"return_chunks": "true",
"max_chunk_length": str(input_data.max_chunk_length),
}
response = requests.post(url, headers=headers, json=data)
response.raise_for_status()
result = response.json()
all_chunks.extend(result.get("chunks", []))
if input_data.return_tokens:
all_tokens.extend(result.get("tokens", []))
yield "chunks", all_chunks
if input_data.return_tokens:
yield "tokens", all_tokens
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class FSAF(SingleStageDetector):
"""Implementation of `FSAF <https://arxiv.org/abs/1903.00621>`_"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None):
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class FSAF(SingleStageDetector):
"""Implementation of `FSAF <https://arxiv.org/abs/1903.00621>`_"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
preprocess_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = None):
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
preprocess_cfg=preprocess_cfg,
init_cfg=init_cfg)
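# Hedged, abridged config sketch (an illustrative assumption, not a complete working
# config): since FSAF is registered in MODELS, it is normally built from a config dict
# like the one below rather than instantiated directly; real configs set many more
# fields on each component.
model = dict(
    type='FSAF',
    backbone=dict(type='ResNet', depth=50, out_indices=(0, 1, 2, 3)),
    neck=dict(
        type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5),
    bbox_head=dict(type='FSAFHead', num_classes=80, in_channels=256),
    train_cfg=None,
    test_cfg=None)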
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders import WhatsAppChatLoader
from langchain_community.document_loaders.whatsapp_chat import concatenate_rows
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"concatenate_rows": "langchain_community.document_loaders.whatsapp_chat",
"WhatsAppChatLoader": "langchain_community.document_loaders",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"WhatsAppChatLoader",
"concatenate_rows",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders import WhatsAppChatLoader
from langchain_community.document_loaders.whatsapp_chat import concatenate_rows
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"concatenate_rows": "langchain_community.document_loaders.whatsapp_chat",
"WhatsAppChatLoader": "langchain_community.document_loaders",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"concatenate_rows",
"WhatsAppChatLoader",
]
|
import logging
import os
import json
from typing import Any, List, Optional, cast
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.constants import DEFAULT_SIMILARITY_TOP_K
from llama_index.core.schema import NodeWithScore, QueryBundle, TextNode
from llama_index.core.vector_stores.utils import metadata_dict_to_node
logger = logging.getLogger(__name__)
class MongoDBAtlasBM25Retriever(BaseRetriever):
def __init__(
self,
mongodb_client: Optional[Any] = None,
db_name: str = "default_db",
collection_name: str = "default_collection",
index_name: str = "default",
text_key: str = "text",
metadata_key: str = "metadata",
similarity_top_k: int = DEFAULT_SIMILARITY_TOP_K,
) -> None:
"""
Initialize the vector store.
Args:
mongodb_client: A MongoDB client.
db_name: A MongoDB database name.
collection_name: A MongoDB collection name.
index_name: A MongoDB Atlas Vector Search index name.
text_key: A MongoDB field that will contain the text for each document.
            metadata_key: A MongoDB field that will contain the document metadata.
"""
import_err_msg = "`pymongo` package not found, please run `pip install pymongo`"
try:
from importlib.metadata import version
from pymongo import MongoClient
from pymongo.driver_info import DriverInfo
except ImportError:
raise ImportError(import_err_msg)
if mongodb_client is not None:
self._mongodb_client = cast(MongoClient, mongodb_client)
else:
if "MONGO_URI" not in os.environ:
raise ValueError(
"Must specify MONGO_URI via env variable "
"if not directly passing in client."
)
self._mongodb_client = MongoClient(
os.environ["MONGO_URI"],
driver=DriverInfo(name="llama-index", version=version("llama-index")),
)
self._db = self._mongodb_client[db_name]
self._collection = self._db[collection_name]
self._index_name = index_name
self._text_key = text_key
self._metadata_key = metadata_key
self._similarity_top_k = similarity_top_k
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
"""Retrieve nodes given query."""
query = query_bundle.query_str
pipeline = [
{
"$search": {
"index": self._index_name,
"text": {"query": query, "path": self._text_key},
}
},
{"$addFields": {"score": {"$meta": "searchScore"}}},
{"$sort": {"score": -1}},
{"$limit": self._similarity_top_k},
]
results = list(self._collection.aggregate(pipeline))
retrieve_nodes = []
for result in results[: self._similarity_top_k]:
doc = self._collection.find_one({"_id": result["_id"]})
node = doc[self._text_key]
node_content = json.loads(
doc.get("metadata", {}).get("_node_content", "{}")
)
metadata_dict = doc.pop(self._metadata_key)
node = None
try:
node = metadata_dict_to_node(metadata_dict)
node.set_content(doc["text"])
except Exception:
node = TextNode(
text=doc["text"],
id_=doc["id"],
metadata=doc.get("metadata", {}),
start_char_idx=node_content.get("start_char_idx", None),
end_char_idx=node_content.get("end_char_idx", None),
relationships=node_content.get("relationships", None),
)
node_with_score = NodeWithScore(node=node, score=result["score"])
retrieve_nodes.append(node_with_score)
return retrieve_nodes
|
import logging
import os
import json
from typing import Any, List, Optional, cast
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.constants import DEFAULT_SIMILARITY_TOP_K
from llama_index.core.schema import NodeWithScore, QueryBundle, TextNode
from llama_index.core.vector_stores.utils import metadata_dict_to_node
logger = logging.getLogger(__name__)
class MongoDBAtlasBM25Retriever(BaseRetriever):
def __init__(
self,
mongodb_client: Optional[Any] = None,
db_name: str = "default_db",
collection_name: str = "default_collection",
index_name: str = "default",
text_key: str = "text",
metadata_key: str = "metadata",
similarity_top_k: int = DEFAULT_SIMILARITY_TOP_K,
) -> None:
"""Initialize the vector store.
Args:
mongodb_client: A MongoDB client.
db_name: A MongoDB database name.
collection_name: A MongoDB collection name.
index_name: A MongoDB Atlas Vector Search index name.
text_key: A MongoDB field that will contain the text for each document.
            metadata_key: A MongoDB field that will contain the document metadata.
"""
import_err_msg = "`pymongo` package not found, please run `pip install pymongo`"
try:
from importlib.metadata import version
from pymongo import MongoClient
from pymongo.driver_info import DriverInfo
except ImportError:
raise ImportError(import_err_msg)
if mongodb_client is not None:
self._mongodb_client = cast(MongoClient, mongodb_client)
else:
if "MONGO_URI" not in os.environ:
raise ValueError(
"Must specify MONGO_URI via env variable "
"if not directly passing in client."
)
self._mongodb_client = MongoClient(
os.environ["MONGO_URI"],
driver=DriverInfo(name="llama-index", version=version("llama-index")),
)
self._db = self._mongodb_client[db_name]
self._collection = self._db[collection_name]
self._index_name = index_name
self._text_key = text_key
self._metadata_key = metadata_key
self._similarity_top_k = similarity_top_k
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
"""Retrieve nodes given query."""
query = query_bundle.query_str
pipeline = [
{
"$search": {
"index": self._index_name,
"text": {"query": query, "path": self._text_key},
}
},
{"$addFields": {"score": {"$meta": "searchScore"}}},
{"$sort": {"score": -1}},
{"$limit": self._similarity_top_k},
]
results = list(self._collection.aggregate(pipeline))
retrieve_nodes = []
for result in results[: self._similarity_top_k]:
doc = self._collection.find_one({"_id": result["_id"]})
node = doc[self._text_key]
node_content = json.loads(
doc.get("metadata", {}).get("_node_content", "{}")
)
metadata_dict = doc.pop(self._metadata_key)
node = None
try:
node = metadata_dict_to_node(metadata_dict)
node.set_content(doc["text"])
except Exception:
node = TextNode(
text=doc["text"],
id_=doc["id"],
metadata=doc.get("metadata", {}),
start_char_idx=node_content.get("start_char_idx", None),
end_char_idx=node_content.get("end_char_idx", None),
relationships=node_content.get("relationships", None),
)
node_with_score = NodeWithScore(node=node, score=result["score"])
retrieve_nodes.append(node_with_score)
return retrieve_nodes
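# Hedged usage sketch for the retriever above; the connection string, database,
# collection, and index names are placeholders, and the collection is assumed to
# already hold documents with the text/metadata layout expected by _retrieve().
from pymongo import MongoClient
client = MongoClient("mongodb+srv://user:password@cluster.example.mongodb.net")
retriever = MongoDBAtlasBM25Retriever(
    mongodb_client=client,
    db_name="my_db",
    collection_name="my_collection",
    index_name="default",
    similarity_top_k=5,
)
nodes = retriever.retrieve("What does the report say about pricing?")
for node_with_score in nodes:
    print(node_with_score.score, node_with_score.node.get_content()[:80])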
|
# ReAct agent formatter
import logging
from abc import abstractmethod
from typing import List, Optional, Sequence
from llama_index.core.agent.react.prompts import (
CONTEXT_REACT_CHAT_SYSTEM_HEADER,
REACT_CHAT_SYSTEM_HEADER,
)
from llama_index.core.agent.react.types import (
BaseReasoningStep,
ObservationReasoningStep,
)
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.core.bridge.pydantic import BaseModel, ConfigDict, Field
from llama_index.core.tools import BaseTool
logger = logging.getLogger(__name__)
def get_react_tool_descriptions(tools: Sequence[BaseTool]) -> List[str]:
"""Tool."""
tool_descs = []
for tool in tools:
tool_desc = (
f"> Tool Name: {tool.metadata.name}\n"
f"Tool Description: {tool.metadata.description}\n"
f"Tool Args: {tool.metadata.fn_schema_str}\n"
)
tool_descs.append(tool_desc)
return tool_descs
# TODO: come up with better name
class BaseAgentChatFormatter(BaseModel):
"""Base chat formatter."""
model_config = ConfigDict(arbitrary_types_allowed=True)
@abstractmethod
def format(
self,
tools: Sequence[BaseTool],
chat_history: List[ChatMessage],
current_reasoning: Optional[List[BaseReasoningStep]] = None,
) -> List[ChatMessage]:
"""Format chat history into list of ChatMessage."""
class ReActChatFormatter(BaseAgentChatFormatter):
"""ReAct chat formatter."""
system_header: str = REACT_CHAT_SYSTEM_HEADER # default
context: str = "" # not needed w/ default
observation_role: MessageRole = Field(
default=MessageRole.USER,
description=(
"Message role of tool outputs. If the LLM you use supports function/tool "
"calling, you may set it to `MessageRole.TOOL` to avoid the tool outputs "
"being misinterpreted as new user messages."
),
)
def format(
self,
tools: Sequence[BaseTool],
chat_history: List[ChatMessage],
current_reasoning: Optional[List[BaseReasoningStep]] = None,
) -> List[ChatMessage]:
"""Format chat history into list of ChatMessage."""
current_reasoning = current_reasoning or []
format_args = {
"tool_desc": "\n".join(get_react_tool_descriptions(tools)),
"tool_names": ", ".join([tool.metadata.get_name() for tool in tools]),
}
if self.context:
format_args["context"] = self.context
fmt_sys_header = self.system_header.format(**format_args)
        # format reasoning history as alternating assistant and observation
        # messages, where the assistant messages are thoughts and actions and
        # the observation messages use the configured observation role
reasoning_history = []
for reasoning_step in current_reasoning:
if isinstance(reasoning_step, ObservationReasoningStep):
message = ChatMessage(
role=self.observation_role,
content=reasoning_step.get_content(),
)
else:
message = ChatMessage(
role=MessageRole.ASSISTANT,
content=reasoning_step.get_content(),
)
reasoning_history.append(message)
return [
ChatMessage(role=MessageRole.SYSTEM, content=fmt_sys_header),
*chat_history,
*reasoning_history,
]
@classmethod
def from_defaults(
cls,
system_header: Optional[str] = None,
context: Optional[str] = None,
observation_role: MessageRole = MessageRole.USER,
) -> "ReActChatFormatter":
"""Create ReActChatFormatter from defaults."""
if not system_header:
system_header = (
REACT_CHAT_SYSTEM_HEADER
if not context
else CONTEXT_REACT_CHAT_SYSTEM_HEADER
)
return ReActChatFormatter(
system_header=system_header,
context=context or "",
observation_role=observation_role,
)
@classmethod
def from_context(cls, context: str) -> "ReActChatFormatter":
"""Create ReActChatFormatter from context.
NOTE: deprecated
"""
logger.warning(
"ReActChatFormatter.from_context is deprecated, please use `from_defaults` instead."
)
return ReActChatFormatter.from_defaults(
system_header=CONTEXT_REACT_CHAT_SYSTEM_HEADER, context=context
)
|
# ReAct agent formatter
import logging
from abc import abstractmethod
from typing import List, Optional, Sequence
from llama_index.core.agent.react.prompts import (
CONTEXT_REACT_CHAT_SYSTEM_HEADER,
REACT_CHAT_SYSTEM_HEADER,
)
from llama_index.core.agent.react.types import (
BaseReasoningStep,
ObservationReasoningStep,
)
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.core.bridge.pydantic import BaseModel, ConfigDict
from llama_index.core.tools import BaseTool
logger = logging.getLogger(__name__)
def get_react_tool_descriptions(tools: Sequence[BaseTool]) -> List[str]:
"""Tool."""
tool_descs = []
for tool in tools:
tool_desc = (
f"> Tool Name: {tool.metadata.name}\n"
f"Tool Description: {tool.metadata.description}\n"
f"Tool Args: {tool.metadata.fn_schema_str}\n"
)
tool_descs.append(tool_desc)
return tool_descs
# TODO: come up with better name
class BaseAgentChatFormatter(BaseModel):
"""Base chat formatter."""
model_config = ConfigDict(arbitrary_types_allowed=True)
@abstractmethod
def format(
self,
tools: Sequence[BaseTool],
chat_history: List[ChatMessage],
current_reasoning: Optional[List[BaseReasoningStep]] = None,
) -> List[ChatMessage]:
"""Format chat history into list of ChatMessage."""
class ReActChatFormatter(BaseAgentChatFormatter):
"""ReAct chat formatter."""
system_header: str = REACT_CHAT_SYSTEM_HEADER # default
context: str = "" # not needed w/ default
def format(
self,
tools: Sequence[BaseTool],
chat_history: List[ChatMessage],
current_reasoning: Optional[List[BaseReasoningStep]] = None,
) -> List[ChatMessage]:
"""Format chat history into list of ChatMessage."""
current_reasoning = current_reasoning or []
format_args = {
"tool_desc": "\n".join(get_react_tool_descriptions(tools)),
"tool_names": ", ".join([tool.metadata.get_name() for tool in tools]),
}
if self.context:
format_args["context"] = self.context
fmt_sys_header = self.system_header.format(**format_args)
# format reasoning history as alternating user and assistant messages
# where the assistant messages are thoughts and actions and the user
# messages are observations
reasoning_history = []
for reasoning_step in current_reasoning:
if isinstance(reasoning_step, ObservationReasoningStep):
message = ChatMessage(
role=MessageRole.USER,
content=reasoning_step.get_content(),
)
else:
message = ChatMessage(
role=MessageRole.ASSISTANT,
content=reasoning_step.get_content(),
)
reasoning_history.append(message)
return [
ChatMessage(role=MessageRole.SYSTEM, content=fmt_sys_header),
*chat_history,
*reasoning_history,
]
@classmethod
def from_defaults(
cls,
system_header: Optional[str] = None,
context: Optional[str] = None,
) -> "ReActChatFormatter":
"""Create ReActChatFormatter from defaults."""
if not system_header:
system_header = (
REACT_CHAT_SYSTEM_HEADER
if not context
else CONTEXT_REACT_CHAT_SYSTEM_HEADER
)
return ReActChatFormatter(
system_header=system_header,
context=context or "",
)
@classmethod
def from_context(cls, context: str) -> "ReActChatFormatter":
"""Create ReActChatFormatter from context.
NOTE: deprecated
"""
logger.warning(
"ReActChatFormatter.from_context is deprecated, please use `from_defaults` instead."
)
return ReActChatFormatter.from_defaults(
system_header=CONTEXT_REACT_CHAT_SYSTEM_HEADER, context=context
)
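# Hedged usage sketch for the formatter above; the FunctionTool and the toy multiply
# function are illustrative assumptions, not part of this module.
from llama_index.core.tools import FunctionTool
def multiply(a: int, b: int) -> int:
    """Multiply two integers."""
    return a * b
formatter = ReActChatFormatter.from_defaults()
messages = formatter.format(
    tools=[FunctionTool.from_defaults(fn=multiply)],
    chat_history=[ChatMessage(role=MessageRole.USER, content="What is 3 * 4?")],
)
# messages[0] carries the formatted ReAct system prompt; the rest is the chat history.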
|
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from docarray.proto import NodeProto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.helper import _uri_to_blob
class TextUrl(AnyUrl):
"""
URL to a text file.
    Can be a remote (web) URL, or a local file path.
"""
def _to_node_protobuf(self) -> 'NodeProto':
"""Convert Document into a NodeProto protobuf message. This function should
        be called when the Document is nested into another Document that needs to
be converted into a protobuf
:return: the nested item protobuf message
"""
from docarray.proto import NodeProto
return NodeProto(text_url=str(self))
def load_to_bytes(self, timeout: Optional[float] = None) -> bytes:
"""
Load the text file into a bytes object.
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDocument
from docarray.typing import TextUrl
class MyDoc(BaseDocument):
remote_url: TextUrl
local_url: TextUrl
doc = MyDoc(
remote_url='https://de.wikipedia.org/wiki/Brixen',
local_url='home/username/my_file.txt',
)
remote_txt_bytes = doc.remote_url.load_to_bytes()
local_txt_bytes = doc.local_url.load_to_bytes()
:param timeout: timeout (sec) for urlopen network request.
Only relevant if URL is not local
:return: the text file content as bytes
"""
return _uri_to_blob(self, timeout=timeout)
def load(self, charset: str = 'utf-8', timeout: Optional[float] = None) -> str:
"""
Load the text file into a string.
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDocument
from docarray.typing import TextUrl
class MyDoc(BaseDocument):
remote_url: TextUrl
local_url: TextUrl
doc = MyDoc(
remote_url='https://de.wikipedia.org/wiki/Brixen',
local_url='home/username/my_file.txt',
)
remote_txt = doc.remote_url.load()
print(remote_txt)
# prints: ```<!DOCTYPE html>\n<html class="client-nojs" ... > ...```
local_txt = doc.local_url.load()
print(local_txt)
# prints content of my_file.txt
:param timeout: timeout (sec) for urlopen network request.
Only relevant if URL is not local
:param charset: decoding charset; may be any character set registered with IANA
:return: the text file content
"""
_bytes = _uri_to_blob(self, timeout=timeout)
return _bytes.decode(charset)
|
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from docarray.proto import NodeProto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.helper import _uri_to_blob
class TextUrl(AnyUrl):
"""
URL to a text file.
Can be a remote (web) URL or a local file path.
"""
def _to_node_protobuf(self) -> 'NodeProto':
"""Convert Document into a NodeProto protobuf message. This function should
be called when the Document is nested into another Document that needs to
be converted into a protobuf.
:return: the nested item protobuf message
"""
from docarray.proto import NodeProto
return NodeProto(text_url=str(self))
def load_to_bytes(self, timeout: Optional[float] = None) -> bytes:
"""
Load the text file into a bytes object.
EXAMPLE USAGE
.. code-block:: python
from docarray import Document
from docarray.typing import TextUrl
class MyDoc(Document):
remote_url: TextUrl
local_url: TextUrl
doc = MyDoc(
remote_url='https://de.wikipedia.org/wiki/Brixen',
local_url='home/username/my_file.txt',
)
remote_txt_bytes = doc.remote_url.load_to_bytes()
local_txt_bytes = doc.local_url.load_to_bytes()
:param timeout: timeout (sec) for urlopen network request.
Only relevant if URL is not local
:return: the text file content as bytes
"""
return _uri_to_blob(self, timeout=timeout)
def load(self, charset: str = 'utf-8', timeout: Optional[float] = None) -> str:
"""
Load the text file into a string.
EXAMPLE USAGE
.. code-block:: python
from docarray import Document
from docarray.typing import TextUrl
class MyDoc(Document):
remote_url: TextUrl
local_url: TextUrl
doc = MyDoc(
remote_url='https://de.wikipedia.org/wiki/Brixen',
local_url='home/username/my_file.txt',
)
remote_txt = doc.remote_url.load()
print(remote_txt)
# prints: ```<!DOCTYPE html>\n<html class="client-nojs" ... > ...```
local_txt = doc.local_url.load()
print(local_txt)
# prints content of my_file.txt
:param timeout: timeout (sec) for urlopen network request.
Only relevant if URL is not local
:param charset: decoding charset; may be any character set registered with IANA
:return: the text file content
"""
_bytes = _uri_to_blob(self, timeout=timeout)
return _bytes.decode(charset)
|
# Initialize extension and backend first
from . import ( # noqa # usort: skip
_extension,
_backend,
)
from . import ( # noqa: F401
backend, # For BC
compliance,
datasets,
functional,
io,
kaldi_io,
models,
pipelines,
sox_effects,
transforms,
utils,
)
from ._backend import AudioMetaData, get_audio_backend, info, list_audio_backends, load, save, set_audio_backend
try:
from .version import __version__, git_version # noqa: F401
except ImportError:
pass
__all__ = [
"AudioMetaData",
"load",
"info",
"save",
"io",
"compliance",
"datasets",
"functional",
"models",
"pipelines",
"kaldi_io",
"utils",
"sox_effects",
"transforms",
"list_audio_backends",
"get_audio_backend",
"set_audio_backend",
]
|
from . import ( # noqa: F401
_extension,
compliance,
datasets,
functional,
io,
kaldi_io,
models,
pipelines,
sox_effects,
transforms,
utils,
)
from ._backend.common import AudioMetaData # noqa
try:
from .version import __version__, git_version # noqa: F401
except ImportError:
pass
def _is_backend_dispatcher_enabled():
import os
return os.getenv("TORCHAUDIO_USE_BACKEND_DISPATCHER", default="1") == "1"
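# Illustrative note (not part of the original module): the dispatcher is toggled via the
# environment variable read above, so it must be set before `import torchaudio`, e.g.
#
#   import os
#   os.environ["TORCHAUDIO_USE_BACKEND_DISPATCHER"] = "0"  # force the legacy backend path below
#   import torchaudio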
if _is_backend_dispatcher_enabled():
from ._backend import _init_backend, get_audio_backend, list_audio_backends, set_audio_backend
else:
from .backend import _init_backend, get_audio_backend, list_audio_backends, set_audio_backend
_init_backend()
# for backward compatibility. This has to happen after _backend is imported.
from . import backend # noqa: F401
__all__ = [
"AudioMetaData",
"io",
"compliance",
"datasets",
"functional",
"models",
"pipelines",
"kaldi_io",
"utils",
"sox_effects",
"transforms",
"list_audio_backends",
"get_audio_backend",
"set_audio_backend",
]
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import abstractmethod
from typing import TYPE_CHECKING, Any, Type, TypeVar
from pydantic import parse_obj_as
from docarray.typing.abstract_type import AbstractType
from docarray.utils._internal.pydantic import bytes_validator, is_pydantic_v2
if is_pydantic_v2:
from pydantic_core import core_schema
if TYPE_CHECKING:
from docarray.proto import NodeProto
if is_pydantic_v2:
from pydantic import GetCoreSchemaHandler
T = TypeVar('T', bound='BaseBytes')
class BaseBytes(bytes, AbstractType):
"""
Bytes type for docarray
"""
@classmethod
def _docarray_validate(
cls: Type[T],
value: Any,
) -> T:
value = bytes_validator(value)
return cls(value)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
return parse_obj_as(cls, pb_msg)
def _to_node_protobuf(self: T) -> 'NodeProto':
from docarray.proto import NodeProto
return NodeProto(blob=self, type=self._proto_type_name)
if is_pydantic_v2:
@classmethod
@abstractmethod
def __get_pydantic_core_schema__(
cls, _source_type: Any, _handler: 'GetCoreSchemaHandler'
) -> 'core_schema.CoreSchema':
return core_schema.general_after_validator_function(
cls.validate,
core_schema.bytes_schema(),
)
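# Illustrative sketch (not part of the original module): concrete bytes types subclass
# BaseBytes and inherit its validation, so raw bytes can be parsed into them the same way
# `from_protobuf` does above. `MyBytes` below is a hypothetical subclass.
#
#   class MyBytes(BaseBytes):
#       _proto_type_name = 'my_bytes'
#
#   data = parse_obj_as(MyBytes, b'hello')  # validated via bytes_validator, cast to MyBytes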
|
from abc import abstractmethod
from typing import TYPE_CHECKING, Any, Type, TypeVar
from pydantic import parse_obj_as
from docarray.typing.abstract_type import AbstractType
from docarray.utils._internal.pydantic import bytes_validator, is_pydantic_v2
if is_pydantic_v2:
from pydantic_core import core_schema
if TYPE_CHECKING:
from docarray.proto import NodeProto
if is_pydantic_v2:
from pydantic import GetCoreSchemaHandler
T = TypeVar('T', bound='BaseBytes')
class BaseBytes(bytes, AbstractType):
"""
Bytes type for docarray
"""
@classmethod
def _docarray_validate(
cls: Type[T],
value: Any,
) -> T:
value = bytes_validator(value)
return cls(value)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
return parse_obj_as(cls, pb_msg)
def _to_node_protobuf(self: T) -> 'NodeProto':
from docarray.proto import NodeProto
return NodeProto(blob=self, type=self._proto_type_name)
if is_pydantic_v2:
@classmethod
@abstractmethod
def __get_pydantic_core_schema__(
cls, _source_type: Any, _handler: 'GetCoreSchemaHandler'
) -> 'core_schema.CoreSchema':
return core_schema.general_after_validator_function(
cls.validate,
core_schema.bytes_schema(),
)
|
"""
==================================
Getting started with transforms v2
==================================
Most computer vision tasks are not supported out of the box by ``torchvision.transforms`` v1, since it only supports
images. ``torchvision.transforms.v2`` enables jointly transforming images, videos, bounding boxes, and masks. This
example showcases the core functionality of the new ``torchvision.transforms.v2`` API.
"""
import pathlib
import torch
def load_data():
from torchvision.io import read_image
from torchvision import datapoints
from torchvision.ops import masks_to_boxes
assets_directory = pathlib.Path("../assets")
path = assets_directory / "FudanPed00054.png"
image = datapoints.Image(read_image(str(path)))
merged_masks = read_image(str(assets_directory / "FudanPed00054_mask.png"))
labels = torch.unique(merged_masks)[1:]
masks = datapoints.Mask(merged_masks == labels.view(-1, 1, 1))
bounding_boxes = datapoints.BoundingBoxes(
masks_to_boxes(masks), format=datapoints.BoundingBoxFormat.XYXY, canvas_size=image.shape[-2:]
)
return path, image, bounding_boxes, masks, labels
# %%
# The :mod:`torchvision.transforms.v2` API supports images, videos, bounding boxes, and instance and segmentation
# masks. Thus, it offers native support for many Computer Vision tasks, like image and video classification, object
# detection or instance and semantic segmentation. Still, the interface is the same, making
# :mod:`torchvision.transforms.v2` a drop-in replacement for the existing :mod:`torchvision.transforms` API, aka v1.
import torchvision.transforms.v2 as transforms
transform = transforms.Compose(
[
transforms.ColorJitter(contrast=0.5),
transforms.RandomRotation(30),
transforms.CenterCrop(480),
]
)
# %%
# :mod:`torchvision.transforms.v2` natively supports jointly transforming multiple inputs while making sure that
# potential random behavior is consistent across all inputs. However, it doesn't enforce a specific input structure or
# order.
path, image, bounding_boxes, masks, labels = load_data()
torch.manual_seed(0)
new_image = transform(image) # Image Classification
new_image, new_bounding_boxes, new_labels = transform(image, bounding_boxes, labels) # Object Detection
new_image, new_bounding_boxes, new_masks, new_labels = transform(
image, bounding_boxes, masks, labels
) # Instance Segmentation
new_image, new_target = transform((image, {"boxes": bounding_boxes, "labels": labels})) # Arbitrary Structure
# %%
# Under the hood, :mod:`torchvision.transforms.v2` relies on :mod:`torchvision.datapoints` for the dispatch to the
# appropriate function for the input data: :ref:`sphx_glr_auto_examples_v2_transforms_plot_datapoints.py`. Note, however, that as
# a regular user you likely don't have to touch this yourself. See
# :ref:`sphx_glr_auto_examples_v2_transforms_plot_transforms_v2_e2e.py`.
#
# All "foreign" types like :class:`str`'s or :class:`pathlib.Path`'s are passed through, allowing to store extra
# information directly with the sample:
sample = {"path": path, "image": image}
new_sample = transform(sample)
assert new_sample["path"] is sample["path"]
# %%
# As stated above, :mod:`torchvision.transforms.v2` is a drop-in replacement for :mod:`torchvision.transforms` and thus
# also supports transforming plain :class:`torch.Tensor`'s as image or video if applicable. This is achieved with a
# simple heuristic:
#
# * If we find an explicit image or video (:class:`torchvision.datapoints.Image`, :class:`torchvision.datapoints.Video`,
# or :class:`PIL.Image.Image`) in the input, all other plain tensors are passed through.
# * If there is no explicit image or video, only the first plain :class:`torch.Tensor` will be transformed as image or
# video, while all others will be passed through.
plain_tensor_image = torch.rand(image.shape)
print(image.shape, plain_tensor_image.shape)
# passing a plain tensor together with an explicit image will not transform the former
plain_tensor_image, image = transform(plain_tensor_image, image)
print(image.shape, plain_tensor_image.shape)
# passing a plain tensor without an explicit image will transform the former
plain_tensor_image, _ = transform(plain_tensor_image, bounding_boxes)
print(image.shape, plain_tensor_image.shape)
|
"""
==================================
Getting started with transforms v2
==================================
Most computer vision tasks are not supported out of the box by ``torchvision.transforms`` v1, since it only supports
images. ``torchvision.transforms.v2`` enables jointly transforming images, videos, bounding boxes, and masks. This
example showcases the core functionality of the new ``torchvision.transforms.v2`` API.
"""
import pathlib
import torch
import torchvision
def load_data():
from torchvision.io import read_image
from torchvision import datapoints
from torchvision.ops import masks_to_boxes
assets_directory = pathlib.Path("../assets")
path = assets_directory / "FudanPed00054.png"
image = datapoints.Image(read_image(str(path)))
merged_masks = read_image(str(assets_directory / "FudanPed00054_mask.png"))
labels = torch.unique(merged_masks)[1:]
masks = datapoints.Mask(merged_masks == labels.view(-1, 1, 1))
bounding_boxes = datapoints.BoundingBoxes(
masks_to_boxes(masks), format=datapoints.BoundingBoxFormat.XYXY, canvas_size=image.shape[-2:]
)
return path, image, bounding_boxes, masks, labels
# %%
# The :mod:`torchvision.transforms.v2` API supports images, videos, bounding boxes, and instance and segmentation
# masks. Thus, it offers native support for many Computer Vision tasks, like image and video classification, object
# detection or instance and semantic segmentation. Still, the interface is the same, making
# :mod:`torchvision.transforms.v2` a drop-in replacement for the existing :mod:`torchvision.transforms` API, aka v1.
# We are using BETA APIs, so we deactivate the associated warning, thereby acknowledging that
# some APIs may slightly change in the future
torchvision.disable_beta_transforms_warning()
import torchvision.transforms.v2 as transforms
transform = transforms.Compose(
[
transforms.ColorJitter(contrast=0.5),
transforms.RandomRotation(30),
transforms.CenterCrop(480),
]
)
# %%
# :mod:`torchvision.transforms.v2` natively supports jointly transforming multiple inputs while making sure that
# potential random behavior is consistent across all inputs. However, it doesn't enforce a specific input structure or
# order.
path, image, bounding_boxes, masks, labels = load_data()
torch.manual_seed(0)
new_image = transform(image) # Image Classification
new_image, new_bounding_boxes, new_labels = transform(image, bounding_boxes, labels) # Object Detection
new_image, new_bounding_boxes, new_masks, new_labels = transform(
image, bounding_boxes, masks, labels
) # Instance Segmentation
new_image, new_target = transform((image, {"boxes": bounding_boxes, "labels": labels})) # Arbitrary Structure
# %%
# Under the hood, :mod:`torchvision.transforms.v2` relies on :mod:`torchvision.datapoints` for the dispatch to the
# appropriate function for the input data: :ref:`sphx_glr_auto_examples_v2_transforms_plot_datapoints.py`. Note, however, that as
# a regular user you likely don't have to touch this yourself. See
# :ref:`sphx_glr_auto_examples_v2_transforms_plot_transforms_v2_e2e.py`.
#
# All "foreign" types like :class:`str`'s or :class:`pathlib.Path`'s are passed through, allowing to store extra
# information directly with the sample:
sample = {"path": path, "image": image}
new_sample = transform(sample)
assert new_sample["path"] is sample["path"]
# %%
# As stated above, :mod:`torchvision.transforms.v2` is a drop-in replacement for :mod:`torchvision.transforms` and thus
# also supports transforming plain :class:`torch.Tensor`'s as image or video if applicable. This is achieved with a
# simple heuristic:
#
# * If we find an explicit image or video (:class:`torchvision.datapoints.Image`, :class:`torchvision.datapoints.Video`,
# or :class:`PIL.Image.Image`) in the input, all other plain tensors are passed through.
# * If there is no explicit image or video, only the first plain :class:`torch.Tensor` will be transformed as image or
# video, while all others will be passed through.
plain_tensor_image = torch.rand(image.shape)
print(image.shape, plain_tensor_image.shape)
# passing a plain tensor together with an explicit image will not transform the former
plain_tensor_image, image = transform(plain_tensor_image, image)
print(image.shape, plain_tensor_image.shape)
# passing a plain tensor without an explicit image will transform the former
plain_tensor_image, _ = transform(plain_tensor_image, bounding_boxes)
print(image.shape, plain_tensor_image.shape)
|
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import os
from huggingface_hub.constants import HF_HOME
from packaging import version
from ..dependency_versions_check import dep_version_check
from .import_utils import ENV_VARS_TRUE_VALUES, is_peft_available, is_transformers_available
MIN_PEFT_VERSION = "0.6.0"
MIN_TRANSFORMERS_VERSION = "4.34.0"
_CHECK_PEFT = os.environ.get("_CHECK_PEFT", "1") in ENV_VARS_TRUE_VALUES
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
WEIGHTS_INDEX_NAME = "diffusion_pytorch_model.bin.index.json"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "diffusion_pytorch_model.safetensors.index.json"
SAFETENSORS_FILE_EXTENSION = "safetensors"
GGUF_FILE_EXTENSION = "gguf"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co")
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(HF_HOME, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
# Below should be `True` if the current versions of `peft` and `transformers` are compatible with
# the PEFT backend. The PEFT backend will automatically be used if the correct versions of the
# libraries are available.
# For PEFT it has to be greater than or equal to 0.6.0 and for transformers it has to be greater than or equal to 4.34.0.
_required_peft_version = is_peft_available() and version.parse(
version.parse(importlib.metadata.version("peft")).base_version
) >= version.parse(MIN_PEFT_VERSION)
_required_transformers_version = is_transformers_available() and version.parse(
version.parse(importlib.metadata.version("transformers")).base_version
) >= version.parse(MIN_TRANSFORMERS_VERSION)
USE_PEFT_BACKEND = _required_peft_version and _required_transformers_version
if USE_PEFT_BACKEND and _CHECK_PEFT:
dep_version_check("peft")
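# Illustrative note (not part of the original module): downstream code typically gates its
# PEFT-specific code paths on the flag computed above, e.g.
#
#   from diffusers.utils import USE_PEFT_BACKEND
#   if USE_PEFT_BACKEND:
#       ...  # take the peft-based LoRA path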
|
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import os
from huggingface_hub.constants import HF_HOME
from packaging import version
from ..dependency_versions_check import dep_version_check
from .import_utils import ENV_VARS_TRUE_VALUES, is_peft_available, is_transformers_available
MIN_PEFT_VERSION = "0.6.0"
MIN_TRANSFORMERS_VERSION = "4.34.0"
_CHECK_PEFT = os.environ.get("_CHECK_PEFT", "1") in ENV_VARS_TRUE_VALUES
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
WEIGHTS_INDEX_NAME = "diffusion_pytorch_model.bin.index.json"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "diffusion_pytorch_model.safetensors.index.json"
SAFETENSORS_FILE_EXTENSION = "safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co")
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(HF_HOME, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
# Below should be `True` if the current versions of `peft` and `transformers` are compatible with
# the PEFT backend. The PEFT backend will automatically be used if the correct versions of the
# libraries are available.
# For PEFT it has to be greater than or equal to 0.6.0 and for transformers it has to be greater than or equal to 4.34.0.
_required_peft_version = is_peft_available() and version.parse(
version.parse(importlib.metadata.version("peft")).base_version
) >= version.parse(MIN_PEFT_VERSION)
_required_transformers_version = is_transformers_available() and version.parse(
version.parse(importlib.metadata.version("transformers")).base_version
) >= version.parse(MIN_TRANSFORMERS_VERSION)
USE_PEFT_BACKEND = _required_peft_version and _required_transformers_version
if USE_PEFT_BACKEND and _CHECK_PEFT:
dep_version_check("peft")
|
from __future__ import annotations
import os
from typing import Any, Callable, Optional, Tuple
import PIL.Image
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class FGVCAircraft(VisionDataset):
"""`FGVC Aircraft <https://www.robots.ox.ac.uk/~vgg/data/fgvc-aircraft/>`_ Dataset.
The dataset contains 10,000 images of aircraft, with 100 images for each of 100
different aircraft model variants, most of which are airplanes.
Aircraft models are organized in a three-level hierarchy. The three levels, from
finer to coarser, are:
- ``variant``, e.g. Boeing 737-700. A variant collapses all the models that are visually
indistinguishable into one class. The dataset comprises 100 different variants.
- ``family``, e.g. Boeing 737. The dataset comprises 70 different families.
- ``manufacturer``, e.g. Boeing. The dataset comprises 30 different manufacturers.
Args:
root (string): Root directory of the FGVC Aircraft dataset.
split (string, optional): The dataset split, supports ``train``, ``val``,
``trainval`` and ``test``.
annotation_level (str, optional): The annotation level, supports ``variant``,
``family`` and ``manufacturer``.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g., ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in the root directory. If the dataset is already downloaded, it is not
downloaded again.
"""
_URL = "https://www.robots.ox.ac.uk/~vgg/data/fgvc-aircraft/archives/fgvc-aircraft-2013b.tar.gz"
def __init__(
self,
root: str,
split: str = "trainval",
annotation_level: str = "variant",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self._split = verify_str_arg(split, "split", ("train", "val", "trainval", "test"))
self._annotation_level = verify_str_arg(
annotation_level, "annotation_level", ("variant", "family", "manufacturer")
)
self._data_path = os.path.join(self.root, "fgvc-aircraft-2013b")
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
annotation_file = os.path.join(
self._data_path,
"data",
{
"variant": "variants.txt",
"family": "families.txt",
"manufacturer": "manufacturers.txt",
}[self._annotation_level],
)
with open(annotation_file, "r") as f:
self.classes = [line.strip() for line in f]
self.class_to_idx = dict(zip(self.classes, range(len(self.classes))))
image_data_folder = os.path.join(self._data_path, "data", "images")
labels_file = os.path.join(self._data_path, "data", f"images_{self._annotation_level}_{self._split}.txt")
self._image_files = []
self._labels = []
with open(labels_file, "r") as f:
for line in f:
image_name, label_name = line.strip().split(" ", 1)
self._image_files.append(os.path.join(image_data_folder, f"{image_name}.jpg"))
self._labels.append(self.class_to_idx[label_name])
def __len__(self) -> int:
return len(self._image_files)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
image_file, label = self._image_files[idx], self._labels[idx]
image = PIL.Image.open(image_file).convert("RGB")
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
def _download(self) -> None:
"""
Download the FGVC Aircraft dataset archive and extract it under root.
"""
if self._check_exists():
return
download_and_extract_archive(self._URL, self.root)
def _check_exists(self) -> bool:
return os.path.exists(self._data_path) and os.path.isdir(self._data_path)
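# Illustrative usage (a minimal sketch, not part of the original module); the root path is a
# placeholder.
#
#   dataset = FGVCAircraft(root="./data", split="train", annotation_level="variant", download=True)
#   image, label = dataset[0]        # PIL RGB image and integer class index
#   print(dataset.classes[label])    # one of the 100 variant names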
|
from __future__ import annotations
import os
from typing import Any, Callable, Optional, Tuple
import PIL.Image
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class FGVCAircraft(VisionDataset):
"""`FGVC Aircraft <https://www.robots.ox.ac.uk/~vgg/data/fgvc-aircraft/>`_ Dataset.
The dataset contains 10,000 images of aircraft, with 100 images for each of 100
different aircraft model variants, most of which are airplanes.
Aircraft models are organized in a three-level hierarchy. The three levels, from
finer to coarser, are:
- ``variant``, e.g. Boeing 737-700. A variant collapses all the models that are visually
indistinguishable into one class. The dataset comprises 100 different variants.
- ``family``, e.g. Boeing 737. The dataset comprises 70 different families.
- ``manufacturer``, e.g. Boeing. The dataset comprises 30 different manufacturers.
Args:
root (string): Root directory of the FGVC Aircraft dataset.
split (string, optional): The dataset split, supports ``train``, ``val``,
``trainval`` and ``test``.
annotation_level (str, optional): The annotation level, supports ``variant``,
``family`` and ``manufacturer``.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g., ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in the root directory. If the dataset is already downloaded, it is not
downloaded again.
"""
_URL = "https://www.robots.ox.ac.uk/~vgg/data/fgvc-aircraft/archives/fgvc-aircraft-2013b.tar.gz"
def __init__(
self,
root: str,
split: str = "trainval",
annotation_level: str = "variant",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self._split = verify_str_arg(split, "split", ("train", "val", "trainval", "test"))
self._annotation_level = verify_str_arg(
annotation_level, "annotation_level", ("variant", "family", "manufacturer")
)
self._data_path = os.path.join(self.root, "fgvc-aircraft-2013b")
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
annotation_file = os.path.join(
self._data_path,
"data",
{
"variant": "variants.txt",
"family": "families.txt",
"manufacturer": "manufacturers.txt",
}[self._annotation_level],
)
with open(annotation_file, "r") as f:
self.classes = [line.strip() for line in f]
self.class_to_idx = dict(zip(self.classes, range(len(self.classes))))
image_data_folder = os.path.join(self._data_path, "data", "images")
labels_file = os.path.join(self._data_path, "data", f"images_{self._annotation_level}_{self._split}.txt")
self._image_files = []
self._labels = []
with open(labels_file, "r") as f:
for line in f:
image_name, label_name = line.strip().split(" ", 1)
self._image_files.append(os.path.join(image_data_folder, f"{image_name}.jpg"))
self._labels.append(self.class_to_idx[label_name])
def __len__(self) -> int:
return len(self._image_files)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
image_file, label = self._image_files[idx], self._labels[idx]
image = PIL.Image.open(image_file).convert("RGB")
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
def _download(self) -> None:
"""
Download the FGVC Aircraft dataset archive and extract it under root.
"""
if self._check_exists():
return
download_and_extract_archive(self._URL, self.root)
def _check_exists(self) -> bool:
return os.path.exists(self._data_path) and os.path.isdir(self._data_path)
|
import pathlib
from typing import Any, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import CSVDictParser, Demultiplexer, Filter, IterDataPipe, Mapper, Zipper
from torchvision.prototype.datapoints import BoundingBox, Label
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_comparator,
)
from .._api import register_dataset, register_info
NAME = "gtsrb"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(
categories=[f"{label:05d}" for label in range(43)],
)
@register_dataset(NAME)
class GTSRB(Dataset):
"""GTSRB Dataset
homepage="https://benchmark.ini.rub.de"
"""
def __init__(
self, root: Union[str, pathlib.Path], *, split: str = "train", skip_integrity_check: bool = False
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
_URL_ROOT = "https://sid.erda.dk/public/archives/daaeac0d7ce1152aea9b61d9f1e19370/"
_URLS = {
"train": f"{_URL_ROOT}GTSRB-Training_fixed.zip",
"test": f"{_URL_ROOT}GTSRB_Final_Test_Images.zip",
"test_ground_truth": f"{_URL_ROOT}GTSRB_Final_Test_GT.zip",
}
_CHECKSUMS = {
"train": "df4144942083645bd60b594de348aa6930126c3e0e5de09e39611630abf8455a",
"test": "48ba6fab7e877eb64eaf8de99035b0aaecfbc279bee23e35deca4ac1d0a837fa",
"test_ground_truth": "f94e5a7614d75845c74c04ddb26b8796b9e483f43541dd95dd5b726504e16d6d",
}
def _resources(self) -> List[OnlineResource]:
rsrcs: List[OnlineResource] = [HttpResource(self._URLS[self._split], sha256=self._CHECKSUMS[self._split])]
if self._split == "test":
rsrcs.append(
HttpResource(
self._URLS["test_ground_truth"],
sha256=self._CHECKSUMS["test_ground_truth"],
)
)
return rsrcs
def _classify_train_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = pathlib.Path(data[0])
if path.suffix == ".ppm":
return 0
elif path.suffix == ".csv":
return 1
else:
return None
def _prepare_sample(self, data: Tuple[Tuple[str, Any], Dict[str, Any]]) -> Dict[str, Any]:
(path, buffer), csv_info = data
label = int(csv_info["ClassId"])
bounding_box = BoundingBox(
[int(csv_info[k]) for k in ("Roi.X1", "Roi.Y1", "Roi.X2", "Roi.Y2")],
format="xyxy",
spatial_size=(int(csv_info["Height"]), int(csv_info["Width"])),
)
return {
"path": path,
"image": EncodedImage.from_file(buffer),
"label": Label(label, categories=self._categories),
"bounding_box": bounding_box,
}
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
if self._split == "train":
images_dp, ann_dp = Demultiplexer(
resource_dps[0], 2, self._classify_train_archive, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
else:
images_dp, ann_dp = resource_dps
images_dp = Filter(images_dp, path_comparator("suffix", ".ppm"))
# The order of the image files in the .zip archives perfectly matches the order of the entries in the
# (possibly concatenated) .csv files. So we're able to use Zipper here instead of an IterKeyZipper.
ann_dp = CSVDictParser(ann_dp, delimiter=";")
dp = Zipper(images_dp, ann_dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 26_640 if self._split == "train" else 12_630
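# Illustrative sketch (not part of the original module): prototype datasets are IterDataPipes,
# so samples can be drawn by iterating the dataset instance; keys follow `_prepare_sample`
# above. The root path is a placeholder.
#
#   dataset = GTSRB("./data", split="train")
#   sample = next(iter(dataset))
#   sample["image"], sample["label"], sample["bounding_box"]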
|
import pathlib
from typing import Any, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import CSVDictParser, Demultiplexer, Filter, IterDataPipe, Mapper, Zipper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_comparator,
)
from torchvision.prototype.features import BoundingBox, Label
from .._api import register_dataset, register_info
NAME = "gtsrb"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(
categories=[f"{label:05d}" for label in range(43)],
)
@register_dataset(NAME)
class GTSRB(Dataset):
"""GTSRB Dataset
homepage="https://benchmark.ini.rub.de"
"""
def __init__(
self, root: Union[str, pathlib.Path], *, split: str = "train", skip_integrity_check: bool = False
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
_URL_ROOT = "https://sid.erda.dk/public/archives/daaeac0d7ce1152aea9b61d9f1e19370/"
_URLS = {
"train": f"{_URL_ROOT}GTSRB-Training_fixed.zip",
"test": f"{_URL_ROOT}GTSRB_Final_Test_Images.zip",
"test_ground_truth": f"{_URL_ROOT}GTSRB_Final_Test_GT.zip",
}
_CHECKSUMS = {
"train": "df4144942083645bd60b594de348aa6930126c3e0e5de09e39611630abf8455a",
"test": "48ba6fab7e877eb64eaf8de99035b0aaecfbc279bee23e35deca4ac1d0a837fa",
"test_ground_truth": "f94e5a7614d75845c74c04ddb26b8796b9e483f43541dd95dd5b726504e16d6d",
}
def _resources(self) -> List[OnlineResource]:
rsrcs: List[OnlineResource] = [HttpResource(self._URLS[self._split], sha256=self._CHECKSUMS[self._split])]
if self._split == "test":
rsrcs.append(
HttpResource(
self._URLS["test_ground_truth"],
sha256=self._CHECKSUMS["test_ground_truth"],
)
)
return rsrcs
def _classify_train_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = pathlib.Path(data[0])
if path.suffix == ".ppm":
return 0
elif path.suffix == ".csv":
return 1
else:
return None
def _prepare_sample(self, data: Tuple[Tuple[str, Any], Dict[str, Any]]) -> Dict[str, Any]:
(path, buffer), csv_info = data
label = int(csv_info["ClassId"])
bounding_box = BoundingBox(
[int(csv_info[k]) for k in ("Roi.X1", "Roi.Y1", "Roi.X2", "Roi.Y2")],
format="xyxy",
spatial_size=(int(csv_info["Height"]), int(csv_info["Width"])),
)
return {
"path": path,
"image": EncodedImage.from_file(buffer),
"label": Label(label, categories=self._categories),
"bounding_box": bounding_box,
}
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
if self._split == "train":
images_dp, ann_dp = Demultiplexer(
resource_dps[0], 2, self._classify_train_archive, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
else:
images_dp, ann_dp = resource_dps
images_dp = Filter(images_dp, path_comparator("suffix", ".ppm"))
# The order of the image files in the .zip archives perfectly matches the order of the entries in the
# (possibly concatenated) .csv files. So we're able to use Zipper here instead of an IterKeyZipper.
ann_dp = CSVDictParser(ann_dp, delimiter=";")
dp = Zipper(images_dp, ann_dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 26_640 if self._split == "train" else 12_630
|
# ruff: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "2.17.0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# ruff: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "2.16.2.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
import pytest
from jina import Document, Flow
@pytest.mark.parametrize('endpoint', ['foo', 'bar'])
@pytest.mark.parametrize(
'uses', ['jinaai+sandbox://jina-ai/Hello']
)
def test_sandbox(endpoint, uses):
with Flow().add(uses=uses) as f:
da = f.post(
endpoint,
[
Document(text="dog world"),
Document(text="cat world"),
Document(id="a", text="elephant world"),
Document(id="b", text="monkey world"),
],
)
assert da.texts == [
'hello dog world',
'hello cat world',
'hello elephant world',
'hello monkey world',
]
|
import pytest
from jina import Document, Flow
@pytest.mark.parametrize('endpoint', ['foo', 'bar'])
def test_sandbox(endpoint):
with Flow().add(uses='jinahub+sandbox://Hello') as f:
da = f.post(
endpoint,
[
Document(text="dog world"),
Document(text="cat world"),
Document(id="a", text="elephant world"),
Document(id="b", text="monkey world"),
],
)
assert da.texts == [
'hello dog world',
'hello cat world',
'hello elephant world',
'hello monkey world',
]
|
import numpy as np
import pytest
import scipy.ndimage
import torch
from whisper.timing import dtw_cpu, dtw_cuda, median_filter
sizes = [
(10, 20),
(32, 16),
(123, 1500),
(234, 189),
]
shapes = [
(10,),
(1, 15),
(4, 5, 345),
(6, 12, 240, 512),
]
@pytest.mark.parametrize("N, M", sizes)
def test_dtw(N: int, M: int):
steps = np.concatenate([np.zeros(N - 1), np.ones(M - 1)])
np.random.shuffle(steps)
x = np.random.random((N, M)).astype(np.float32)
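# Build a random monotonic ground-truth path: `steps` holds N - 1 "down" moves (0) and
# M - 1 "right" moves (1); two consecutive differing moves are merged into a diagonal step.
# Subtracting 1 from x along the path makes it the cheapest alignment, which dtw_cpu is
# expected to recover exactly below.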
i, j, k = 0, 0, 0
trace = []
while True:
x[i, j] -= 1
trace.append((i, j))
if k == len(steps):
break
if k + 1 < len(steps) and steps[k] != steps[k + 1]:
i += 1
j += 1
k += 2
continue
if steps[k] == 0:
i += 1
if steps[k] == 1:
j += 1
k += 1
trace = np.array(trace).T
dtw_trace = dtw_cpu(x)
assert np.allclose(trace, dtw_trace)
@pytest.mark.requires_cuda
@pytest.mark.parametrize("N, M", sizes)
def test_dtw_cuda_equivalence(N: int, M: int):
x_numpy = np.random.randn(N, M).astype(np.float32)
x_cuda = torch.from_numpy(x_numpy).cuda()
trace_cpu = dtw_cpu(x_numpy)
trace_cuda = dtw_cuda(x_cuda)
assert np.allclose(trace_cpu, trace_cuda)
@pytest.mark.parametrize("shape", shapes)
def test_median_filter(shape):
x = torch.randn(*shape)
for filter_width in [3, 5, 7, 13]:
filtered = median_filter(x, filter_width)
# using np.pad to reflect-pad, because Scipy's behavior is different near the edges.
pad_width = filter_width // 2
padded_x = np.pad(
x, [(0, 0)] * (x.ndim - 1) + [(pad_width, pad_width)], mode="reflect"
)
scipy_filtered = scipy.ndimage.median_filter(
padded_x, [1] * (x.ndim - 1) + [filter_width]
)
scipy_filtered = scipy_filtered[..., pad_width:-pad_width]
assert np.allclose(filtered, scipy_filtered)
@pytest.mark.requires_cuda
@pytest.mark.parametrize("shape", shapes)
def test_median_filter_equivalence(shape):
x = torch.randn(*shape)
for filter_width in [3, 5, 7, 13]:
filtered_cpu = median_filter(x, filter_width)
filtered_gpu = median_filter(x.cuda(), filter_width).cpu()
assert np.allclose(filtered_cpu, filtered_gpu)
|
import pytest
import numpy as np
import scipy.ndimage
import torch
from whisper.timing import dtw_cpu, dtw_cuda, median_filter
sizes = [
(10, 20), (32, 16), (123, 1500), (234, 189),
]
shapes = [
(10,), (1, 15), (4, 5, 345), (6, 12, 240, 512),
]
@pytest.mark.parametrize("N, M", sizes)
def test_dtw(N: int, M: int):
steps = np.concatenate([np.zeros(N - 1), np.ones(M - 1)])
np.random.shuffle(steps)
x = np.random.random((N, M)).astype(np.float32)
i, j, k = 0, 0, 0
trace = []
while True:
x[i, j] -= 1
trace.append((i, j))
if k == len(steps):
break
if k + 1 < len(steps) and steps[k] != steps[k + 1]:
i += 1
j += 1
k += 2
continue
if steps[k] == 0:
i += 1
if steps[k] == 1:
j += 1
k += 1
trace = np.array(trace).T
dtw_trace = dtw_cpu(x)
assert np.allclose(trace, dtw_trace)
@pytest.mark.requires_cuda
@pytest.mark.parametrize("N, M", sizes)
def test_dtw_cuda_equivalence(N: int, M: int):
x_numpy = np.random.randn(N, M).astype(np.float32)
x_cuda = torch.from_numpy(x_numpy).cuda()
trace_cpu = dtw_cpu(x_numpy)
trace_cuda = dtw_cuda(x_cuda)
assert np.allclose(trace_cpu, trace_cuda)
@pytest.mark.parametrize("shape", shapes)
def test_median_filter(shape):
x = torch.randn(*shape)
for filter_width in [3, 5, 7, 13]:
filtered = median_filter(x, filter_width)
# using np.pad to reflect-pad, because Scipy's behavior is different near the edges.
pad_width = filter_width // 2
padded_x = np.pad(x, [(0, 0)] * (x.ndim - 1) + [(pad_width, pad_width)], mode="reflect")
scipy_filtered = scipy.ndimage.median_filter(padded_x, [1] * (x.ndim - 1) + [filter_width])
scipy_filtered = scipy_filtered[..., pad_width:-pad_width]
assert np.allclose(filtered, scipy_filtered)
@pytest.mark.requires_cuda
@pytest.mark.parametrize("shape", shapes)
def test_median_filter_equivalence(shape):
x = torch.randn(*shape)
for filter_width in [3, 5, 7, 13]:
filtered_cpu = median_filter(x, filter_width)
filtered_gpu = median_filter(x.cuda(), filter_width).cpu()
assert np.allclose(filtered_cpu, filtered_gpu)
|
from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
from keras.src.saving.serialization_lib import deserialize_keras_object
@keras_export("keras.layers.Masking")
class Masking(Layer):
"""Masks a sequence by using a mask value to skip timesteps.
For each timestep in the input tensor (dimension #1 in the tensor),
if all values in the input tensor at that timestep
are equal to `mask_value`, then the timestep will be masked (skipped)
in all downstream layers (as long as they support masking).
If any downstream layer does not support masking yet receives such
an input mask, an exception will be raised.
Example:
Consider a NumPy data array `x` of shape `(samples, timesteps, features)`,
to be fed to an LSTM layer. You want to mask timesteps #3 and #5 because you
lack data for these timesteps. You can:
- Set `x[:, 3, :] = 0.` and `x[:, 5, :] = 0.`
- Insert a `Masking` layer with `mask_value=0.` before the LSTM layer:
```python
samples, timesteps, features = 32, 10, 8
inputs = np.random.random([samples, timesteps, features]).astype(np.float32)
inputs[:, 3, :] = 0.
inputs[:, 5, :] = 0.
model = keras.models.Sequential()
model.add(keras.layers.Masking(mask_value=0.0))
model.add(keras.layers.LSTM(32))
output = model(inputs)
# Time steps 3 and 5 will be skipped in the LSTM calculation.
```
Note: in the Keras masking convention, a masked timestep is denoted by
a mask value of `False`, while a non-masked (i.e. usable) timestep
is denoted by a mask value of `True`.
"""
def __init__(self, mask_value=0.0, **kwargs):
super().__init__(**kwargs)
# `mask_value` can be a serialized tensor, hence verify it
if isinstance(mask_value, dict) and mask_value.get("config", None):
mask_value = deserialize_keras_object(mask_value)
self.mask_value = mask_value
self.supports_masking = True
self.built = True
def compute_mask(self, inputs, mask=None):
return ops.any(ops.not_equal(inputs, self.mask_value), axis=-1)
def call(self, inputs):
boolean_mask = ops.any(
ops.not_equal(inputs, self.mask_value), axis=-1, keepdims=True
)
# Set masked outputs to 0
outputs = inputs * backend.cast(boolean_mask, dtype=inputs.dtype)
# Compute the mask and outputs simultaneously.
backend.set_keras_mask(outputs, mask=ops.squeeze(boolean_mask, axis=-1))
return outputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super().get_config()
config = {"mask_value": self.mask_value}
return {**base_config, **config}
|
from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.Masking")
class Masking(Layer):
"""Masks a sequence by using a mask value to skip timesteps.
For each timestep in the input tensor (dimension #1 in the tensor),
if all values in the input tensor at that timestep
are equal to `mask_value`, then the timestep will be masked (skipped)
in all downstream layers (as long as they support masking).
If any downstream layer does not support masking yet receives such
an input mask, an exception will be raised.
Example:
Consider a NumPy data array `x` of shape `(samples, timesteps, features)`,
to be fed to an LSTM layer. You want to mask timesteps #3 and #5 because you
lack data for these timesteps. You can:
- Set `x[:, 3, :] = 0.` and `x[:, 5, :] = 0.`
- Insert a `Masking` layer with `mask_value=0.` before the LSTM layer:
```python
samples, timesteps, features = 32, 10, 8
inputs = np.random.random([samples, timesteps, features]).astype(np.float32)
inputs[:, 3, :] = 0.
inputs[:, 5, :] = 0.
model = keras.models.Sequential()
model.add(keras.layers.Masking(mask_value=0.0))
model.add(keras.layers.LSTM(32))
output = model(inputs)
# Time steps 3 and 5 will be skipped in the LSTM calculation.
```
Note: in the Keras masking convention, a masked timestep is denoted by
a mask value of `False`, while a non-masked (i.e. usable) timestep
is denoted by a mask value of `True`.
"""
def __init__(self, mask_value=0.0, **kwargs):
super().__init__(**kwargs)
self.mask_value = mask_value
self.supports_masking = True
self.built = True
def compute_mask(self, inputs, mask=None):
return ops.any(ops.not_equal(inputs, self.mask_value), axis=-1)
def call(self, inputs):
boolean_mask = ops.any(
ops.not_equal(inputs, self.mask_value), axis=-1, keepdims=True
)
# Set masked outputs to 0
outputs = inputs * backend.cast(boolean_mask, dtype=inputs.dtype)
# Compute the mask and outputs simultaneously.
backend.set_keras_mask(outputs, mask=ops.squeeze(boolean_mask, axis=-1))
return outputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super().get_config()
config = {"mask_value": self.mask_value}
return {**base_config, **config}
|
"""LLMResult class."""
from __future__ import annotations
from copy import deepcopy
from typing import Literal, Optional, Union
from pydantic import BaseModel
from langchain_core.outputs.chat_generation import ChatGeneration, ChatGenerationChunk
from langchain_core.outputs.generation import Generation, GenerationChunk
from langchain_core.outputs.run_info import RunInfo
class LLMResult(BaseModel):
"""A container for results of an LLM call.
Both chat models and LLMs generate an LLMResult object. This object contains
the generated outputs and any additional information that the model provider
wants to return.
"""
generations: list[
list[Union[Generation, ChatGeneration, GenerationChunk, ChatGenerationChunk]]
]
"""Generated outputs.
The first dimension of the list represents completions for different input
prompts.
The second dimension of the list represents different candidate generations
for a given prompt.
When returned from an LLM the type is list[list[Generation]].
When returned from a chat model the type is list[list[ChatGeneration]].
ChatGeneration is a subclass of Generation that has a field for a structured
chat message.
"""
llm_output: Optional[dict] = None
"""For arbitrary LLM provider specific output.
This dictionary is a free-form dictionary that can contain any information that the
provider wants to return. It is not standardized and is provider-specific.
Users should generally avoid relying on this field and instead rely on
accessing relevant information from standardized fields present in
AIMessage.
"""
run: Optional[list[RunInfo]] = None
"""List of metadata info for model call for each input."""
type: Literal["LLMResult"] = "LLMResult"
"""Type is used exclusively for serialization purposes."""
def flatten(self) -> list[LLMResult]:
"""Flatten generations into a single list.
Unpack list[list[Generation]] -> list[LLMResult] where each returned LLMResult
contains only a single Generation. If token usage information is available,
it is kept only for the LLMResult corresponding to the top-choice
Generation, to avoid over-counting of token usage downstream.
Returns:
List of LLMResults where each returned LLMResult contains a single
Generation.
"""
llm_results = []
for i, gen_list in enumerate(self.generations):
# Avoid double counting tokens in OpenAICallback
if i == 0:
llm_results.append(
LLMResult(
generations=[gen_list],
llm_output=self.llm_output,
)
)
else:
if self.llm_output is not None:
llm_output = deepcopy(self.llm_output)
llm_output["token_usage"] = {}
else:
llm_output = None
llm_results.append(
LLMResult(
generations=[gen_list],
llm_output=llm_output,
)
)
return llm_results
def __eq__(self, other: object) -> bool:
"""Check for LLMResult equality by ignoring any metadata related to runs."""
if not isinstance(other, LLMResult):
return NotImplemented
return (
self.generations == other.generations
and self.llm_output == other.llm_output
)
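# Illustrative sketch (not part of the original module): `flatten` yields one LLMResult per
# prompt, keeping `llm_output` (e.g. token usage) only on the first to avoid double counting.
#
#   result = LLMResult(
#       generations=[[Generation(text="a")], [Generation(text="b")]],
#       llm_output={"token_usage": {"total_tokens": 7}},
#   )
#   flat = result.flatten()
#   # flat[0].llm_output == {"token_usage": {"total_tokens": 7}}
#   # flat[1].llm_output == {"token_usage": {}}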
|
"""LLMResult class."""
from __future__ import annotations
from copy import deepcopy
from typing import Literal, Optional, Union
from pydantic import BaseModel
from langchain_core.outputs.chat_generation import ChatGeneration, ChatGenerationChunk
from langchain_core.outputs.generation import Generation, GenerationChunk
from langchain_core.outputs.run_info import RunInfo
class LLMResult(BaseModel):
"""A container for results of an LLM call.
Both chat models and LLMs generate an LLMResult object. This object contains
the generated outputs and any additional information that the model provider
wants to return.
"""
generations: list[
list[Union[Generation, ChatGeneration, GenerationChunk, ChatGenerationChunk]]
]
"""Generated outputs.
The first dimension of the list represents completions for different input
prompts.
The second dimension of the list represents different candidate generations
for a given prompt.
When returned from an LLM the type is list[list[Generation]].
When returned from a chat model the type is list[list[ChatGeneration]].
ChatGeneration is a subclass of Generation that has a field for a structured
chat message.
"""
llm_output: Optional[dict] = None
"""For arbitrary LLM provider specific output.
This dictionary is a free-form dictionary that can contain any information that the
provider wants to return. It is not standardized and is provider-specific.
Users should generally avoid relying on this field and instead rely on
accessing relevant information from standardized fields present in
AIMessage.
"""
run: Optional[list[RunInfo]] = None
"""List of metadata info for model call for each input."""
type: Literal["LLMResult"] = "LLMResult" # type: ignore[assignment]
"""Type is used exclusively for serialization purposes."""
def flatten(self) -> list[LLMResult]:
"""Flatten generations into a single list.
Unpack list[list[Generation]] -> list[LLMResult] where each returned LLMResult
contains only a single Generation. If token usage information is available,
it is kept only for the LLMResult corresponding to the top-choice
Generation, to avoid over-counting of token usage downstream.
Returns:
List of LLMResults where each returned LLMResult contains a single
Generation.
"""
llm_results = []
for i, gen_list in enumerate(self.generations):
# Avoid double counting tokens in OpenAICallback
if i == 0:
llm_results.append(
LLMResult(
generations=[gen_list],
llm_output=self.llm_output,
)
)
else:
if self.llm_output is not None:
llm_output = deepcopy(self.llm_output)
llm_output["token_usage"] = {}
else:
llm_output = None
llm_results.append(
LLMResult(
generations=[gen_list],
llm_output=llm_output,
)
)
return llm_results
def __eq__(self, other: object) -> bool:
"""Check for LLMResult equality by ignoring any metadata related to runs."""
if not isinstance(other, LLMResult):
return NotImplemented
return (
self.generations == other.generations
and self.llm_output == other.llm_output
)
|
from __future__ import annotations
from collections.abc import Sequence
from typing import Any, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.vectorstores import VectorStore
from pydantic import ConfigDict
from langchain.chains.router.base import RouterChain
class EmbeddingRouterChain(RouterChain):
"""Chain that uses embeddings to route between options."""
vectorstore: VectorStore
routing_keys: list[str] = ["query"]
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
@property
def input_keys(self) -> list[str]:
"""Will be whatever keys the LLM chain prompt expects.
:meta private:
"""
return self.routing_keys
def _call(
self,
inputs: dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> dict[str, Any]:
_input = ", ".join([inputs[k] for k in self.routing_keys])
results = self.vectorstore.similarity_search(_input, k=1)
return {"next_inputs": inputs, "destination": results[0].metadata["name"]}
async def _acall(
self,
inputs: dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> dict[str, Any]:
_input = ", ".join([inputs[k] for k in self.routing_keys])
results = await self.vectorstore.asimilarity_search(_input, k=1)
return {"next_inputs": inputs, "destination": results[0].metadata["name"]}
@classmethod
def from_names_and_descriptions(
cls,
names_and_descriptions: Sequence[tuple[str, Sequence[str]]],
vectorstore_cls: type[VectorStore],
embeddings: Embeddings,
**kwargs: Any,
) -> EmbeddingRouterChain:
"""Convenience constructor."""
documents = []
for name, descriptions in names_and_descriptions:
documents.extend(
[
Document(page_content=description, metadata={"name": name})
for description in descriptions
]
)
vectorstore = vectorstore_cls.from_documents(documents, embeddings)
return cls(vectorstore=vectorstore, **kwargs)
@classmethod
async def afrom_names_and_descriptions(
cls,
names_and_descriptions: Sequence[tuple[str, Sequence[str]]],
vectorstore_cls: type[VectorStore],
embeddings: Embeddings,
**kwargs: Any,
) -> EmbeddingRouterChain:
"""Convenience constructor."""
documents = []
documents.extend(
[
Document(page_content=description, metadata={"name": name})
for name, descriptions in names_and_descriptions
for description in descriptions
]
)
vectorstore = await vectorstore_cls.afrom_documents(documents, embeddings)
return cls(vectorstore=vectorstore, **kwargs)
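# Illustrative usage sketch (hedged: FAISS and OpenAIEmbeddings are just one
# possible VectorStore/Embeddings pairing, and the route names are made up):
#
#     from langchain_community.vectorstores import FAISS
#     from langchain_openai import OpenAIEmbeddings
#
#     router = EmbeddingRouterChain.from_names_and_descriptions(
#         [("physics", ["questions about physics"]),
#          ("math", ["questions about math"])],
#         FAISS,
#         OpenAIEmbeddings(),
#     )
#     # "destination" is the name whose description embeds closest to the query
#     router.invoke({"query": "What is a photon?"})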
|
from __future__ import annotations
from collections.abc import Sequence
from typing import Any, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.vectorstores import VectorStore
from pydantic import ConfigDict
from langchain.chains.router.base import RouterChain
class EmbeddingRouterChain(RouterChain):
"""Chain that uses embeddings to route between options."""
vectorstore: VectorStore
routing_keys: list[str] = ["query"]
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
@property
def input_keys(self) -> list[str]:
"""Will be whatever keys the LLM chain prompt expects.
:meta private:
"""
return self.routing_keys
def _call(
self,
inputs: dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> dict[str, Any]:
_input = ", ".join([inputs[k] for k in self.routing_keys])
results = self.vectorstore.similarity_search(_input, k=1)
return {"next_inputs": inputs, "destination": results[0].metadata["name"]}
async def _acall(
self,
inputs: dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> dict[str, Any]:
_input = ", ".join([inputs[k] for k in self.routing_keys])
results = await self.vectorstore.asimilarity_search(_input, k=1)
return {"next_inputs": inputs, "destination": results[0].metadata["name"]}
@classmethod
def from_names_and_descriptions(
cls,
names_and_descriptions: Sequence[tuple[str, Sequence[str]]],
vectorstore_cls: type[VectorStore],
embeddings: Embeddings,
**kwargs: Any,
) -> EmbeddingRouterChain:
"""Convenience constructor."""
documents = []
for name, descriptions in names_and_descriptions:
for description in descriptions:
documents.append(
Document(page_content=description, metadata={"name": name})
)
vectorstore = vectorstore_cls.from_documents(documents, embeddings)
return cls(vectorstore=vectorstore, **kwargs)
@classmethod
async def afrom_names_and_descriptions(
cls,
names_and_descriptions: Sequence[tuple[str, Sequence[str]]],
vectorstore_cls: type[VectorStore],
embeddings: Embeddings,
**kwargs: Any,
) -> EmbeddingRouterChain:
"""Convenience constructor."""
documents = []
for name, descriptions in names_and_descriptions:
for description in descriptions:
documents.append(
Document(page_content=description, metadata={"name": name})
)
vectorstore = await vectorstore_cls.afrom_documents(documents, embeddings)
return cls(vectorstore=vectorstore, **kwargs)
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from typing import Optional, Tuple
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmengine.config import ConfigDict
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import MultiConfig, OptConfigType
@MODELS.register_module()
class FusedSemanticHead(BaseModule):
r"""Multi-level fused semantic segmentation head.
.. code-block:: none
in_1 -> 1x1 conv ---
|
in_2 -> 1x1 conv -- |
||
in_3 -> 1x1 conv - ||
||| /-> 1x1 conv (mask prediction)
in_4 -> 1x1 conv -----> 3x3 convs (*4)
| \-> 1x1 conv (feature)
in_5 -> 1x1 conv ---
""" # noqa: W605
def __init__(
self,
num_ins: int,
fusion_level: int,
seg_scale_factor=1 / 8,
num_convs: int = 4,
in_channels: int = 256,
conv_out_channels: int = 256,
num_classes: int = 183,
conv_cfg: OptConfigType = None,
norm_cfg: OptConfigType = None,
        ignore_label: Optional[int] = None,
        loss_weight: Optional[float] = None,
loss_seg: ConfigDict = dict(
type='CrossEntropyLoss', ignore_index=255, loss_weight=0.2),
init_cfg: MultiConfig = dict(
type='Kaiming', override=dict(name='conv_logits'))
) -> None:
super().__init__(init_cfg=init_cfg)
self.num_ins = num_ins
self.fusion_level = fusion_level
self.seg_scale_factor = seg_scale_factor
self.num_convs = num_convs
self.in_channels = in_channels
self.conv_out_channels = conv_out_channels
self.num_classes = num_classes
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.fp16_enabled = False
self.lateral_convs = nn.ModuleList()
for i in range(self.num_ins):
self.lateral_convs.append(
ConvModule(
self.in_channels,
self.in_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
inplace=False))
self.convs = nn.ModuleList()
for i in range(self.num_convs):
in_channels = self.in_channels if i == 0 else conv_out_channels
self.convs.append(
ConvModule(
in_channels,
conv_out_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.conv_embedding = ConvModule(
conv_out_channels,
conv_out_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
self.conv_logits = nn.Conv2d(conv_out_channels, self.num_classes, 1)
if ignore_label:
loss_seg['ignore_index'] = ignore_label
if loss_weight:
loss_seg['loss_weight'] = loss_weight
if ignore_label or loss_weight:
warnings.warn('``ignore_label`` and ``loss_weight`` would be '
                          'deprecated soon. Please set ``ignore_index`` and '
'``loss_weight`` in ``loss_seg`` instead.')
self.criterion = MODELS.build(loss_seg)
def forward(self, feats: Tuple[Tensor]) -> Tuple[Tensor]:
"""Forward function.
Args:
feats (tuple[Tensor]): Multi scale feature maps.
Returns:
tuple[Tensor]:
- mask_preds (Tensor): Predicted mask logits.
- x (Tensor): Fused feature.
"""
x = self.lateral_convs[self.fusion_level](feats[self.fusion_level])
fused_size = tuple(x.shape[-2:])
for i, feat in enumerate(feats):
if i != self.fusion_level:
feat = F.interpolate(
feat, size=fused_size, mode='bilinear', align_corners=True)
# fix runtime error of "+=" inplace operation in PyTorch 1.10
x = x + self.lateral_convs[i](feat)
for i in range(self.num_convs):
x = self.convs[i](x)
mask_preds = self.conv_logits(x)
x = self.conv_embedding(x)
return mask_preds, x
def loss(self, mask_preds: Tensor, labels: Tensor) -> Tensor:
"""Loss function.
Args:
mask_preds (Tensor): Predicted mask logits.
labels (Tensor): Ground truth.
Returns:
Tensor: Semantic segmentation loss.
"""
labels = F.interpolate(
labels.float(), scale_factor=self.seg_scale_factor, mode='nearest')
labels = labels.squeeze(1).long()
loss_semantic_seg = self.criterion(mask_preds, labels)
return loss_semantic_seg
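# Shape sketch (illustrative): each input level is passed through its 1x1
# lateral conv, resized to the spatial size of feats[fusion_level], and summed;
# after num_convs 3x3 convs the head returns
#   - mask_preds: (N, num_classes, H_fuse, W_fuse) from conv_logits, and
#   - x:          (N, conv_out_channels, H_fuse, W_fuse) from conv_embedding.
# loss() first downsamples the label map by seg_scale_factor before applying
# the configured loss_seg criterion.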
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from typing import Optional, Tuple
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmengine.config import ConfigDict
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import MultiConfig, OptConfigType
@MODELS.register_module()
class FusedSemanticHead(BaseModule):
r"""Multi-level fused semantic segmentation head.
.. code-block:: none
in_1 -> 1x1 conv ---
|
in_2 -> 1x1 conv -- |
||
in_3 -> 1x1 conv - ||
||| /-> 1x1 conv (mask prediction)
in_4 -> 1x1 conv -----> 3x3 convs (*4)
| \-> 1x1 conv (feature)
in_5 -> 1x1 conv ---
""" # noqa: W605
def __init__(
self,
num_ins: int,
fusion_level: int,
seg_scale_factor=1 / 8,
num_convs: int = 4,
in_channels: int = 256,
conv_out_channels: int = 256,
num_classes: int = 183,
conv_cfg: OptConfigType = None,
norm_cfg: OptConfigType = None,
        ignore_label: Optional[int] = None,
        loss_weight: Optional[float] = None,
loss_seg: ConfigDict = dict(
type='CrossEntropyLoss', ignore_index=255, loss_weight=0.2),
init_cfg: MultiConfig = dict(
type='Kaiming', override=dict(name='conv_logits'))
) -> None:
super().__init__(init_cfg=init_cfg)
self.num_ins = num_ins
self.fusion_level = fusion_level
self.seg_scale_factor = seg_scale_factor
self.num_convs = num_convs
self.in_channels = in_channels
self.conv_out_channels = conv_out_channels
self.num_classes = num_classes
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.fp16_enabled = False
self.lateral_convs = nn.ModuleList()
for i in range(self.num_ins):
self.lateral_convs.append(
ConvModule(
self.in_channels,
self.in_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
inplace=False))
self.convs = nn.ModuleList()
for i in range(self.num_convs):
in_channels = self.in_channels if i == 0 else conv_out_channels
self.convs.append(
ConvModule(
in_channels,
conv_out_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.conv_embedding = ConvModule(
conv_out_channels,
conv_out_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
self.conv_logits = nn.Conv2d(conv_out_channels, self.num_classes, 1)
if ignore_label:
loss_seg['ignore_index'] = ignore_label
if loss_weight:
loss_seg['loss_weight'] = loss_weight
if ignore_label or loss_weight:
warnings.warn('``ignore_label`` and ``loss_weight`` would be '
                          'deprecated soon. Please set ``ignore_index`` and '
'``loss_weight`` in ``loss_seg`` instead.')
self.criterion = MODELS.build(loss_seg)
def forward(self, feats: Tuple[Tensor]) -> Tuple[Tensor]:
"""Forward function.
Args:
feats (tuple[Tensor]): Multi scale feature maps.
Returns:
tuple[Tensor]:
- mask_pred (Tensor): Predicted mask logits.
- x (Tensor): Fused feature.
"""
x = self.lateral_convs[self.fusion_level](feats[self.fusion_level])
fused_size = tuple(x.shape[-2:])
for i, feat in enumerate(feats):
if i != self.fusion_level:
feat = F.interpolate(
feat, size=fused_size, mode='bilinear', align_corners=True)
# fix runtime error of "+=" inplace operation in PyTorch 1.10
x = x + self.lateral_convs[i](feat)
for i in range(self.num_convs):
x = self.convs[i](x)
mask_pred = self.conv_logits(x)
x = self.conv_embedding(x)
return mask_pred, x
def loss(self, mask_pred: Tensor, labels: Tensor) -> Tensor:
"""Loss function.
Args:
mask_pred (Tensor): Predicted mask logits.
labels (Tensor): Ground truth.
Returns:
Tensor: Semantic segmentation loss.
"""
labels = F.interpolate(
labels.float(), scale_factor=self.seg_scale_factor, mode='nearest')
labels = labels.squeeze(1).long()
loss_semantic_seg = self.criterion(mask_pred, labels)
return loss_semantic_seg
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Sequence
from torch.utils.data import BatchSampler, Sampler
from mmdet.datasets.samplers.track_img_sampler import TrackImgSampler
from mmdet.registry import DATA_SAMPLERS
# TODO: maybe replace with a data_loader wrapper
@DATA_SAMPLERS.register_module()
class AspectRatioBatchSampler(BatchSampler):
"""A sampler wrapper for grouping images with similar aspect ratio (< 1 or.
>= 1) into a same batch.
Args:
sampler (Sampler): Base sampler.
batch_size (int): Size of mini-batch.
drop_last (bool): If ``True``, the sampler will drop the last batch if
its size would be less than ``batch_size``.
"""
def __init__(self,
sampler: Sampler,
batch_size: int,
drop_last: bool = False) -> None:
if not isinstance(sampler, Sampler):
raise TypeError('sampler should be an instance of ``Sampler``, '
f'but got {sampler}')
if not isinstance(batch_size, int) or batch_size <= 0:
raise ValueError('batch_size should be a positive integer value, '
f'but got batch_size={batch_size}')
self.sampler = sampler
self.batch_size = batch_size
self.drop_last = drop_last
# two groups for w < h and w >= h
self._aspect_ratio_buckets = [[] for _ in range(2)]
def __iter__(self) -> Sequence[int]:
for idx in self.sampler:
data_info = self.sampler.dataset.get_data_info(idx)
width, height = data_info['width'], data_info['height']
bucket_id = 0 if width < height else 1
bucket = self._aspect_ratio_buckets[bucket_id]
bucket.append(idx)
# yield a batch of indices in the same aspect ratio group
if len(bucket) == self.batch_size:
yield bucket[:]
del bucket[:]
# yield the rest data and reset the bucket
left_data = self._aspect_ratio_buckets[0] + self._aspect_ratio_buckets[
1]
self._aspect_ratio_buckets = [[] for _ in range(2)]
while len(left_data) > 0:
if len(left_data) <= self.batch_size:
if not self.drop_last:
yield left_data[:]
left_data = []
else:
yield left_data[:self.batch_size]
left_data = left_data[self.batch_size:]
def __len__(self) -> int:
if self.drop_last:
return len(self.sampler) // self.batch_size
else:
return (len(self.sampler) + self.batch_size - 1) // self.batch_size
@DATA_SAMPLERS.register_module()
class TrackAspectRatioBatchSampler(AspectRatioBatchSampler):
"""A sampler wrapper for grouping images with similar aspect ratio (< 1 or.
>= 1) into a same batch.
Args:
sampler (Sampler): Base sampler.
batch_size (int): Size of mini-batch.
drop_last (bool): If ``True``, the sampler will drop the last batch if
its size would be less than ``batch_size``.
"""
def __iter__(self) -> Sequence[int]:
for idx in self.sampler:
# hard code to solve TrackImgSampler
if isinstance(self.sampler, TrackImgSampler):
video_idx, _ = idx
else:
video_idx = idx
# video_idx
data_info = self.sampler.dataset.get_data_info(video_idx)
# data_info {video_id, images, video_length}
img_data_info = data_info['images'][0]
width, height = img_data_info['width'], img_data_info['height']
bucket_id = 0 if width < height else 1
bucket = self._aspect_ratio_buckets[bucket_id]
bucket.append(idx)
# yield a batch of indices in the same aspect ratio group
if len(bucket) == self.batch_size:
yield bucket[:]
del bucket[:]
# yield the rest data and reset the bucket
left_data = self._aspect_ratio_buckets[0] + self._aspect_ratio_buckets[
1]
self._aspect_ratio_buckets = [[] for _ in range(2)]
while len(left_data) > 0:
if len(left_data) <= self.batch_size:
if not self.drop_last:
yield left_data[:]
left_data = []
else:
yield left_data[:self.batch_size]
left_data = left_data[self.batch_size:]
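# Illustrative behaviour (hypothetical indices): with batch_size=2, if images
# 0 and 2 are portrait (w < h) and images 1 and 3 are landscape, __iter__
# yields [0, 2] and then [1, 3]; leftover indices from both buckets are
# concatenated at the end and either yielded or dropped according to drop_last.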
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Sequence
from torch.utils.data import BatchSampler, Sampler
from mmdet.registry import DATA_SAMPLERS
# TODO: maybe replace with a data_loader wrapper
@DATA_SAMPLERS.register_module()
class AspectRatioBatchSampler(BatchSampler):
"""A sampler wrapper for grouping images with similar aspect ratio (< 1 or.
>= 1) into a same batch.
Args:
sampler (Sampler): Base sampler.
batch_size (int): Size of mini-batch.
drop_last (bool): If ``True``, the sampler will drop the last batch if
its size would be less than ``batch_size``.
"""
def __init__(self,
sampler: Sampler,
batch_size: int,
drop_last: bool = False) -> None:
if not isinstance(sampler, Sampler):
raise TypeError('sampler should be an instance of ``Sampler``, '
f'but got {sampler}')
if not isinstance(batch_size, int) or batch_size <= 0:
raise ValueError('batch_size should be a positive integer value, '
f'but got batch_size={batch_size}')
self.sampler = sampler
self.batch_size = batch_size
self.drop_last = drop_last
# two groups for w < h and w >= h
self._aspect_ratio_buckets = [[] for _ in range(2)]
def __iter__(self) -> Sequence[int]:
for idx in self.sampler:
data_info = self.sampler.dataset.get_data_info(idx)
width, height = data_info['width'], data_info['height']
bucket_id = 0 if width < height else 1
bucket = self._aspect_ratio_buckets[bucket_id]
bucket.append(idx)
# yield a batch of indices in the same aspect ratio group
if len(bucket) == self.batch_size:
yield bucket[:]
del bucket[:]
# yield the rest data and reset the bucket
left_data = self._aspect_ratio_buckets[0] + self._aspect_ratio_buckets[
1]
self._aspect_ratio_buckets = [[] for _ in range(2)]
while len(left_data) > 0:
if len(left_data) <= self.batch_size:
if not self.drop_last:
yield left_data[:]
left_data = []
else:
yield left_data[:self.batch_size]
left_data = left_data[self.batch_size:]
def __len__(self) -> int:
if self.drop_last:
return len(self.sampler) // self.batch_size
else:
return (len(self.sampler) + self.batch_size - 1) // self.batch_size
@DATA_SAMPLERS.register_module()
class TrackAspectRatioBatchSampler(AspectRatioBatchSampler):
"""A sampler wrapper for grouping images with similar aspect ratio (< 1 or.
>= 1) into a same batch.
Args:
sampler (Sampler): Base sampler.
batch_size (int): Size of mini-batch.
drop_last (bool): If ``True``, the sampler will drop the last batch if
its size would be less than ``batch_size``.
"""
def __iter__(self) -> Sequence[int]:
for idx in self.sampler:
# video_idx
data_info = self.sampler.dataset.get_data_info(idx)
# data_info {video_id, images, video_length}
img_data_info = data_info['images'][0]
width, height = img_data_info['width'], img_data_info['height']
bucket_id = 0 if width < height else 1
bucket = self._aspect_ratio_buckets[bucket_id]
bucket.append(idx)
# yield a batch of indices in the same aspect ratio group
if len(bucket) == self.batch_size:
yield bucket[:]
del bucket[:]
# yield the rest data and reset the bucket
left_data = self._aspect_ratio_buckets[0] + self._aspect_ratio_buckets[
1]
self._aspect_ratio_buckets = [[] for _ in range(2)]
while len(left_data) > 0:
if len(left_data) <= self.batch_size:
if not self.drop_last:
yield left_data[:]
left_data = []
else:
yield left_data[:self.batch_size]
left_data = left_data[self.batch_size:]
|
"""Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but has a fat infinite tail.
"""
import gc
import sys
from collections import defaultdict
from time import time
import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import lars_path, lars_path_gram, lasso_path
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print("====================")
print("Iteration %03d of %03d" % (it, max_it))
print("====================")
dataset_kwargs = {
"n_samples": n_samples,
"n_features": n_features,
"n_informative": n_features // 10,
"effective_rank": min(n_samples, n_features) / 10,
# 'effective_rank': None,
"bias": 0.0,
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
X, y = make_regression(**dataset_kwargs)
gc.collect()
print("benchmarking lars_path (with Gram):", end="")
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path_gram(Xy=Xy, Gram=G, n_samples=y.size, method="lasso")
delta = time() - tstart
print("%0.3fs" % delta)
results["lars_path (with Gram)"].append(delta)
gc.collect()
print("benchmarking lars_path (without Gram):", end="")
sys.stdout.flush()
tstart = time()
lars_path(X, y, method="lasso")
delta = time() - tstart
print("%0.3fs" % delta)
results["lars_path (without Gram)"].append(delta)
gc.collect()
print("benchmarking lasso_path (with Gram):", end="")
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=True)
delta = time() - tstart
print("%0.3fs" % delta)
results["lasso_path (with Gram)"].append(delta)
gc.collect()
print("benchmarking lasso_path (without Gram):", end="")
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=False)
delta = time() - tstart
print("%0.3fs" % delta)
results["lasso_path (without Gram)"].append(delta)
return results
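# compute_bench times four solver variants on the same synthetic data: LARS
# with and without a precomputed Gram matrix, and coordinate-descent lasso_path
# with precompute=True/False; timings are collected per variant for the 3D
# plots below.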
if __name__ == "__main__":
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d # register the 3d projection # noqa: F401
samples_range = np.linspace(10, 2000, 5).astype(int)
features_range = np.linspace(10, 2000, 5).astype(int)
results = compute_bench(samples_range, features_range)
max_time = max(max(t) for t in results.values())
fig = plt.figure("scikit-learn Lasso path benchmark results")
i = 1
for c, (label, timings) in zip("bcry", sorted(results.items())):
ax = fig.add_subplot(2, 2, i, projection="3d")
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0], features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
# ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel("n_samples")
ax.set_ylabel("n_features")
ax.set_zlabel("Time (s)")
ax.set_zlim3d(0.0, max_time * 1.1)
ax.set_title(label)
# ax.legend()
i += 1
plt.show()
|
"""Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but has a fat infinite tail.
"""
import gc
import sys
from collections import defaultdict
from time import time
import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import lars_path, lars_path_gram, lasso_path
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print("====================")
print("Iteration %03d of %03d" % (it, max_it))
print("====================")
dataset_kwargs = {
"n_samples": n_samples,
"n_features": n_features,
"n_informative": n_features // 10,
"effective_rank": min(n_samples, n_features) / 10,
# 'effective_rank': None,
"bias": 0.0,
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
X, y = make_regression(**dataset_kwargs)
gc.collect()
print("benchmarking lars_path (with Gram):", end="")
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path_gram(Xy=Xy, Gram=G, n_samples=y.size, method="lasso")
delta = time() - tstart
print("%0.3fs" % delta)
results["lars_path (with Gram)"].append(delta)
gc.collect()
print("benchmarking lars_path (without Gram):", end="")
sys.stdout.flush()
tstart = time()
lars_path(X, y, method="lasso")
delta = time() - tstart
print("%0.3fs" % delta)
results["lars_path (without Gram)"].append(delta)
gc.collect()
print("benchmarking lasso_path (with Gram):", end="")
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=True)
delta = time() - tstart
print("%0.3fs" % delta)
results["lasso_path (with Gram)"].append(delta)
gc.collect()
print("benchmarking lasso_path (without Gram):", end="")
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=False)
delta = time() - tstart
print("%0.3fs" % delta)
results["lasso_path (without Gram)"].append(delta)
return results
if __name__ == "__main__":
from mpl_toolkits.mplot3d import axes3d # noqa register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(10, 2000, 5).astype(int)
features_range = np.linspace(10, 2000, 5).astype(int)
results = compute_bench(samples_range, features_range)
max_time = max(max(t) for t in results.values())
fig = plt.figure("scikit-learn Lasso path benchmark results")
i = 1
for c, (label, timings) in zip("bcry", sorted(results.items())):
ax = fig.add_subplot(2, 2, i, projection="3d")
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0], features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
# ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel("n_samples")
ax.set_ylabel("n_features")
ax.set_zlabel("Time (s)")
ax.set_zlim3d(0.0, max_time * 1.1)
ax.set_title(label)
# ax.legend()
i += 1
plt.show()
|
"""
===================================================
Recursive feature elimination with cross-validation
===================================================
A Recursive Feature Elimination (RFE) example with automatic tuning of the
number of features selected with cross-validation.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
# Data generation
# ---------------
#
# We build a classification task using 3 informative features. The introduction
# of 2 additional redundant (i.e. correlated) features has the effect that the
# selected features vary depending on the cross-validation fold. The remaining
# features are non-informative as they are drawn at random.
from sklearn.datasets import make_classification
n_features = 15
feat_names = [f"feature_{i}" for i in range(15)]
X, y = make_classification(
n_samples=500,
n_features=n_features,
n_informative=3,
n_redundant=2,
n_repeated=0,
n_classes=8,
n_clusters_per_class=1,
class_sep=0.8,
random_state=0,
)
# %%
# Model training and selection
# ----------------------------
#
# We create the RFE object and compute the cross-validated scores. The scoring
# strategy "accuracy" optimizes the proportion of correctly classified samples.
from sklearn.feature_selection import RFECV
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold
min_features_to_select = 1 # Minimum number of features to consider
clf = LogisticRegression()
cv = StratifiedKFold(5)
rfecv = RFECV(
estimator=clf,
step=1,
cv=cv,
scoring="accuracy",
min_features_to_select=min_features_to_select,
n_jobs=2,
)
rfecv.fit(X, y)
print(f"Optimal number of features: {rfecv.n_features_}")
# %%
# In the present case, the model with 3 features (which corresponds to the true
# generative model) is found to be optimal.
#
# Plot number of features VS. cross-validation scores
# ---------------------------------------------------
import matplotlib.pyplot as plt
import pandas as pd
data = {
key: value
for key, value in rfecv.cv_results_.items()
if key in ["n_features", "mean_test_score", "std_test_score"]
}
cv_results = pd.DataFrame(data)
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Mean test accuracy")
plt.errorbar(
x=cv_results["n_features"],
y=cv_results["mean_test_score"],
yerr=cv_results["std_test_score"],
)
plt.title("Recursive Feature Elimination \nwith correlated features")
plt.show()
# %%
# From the plot above one can further notice a plateau of equivalent scores
# (similar mean value and overlapping errorbars) for 3 to 5 selected features.
# This is the result of introducing correlated features. Indeed, the optimal
# model selected by the RFE can lie within this range, depending on the
# cross-validation technique. The test accuracy decreases above 5 selected
# features, that is, keeping non-informative features leads to over-fitting and
# is therefore detrimental to the statistical performance of the models.
# %%
import numpy as np
for i in range(cv.n_splits):
mask = rfecv.cv_results_[f"split{i}_support"][
rfecv.n_features_ - 1
] # mask of features selected by the RFE
features_selected = np.ma.compressed(np.ma.masked_array(feat_names, mask=1 - mask))
print(f"Features selected in fold {i}: {features_selected}")
# %%
# In the five folds, the selected features are consistent. This is good news,
# it means that the selection is stable across folds, and it confirms that
# these features are the most informative ones.
|
"""
===================================================
Recursive feature elimination with cross-validation
===================================================
A Recursive Feature Elimination (RFE) example with automatic tuning of the
number of features selected with cross-validation.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
# Data generation
# ---------------
#
# We build a classification task using 3 informative features. The introduction
# of 2 additional redundant (i.e. correlated) features has the effect that the
# selected features vary depending on the cross-validation fold. The remaining
# features are non-informative as they are drawn at random.
from sklearn.datasets import make_classification
n_features = 15
feat_names = [f"feature_{i}" for i in range(15)]
X, y = make_classification(
n_samples=500,
n_features=n_features,
n_informative=3,
n_redundant=2,
n_repeated=0,
n_classes=8,
n_clusters_per_class=1,
class_sep=0.8,
random_state=0,
)
# %%
# Model training and selection
# ----------------------------
#
# We create the RFE object and compute the cross-validated scores. The scoring
# strategy "accuracy" optimizes the proportion of correctly classified samples.
from sklearn.feature_selection import RFECV
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold
min_features_to_select = 1 # Minimum number of features to consider
clf = LogisticRegression()
cv = StratifiedKFold(5)
rfecv = RFECV(
estimator=clf,
step=1,
cv=cv,
scoring="accuracy",
min_features_to_select=min_features_to_select,
n_jobs=2,
)
rfecv.fit(X, y)
print(f"Optimal number of features: {rfecv.n_features_}")
# %%
# In the present case, the model with 3 features (which corresponds to the true
# generative model) is found to be optimal.
#
# Plot number of features VS. cross-validation scores
# ---------------------------------------------------
import matplotlib.pyplot as plt
import pandas as pd
data = {
key: value
for key, value in rfecv.cv_results_.items()
if key in ["n_features", "mean_test_score", "std_test_score"]
}
cv_results = pd.DataFrame(data)
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Mean test accuracy")
plt.errorbar(
x=cv_results["n_features"],
y=cv_results["mean_test_score"],
yerr=cv_results["std_test_score"],
)
plt.title("Recursive Feature Elimination \nwith correlated features")
plt.show()
# %%
# From the plot above one can further notice a plateau of equivalent scores
# (similar mean value and overlapping errorbars) for 3 to 5 selected features.
# This is the result of introducing correlated features. Indeed, the optimal
# model selected by the RFE can lie within this range, depending on the
# cross-validation technique. The test accuracy decreases above 5 selected
# features, that is, keeping non-informative features leads to over-fitting and
# is therefore detrimental to the statistical performance of the models.
# %%
import numpy as np
for i in range(cv.n_splits):
mask = rfecv.cv_results_[f"split{i}_support"][
rfecv.n_features_
] # mask of features selected by the RFE
features_selected = np.ma.compressed(np.ma.masked_array(feat_names, mask=1 - mask))
print(f"Features selected in fold {i}: {features_selected}")
# %%
# In the five folds, the selected features are consistent. This is good news,
# it means that the selection is stable across folds, and it confirms that
# these features are the most informative ones.
|
_base_ = './fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
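# The schedule above warms up linearly from 0.1% of the base learning rate over
# the first 500 iterations, then decays the rate by a factor of 10 at epochs 16
# and 22 of the 24-epoch (2x) run.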
|
_base_ = './fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
|
"""Integration test for Sms."""
from langchain_community.utilities.twilio import TwilioAPIWrapper
def test_call() -> None:
"""Test that call runs."""
twilio = TwilioAPIWrapper()
output = twilio.run("Message", "+16162904619")
assert output
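# Note: TwilioAPIWrapper() is constructed without arguments, so this test
# assumes valid Twilio credentials (account SID, auth token, from number) are
# supplied via the environment.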
|
"""Integration test for Sms."""
from langchain_community.utilities.twilio import TwilioAPIWrapper
def test_call() -> None:
"""Test that call runs."""
twilio = TwilioAPIWrapper() # type: ignore[call-arg]
output = twilio.run("Message", "+16162904619")
assert output
|
from keras.src import testing
from keras.src import tree
from keras.src.backend import KerasTensor
from keras.src.ops.symbolic_arguments import SymbolicArguments
class SymbolicArgumentsTest(testing.TestCase):
# Testing multiple args and empty kwargs
def test_args(self):
shape = (2, 3, 4)
a = KerasTensor(shape=shape)
b = KerasTensor(shape=shape)
args = SymbolicArguments(
(
a,
b,
),
{},
)
self.assertEqual(args.keras_tensors, [a, b])
self.assertEqual(args._flat_arguments, [a, b])
self.assertEqual(args._single_positional_tensor, None)
# Testing single arg and single position tensor
def test_args_single_arg(self):
shape = (2, 3, 4)
a = KerasTensor(shape=shape)
args = SymbolicArguments((a))
self.assertEqual(args.keras_tensors, [a])
self.assertEqual(args._flat_arguments, [a])
self.assertEqual(len(args.kwargs), 0)
self.assertEqual(isinstance(args.args[0], KerasTensor), True)
self.assertEqual(args._single_positional_tensor, a)
# Testing kwargs
def test_kwargs(self):
shape = (2, 3, 4)
a = KerasTensor(shape=shape)
b = KerasTensor(shape=shape)
c = KerasTensor(shape=shape)
args = SymbolicArguments(
(
a,
b,
),
{1: c},
)
self.assertEqual(args.keras_tensors, [a, b, c])
self.assertEqual(args._flat_arguments, [a, b, c])
self.assertEqual(args._single_positional_tensor, None)
# Testing conversion function with args and kwargs
def test_conversion_fn(self):
shape = (2, 3, 4)
a = KerasTensor(shape=shape)
b = KerasTensor(shape=shape)
c = KerasTensor(shape=shape)
sym_args = SymbolicArguments(
(
a,
b,
),
{1: c},
)
(value, _) = sym_args.convert(lambda x: x**2)
args1 = value[0][0]
self.assertIsInstance(args1, KerasTensor)
mapped_value = tree.map_structure(lambda x: x**2, a)
self.assertEqual(mapped_value.shape, args1.shape)
self.assertEqual(mapped_value.dtype, args1.dtype)
# Testing fill in function with single args only
def test_fill_in_single_arg(self):
shape = (2, 3, 4)
a = KerasTensor(shape=shape)
tensor_dict = {id(a): 3}
sym_args = SymbolicArguments((a))
# Call the method to be tested
result, _ = sym_args.fill_in(tensor_dict)
self.assertEqual(result, (3,))
# Testing fill in function with multiple args
def test_fill_in_multiple_arg(self):
shape = (2, 3, 4)
a = KerasTensor(shape=shape)
b = KerasTensor(shape=shape)
tensor_dict = {id(b): 2}
sym_args = SymbolicArguments((a, b))
# Call the method to be tested
result, _ = sym_args.fill_in(tensor_dict)
self.assertEqual(result, ((None, 2),))
# Testing fill in function for args and kwargs
def test_fill_in(self):
shape1 = (2, 3, 4)
shape2 = (3, 2, 4)
a = KerasTensor(shape=shape1)
b = KerasTensor(shape=shape2)
c = KerasTensor(shape=shape2)
dictionary = {id(a): 3, id(c): 2}
sym_args = SymbolicArguments(
(
a,
b,
),
{"1": c},
)
(values, _) = sym_args.fill_in(dictionary)
self.assertEqual(values, ((3, None), {"1": 2}))
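# Note: in test_args_single_arg and test_fill_in_single_arg, SymbolicArguments((a))
# receives a single KerasTensor positionally -- (a) is not a tuple -- which is
# exactly the case that populates _single_positional_tensor.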
|
from keras.src import testing
from keras.src import tree
from keras.src.backend import KerasTensor
from keras.src.ops.symbolic_arguments import SymbolicArguments
class SymbolicArgumentsTest(testing.TestCase):
# Testing multiple args and empty kwargs
def test_args(self):
shape = (2, 3, 4)
a = KerasTensor(shape=shape)
b = KerasTensor(shape=shape)
args = SymbolicArguments(
(
a,
b,
),
{},
)
self.assertEqual(args.keras_tensors, [a, b])
self.assertEqual(args._flat_arguments, [a, b])
self.assertEqual(args._single_positional_tensor, None)
# Testing single arg and single position tensor
def test_args_single_arg(self):
shape = (2, 3, 4)
a = KerasTensor(shape=shape)
args = SymbolicArguments((a))
self.assertEqual(args.keras_tensors, [a])
self.assertEqual(args._flat_arguments, [a])
self.assertEqual(len(args.kwargs), 0)
self.assertEqual(isinstance(args.args[0], KerasTensor), True)
self.assertEqual(args._single_positional_tensor, a)
# Testing kwargs
def test_kwargs(self):
shape = (2, 3, 4)
a = KerasTensor(shape=shape)
b = KerasTensor(shape=shape)
c = KerasTensor(shape=shape)
args = SymbolicArguments(
(
a,
b,
),
{1: c},
)
self.assertEqual(args.keras_tensors, [a, b, c])
self.assertEqual(args._flat_arguments, [a, b, c])
self.assertEqual(args._single_positional_tensor, None)
# Testing conversion function with args and kwargs
def test_conversion_fn(self):
shape = (2, 3, 4)
a = KerasTensor(shape=shape)
b = KerasTensor(shape=shape)
c = KerasTensor(shape=shape)
sym_args = SymbolicArguments(
(
a,
b,
),
{1: c},
)
(value, _) = sym_args.convert(lambda x: x**2)
args1 = value[0][0]
self.assertIsInstance(args1, KerasTensor)
mapped_value = tree.map_structure(lambda x: x**2, a)
self.assertEqual(mapped_value.shape, args1.shape)
self.assertEqual(mapped_value.dtype, args1.dtype)
# Testing fill in function with single args only
def test_fill_in_single_arg(self):
shape = (2, 3, 4)
a = KerasTensor(shape=shape)
tensor_dict = {id(a): 3}
sym_args = SymbolicArguments((a))
# Call the method to be tested
result, _ = sym_args.fill_in(tensor_dict)
self.assertEqual(result, (3,))
# Testing fill in function with multiple args
def test_fill_in_multiple_arg(self):
shape = (2, 3, 4)
a = KerasTensor(shape=shape)
b = KerasTensor(shape=shape)
tensor_dict = {id(b): 2}
sym_args = SymbolicArguments((a, b))
# Call the method to be tested
result, _ = sym_args.fill_in(tensor_dict)
self.assertEqual(result, ((a, 2),))
# Testing fill in function for args and kwargs
def test_fill_in(self):
shape1 = (2, 3, 4)
shape2 = (3, 2, 4)
a = KerasTensor(shape=shape1)
b = KerasTensor(shape=shape2)
c = KerasTensor(shape=shape2)
dictionary = {id(a): 3, id(c): 2}
sym_args = SymbolicArguments(
(
a,
b,
),
{1: c},
)
(values, _) = sym_args.fill_in(dictionary)
self.assertEqual(values, ((3, b), {1: 2}))
|
import tantivy # noqa
from llama_index.core.vector_stores.types import BasePydanticVectorStore
from llama_index.vector_stores.lancedb import LanceDBVectorStore
from llama_index.core import VectorStoreIndex
import pytest
import lance  # noqa: F401
import pytest_asyncio
from llama_index.core.schema import NodeRelationship, RelatedNodeInfo, TextNode
try:
from lancedb.rerankers import LinearCombinationReranker
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
deps = True
except ImportError:
deps = None
@pytest_asyncio.fixture
async def index() -> VectorStoreIndex:
vector_store = LanceDBVectorStore(
overfetch_factor=1,
mode="overwrite",
reranker=LinearCombinationReranker(weight=0.3),
)
nodes = [
TextNode(
text="test1",
id_="11111111-1111-1111-1111-111111111111",
relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="test-0")},
),
TextNode(
text="test2",
id_="22222222-2222-2222-2222-222222222222",
relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="test-1")},
),
TextNode(
text="test3",
id_="33333333-3333-3333-3333-333333333333",
relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="test-2")},
),
]
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
for node in nodes:
node_embedding = embed_model.get_text_embedding(
node.get_content(metadata_mode="all")
)
node.embedding = node_embedding
vector_store.add(nodes=nodes)
return VectorStoreIndex.from_vector_store(vector_store, embed_model=embed_model)
def test_class():
names_of_base_classes = [b.__name__ for b in LanceDBVectorStore.__mro__]
assert BasePydanticVectorStore.__name__ in names_of_base_classes
@pytest.mark.skipif(
deps is None,
reason="Need to install lancedb and huggingface locally to run this test.",
)
def test_vector_query(index: VectorStoreIndex) -> None:
retriever = index.as_retriever()
response = retriever.retrieve("test1")
assert response[0].id_ == "11111111-1111-1111-1111-111111111111"
@pytest.mark.skipif(
deps is None,
reason="Need to install lancedb and huggingface locally to run this test.",
)
def test_fts_query(index: VectorStoreIndex) -> None:
try:
response = index.as_retriever(
vector_store_kwargs={"query_type": "fts"}
).retrieve("test")
    except Warning:
pass
response = index.as_retriever(vector_store_kwargs={"query_type": "fts"}).retrieve(
"test1"
)
assert response[0].id_ == "11111111-1111-1111-1111-111111111111"
@pytest.mark.skipif(
deps is None,
reason="Need to install lancedb and huggingface locally to run this test.",
)
def test_hybrid_query(index: VectorStoreIndex) -> None:
response = index.as_retriever(
vector_store_kwargs={"query_type": "hybrid"}
).retrieve("test")
assert response[0].id_ == "11111111-1111-1111-1111-111111111111"
@pytest.mark.skipif(
deps is None,
reason="Need to install lancedb and huggingface locally to run this test.",
)
def test_delete(index: VectorStoreIndex) -> None:
index.delete(doc_id="test-0")
assert index.vector_store._table.count_rows() == 2
|
import tantivy # noqa
from llama_index.core.vector_stores.types import BasePydanticVectorStore
from llama_index.vector_stores.lancedb import LanceDBVectorStore
from llama_index.core import VectorStoreIndex
def test_class():
names_of_base_classes = [b.__name__ for b in LanceDBVectorStore.__mro__]
assert BasePydanticVectorStore.__name__ in names_of_base_classes
def test_vector_query(index: VectorStoreIndex) -> None:
retriever = index.as_retriever()
response = retriever.retrieve("test1")
assert response[0].id_ == "11111111-1111-1111-1111-111111111111"
def test_fts_query(index: VectorStoreIndex) -> None:
try:
response = index.as_retriever(
vector_store_kwargs={"query_type": "fts"}
).retrieve("test")
    except Warning:
pass
response = index.as_retriever(vector_store_kwargs={"query_type": "fts"}).retrieve(
"test1"
)
assert response[0].id_ == "11111111-1111-1111-1111-111111111111"
def test_hybrid_query(index: VectorStoreIndex) -> None:
response = index.as_retriever(
vector_store_kwargs={"query_type": "hybrid"}
).retrieve("test")
assert response[0].id_ == "11111111-1111-1111-1111-111111111111"
def test_delete(index: VectorStoreIndex) -> None:
index.delete(doc_id="test-0")
assert index.vector_store._table.count_rows() == 2
|
_base_ = [
'../common/ms-poly_3x_coco-instance.py',
'../_base_/models/mask-rcnn_r50_fpn.py'
]
model = dict(
backbone=dict(
_delete_=True,
type='RegNet',
arch='regnetx_400mf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_400mf')),
neck=dict(
type='FPN',
in_channels=[32, 64, 160, 384],
out_channels=256,
num_outs=5))
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005),
clip_grad=dict(max_norm=35, norm_type=2))
|
_base_ = [
'../common/mstrain-poly_3x_coco_instance.py',
'../_base_/models/mask_rcnn_r50_fpn.py'
]
model = dict(
backbone=dict(
_delete_=True,
type='RegNet',
arch='regnetx_400mf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_400mf')),
neck=dict(
type='FPN',
in_channels=[32, 64, 160, 384],
out_channels=256,
num_outs=5))
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005),
clip_grad=dict(max_norm=35, norm_type=2))
|
# dataset settings
dataset_type = 'MOTChallengeDataset'
data_root = 'data/MOT17/'
img_scale = (1088, 1088)
backend_args = None
# data pipeline
train_pipeline = [
dict(
type='UniformRefFrameSample',
num_ref_imgs=1,
frame_range=10,
filter_key_img=True),
dict(
type='TransformBroadcaster',
share_random_params=True,
transforms=[
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadTrackAnnotations'),
dict(
type='RandomResize',
scale=img_scale,
ratio_range=(0.8, 1.2),
keep_ratio=True,
clip_object_border=False),
dict(type='PhotoMetricDistortion')
]),
dict(
type='TransformBroadcaster',
# different cropped positions for different frames
share_random_params=False,
transforms=[
dict(
type='RandomCrop', crop_size=img_scale, bbox_clip_border=False)
]),
dict(
type='TransformBroadcaster',
share_random_params=True,
transforms=[
dict(type='RandomFlip', prob=0.5),
]),
dict(type='PackTrackInputs')
]
test_pipeline = [
dict(
type='TransformBroadcaster',
transforms=[
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=img_scale, keep_ratio=True),
dict(type='LoadTrackAnnotations')
]),
dict(type='PackTrackInputs')
]
# dataloader
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='TrackImgSampler'), # image-based sampling
dataset=dict(
type=dataset_type,
data_root=data_root,
visibility_thr=-1,
ann_file='annotations/half-train_cocoformat.json',
data_prefix=dict(img_path='train'),
metainfo=dict(classes=('pedestrian', )),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
# Now we support two ways to test, image_based and video_based
# if you want to use video_based sampling, you can use as follows
# sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),
sampler=dict(type='TrackImgSampler'), # image-based sampling
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/half-val_cocoformat.json',
data_prefix=dict(img_path='train'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
# evaluator
val_evaluator = dict(
type='MOTChallengeMetric', metric=['HOTA', 'CLEAR', 'Identity'])
test_evaluator = val_evaluator
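# Pipeline note: TransformBroadcaster applies the wrapped transforms to the key
# frame and its reference frame together; share_random_params=True keeps the
# resize and flip identical across the pair, while the crop step deliberately
# draws an independent random position for each frame.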
|
# dataset settings
dataset_type = 'MOTChallengeDataset'
data_root = 'data/MOT17/'
img_scale = (1088, 1088)
# data pipeline
train_pipeline = [
dict(
type='UniformRefFrameSample',
num_ref_imgs=1,
frame_range=10,
filter_key_img=True),
dict(
type='TransformBroadcaster',
share_random_params=True,
transforms=[
dict(type='LoadImageFromFile'),
dict(type='LoadTrackAnnotations'),
dict(
type='RandomResize',
scale=img_scale,
ratio_range=(0.8, 1.2),
keep_ratio=True,
clip_object_border=False),
dict(type='PhotoMetricDistortion')
]),
dict(
type='TransformBroadcaster',
# different cropped positions for different frames
share_random_params=False,
transforms=[
dict(
type='RandomCrop', crop_size=img_scale, bbox_clip_border=False)
]),
dict(
type='TransformBroadcaster',
share_random_params=True,
transforms=[
dict(type='RandomFlip', prob=0.5),
]),
dict(type='PackTrackInputs')
]
test_pipeline = [
dict(
type='TransformBroadcaster',
transforms=[
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=img_scale, keep_ratio=True),
dict(type='LoadTrackAnnotations')
]),
dict(type='PackTrackInputs')
]
# dataloader
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='TrackImgSampler'), # image-based sampling
dataset=dict(
type=dataset_type,
data_root=data_root,
visibility_thr=-1,
ann_file='annotations/half-train_cocoformat.json',
data_prefix=dict(img_path='train'),
metainfo=dict(classes=('pedestrian', )),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
# Now we support two ways to test, image_based and video_based
# if you want to use video_based sampling, you can use as follows
# sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),
sampler=dict(type='TrackImgSampler'), # image-based sampling
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/half-val_cocoformat.json',
data_prefix=dict(img_path='train'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
# evaluator
val_evaluator = dict(
type='MOTChallengeMetric', metric=['HOTA', 'CLEAR', 'Identity'])
test_evaluator = val_evaluator
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.data import InstanceData
from mmdet.models.dense_heads import AutoAssignHead
class TestAutoAssignHead(TestCase):
def test_autoassign_head_loss(self):
"""Tests autoassign head loss when truth is empty and non-empty."""
s = 300
img_metas = [{
'img_shape': (s, s, 3),
'pad_shape': (s, s, 3),
'scale_factor': 1,
}]
autoassign_head = AutoAssignHead(
num_classes=4,
in_channels=1,
stacked_convs=1,
feat_channels=1,
strides=[8, 16, 32, 64, 128],
loss_bbox=dict(type='GIoULoss', loss_weight=5.0),
norm_cfg=None)
        # The FCOS-style head expects multiple levels of features per image
feats = (
torch.rand(1, 1, s // stride[1], s // stride[0])
for stride in autoassign_head.prior_generator.strides)
cls_scores, bbox_preds, centernesses = autoassign_head.forward(feats)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = autoassign_head.loss_by_feat(cls_scores, bbox_preds,
centernesses,
[gt_instances],
img_metas)
# When there is no truth, the neg loss should be nonzero but
# pos loss and center loss should be zero
empty_pos_loss = empty_gt_losses['loss_pos'].item()
empty_neg_loss = empty_gt_losses['loss_neg'].item()
empty_ctr_loss = empty_gt_losses['loss_center'].item()
self.assertGreater(empty_neg_loss, 0, 'neg loss should be non-zero')
self.assertEqual(
empty_pos_loss, 0,
'there should be no pos loss when there are no true boxes')
self.assertEqual(
empty_ctr_loss, 0,
'there should be no centerness loss when there are no true boxes')
# When truth is non-empty then all pos, neg loss and center loss
# should be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
one_gt_losses = autoassign_head.loss_by_feat(cls_scores, bbox_preds,
centernesses,
[gt_instances], img_metas)
onegt_pos_loss = one_gt_losses['loss_pos'].item()
onegt_neg_loss = one_gt_losses['loss_neg'].item()
onegt_ctr_loss = one_gt_losses['loss_center'].item()
self.assertGreater(onegt_pos_loss, 0, 'pos loss should be non-zero')
self.assertGreater(onegt_neg_loss, 0, 'neg loss should be non-zero')
self.assertGreater(onegt_ctr_loss, 0, 'center loss should be non-zero')
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.data import InstanceData
from mmdet.models.dense_heads import AutoAssignHead
class TestAutoAssignHead(TestCase):
def test_autoassign_head_loss(self):
"""Tests autoassign head loss when truth is empty and non-empty."""
s = 300
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
}]
autoassign_head = AutoAssignHead(
num_classes=4,
in_channels=1,
stacked_convs=1,
feat_channels=1,
strides=[8, 16, 32, 64, 128],
loss_bbox=dict(type='GIoULoss', loss_weight=5.0),
norm_cfg=None)
        # The FCOS-style head expects multiple levels of features per image
feats = (
torch.rand(1, 1, s // stride[1], s // stride[0])
for stride in autoassign_head.prior_generator.strides)
cls_scores, bbox_preds, centernesses = autoassign_head.forward(feats)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = autoassign_head.loss(cls_scores, bbox_preds,
centernesses, [gt_instances],
img_metas)
# When there is no truth, the neg loss should be nonzero but
# pos loss and center loss should be zero
empty_pos_loss = empty_gt_losses['loss_pos'].item()
empty_neg_loss = empty_gt_losses['loss_neg'].item()
empty_ctr_loss = empty_gt_losses['loss_center'].item()
self.assertGreater(empty_neg_loss, 0, 'neg loss should be non-zero')
self.assertEqual(
empty_pos_loss, 0,
'there should be no pos loss when there are no true boxes')
self.assertEqual(
empty_ctr_loss, 0,
'there should be no centerness loss when there are no true boxes')
# When truth is non-empty then all pos, neg loss and center loss
# should be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
one_gt_losses = autoassign_head.loss(cls_scores, bbox_preds,
centernesses, [gt_instances],
img_metas)
onegt_pos_loss = one_gt_losses['loss_pos'].item()
onegt_neg_loss = one_gt_losses['loss_neg'].item()
onegt_ctr_loss = one_gt_losses['loss_center'].item()
self.assertGreater(onegt_pos_loss, 0, 'pos loss should be non-zero')
self.assertGreater(onegt_neg_loss, 0, 'neg loss should be non-zero')
self.assertGreater(onegt_ctr_loss, 0, 'center loss should be non-zero')
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import (
BaseRequestsTool,
RequestsDeleteTool,
RequestsGetTool,
RequestsPatchTool,
RequestsPostTool,
RequestsPutTool,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"BaseRequestsTool": "langchain_community.tools",
"RequestsGetTool": "langchain_community.tools",
"RequestsPostTool": "langchain_community.tools",
"RequestsPatchTool": "langchain_community.tools",
"RequestsPutTool": "langchain_community.tools",
"RequestsDeleteTool": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"BaseRequestsTool",
"RequestsDeleteTool",
"RequestsGetTool",
"RequestsPatchTool",
"RequestsPostTool",
"RequestsPutTool",
]
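# Illustrative behaviour: importing any of the names in __all__ from this module
# goes through __getattr__, which delegates to the importer built above; the
# object is loaded from langchain_community.tools and a deprecation warning is
# emitted because the name appears in DEPRECATED_LOOKUP.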
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import (
BaseRequestsTool,
RequestsDeleteTool,
RequestsGetTool,
RequestsPatchTool,
RequestsPostTool,
RequestsPutTool,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"BaseRequestsTool": "langchain_community.tools",
"RequestsGetTool": "langchain_community.tools",
"RequestsPostTool": "langchain_community.tools",
"RequestsPatchTool": "langchain_community.tools",
"RequestsPutTool": "langchain_community.tools",
"RequestsDeleteTool": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"BaseRequestsTool",
"RequestsGetTool",
"RequestsPostTool",
"RequestsPatchTool",
"RequestsPutTool",
"RequestsDeleteTool",
]
|
import warnings
from typing import Any, Dict, List, Union
import numpy as np
import PIL.Image
import torch
from torchvision.prototype import datapoints
from torchvision.prototype.transforms import Transform
from torchvision.transforms import functional as _F
from typing_extensions import Literal
from ._transform import _RandomApplyTransform
from .utils import is_simple_tensor, query_chw
class ToTensor(Transform):
_transformed_types = (PIL.Image.Image, np.ndarray)
def __init__(self) -> None:
warnings.warn(
"The transform `ToTensor()` is deprecated and will be removed in a future release. "
"Instead, please use `transforms.Compose([transforms.ToImageTensor(), transforms.ConvertImageDtype()])`."
)
super().__init__()
def _transform(self, inpt: Union[PIL.Image.Image, np.ndarray], params: Dict[str, Any]) -> torch.Tensor:
return _F.to_tensor(inpt)
# TODO: in other PR (?) undeprecate those and make them use _rgb_to_gray?
class Grayscale(Transform):
_transformed_types = (
datapoints.Image,
PIL.Image.Image,
is_simple_tensor,
datapoints.Video,
)
def __init__(self, num_output_channels: Literal[1, 3] = 1) -> None:
deprecation_msg = (
f"The transform `Grayscale(num_output_channels={num_output_channels})` "
f"is deprecated and will be removed in a future release."
)
if num_output_channels == 1:
replacement_msg = (
"transforms.ConvertImageColorSpace(old_color_space=ColorSpace.RGB, color_space=ColorSpace.GRAY)"
)
else:
replacement_msg = (
"transforms.Compose(\n"
" transforms.ConvertImageColorSpace(old_color_space=ColorSpace.RGB, color_space=ColorSpace.GRAY),\n"
" transforms.ConvertImageColorSpace(old_color_space=ColorSpace.GRAY, color_space=ColorSpace.RGB),\n"
")"
)
warnings.warn(f"{deprecation_msg} Instead, please use\n\n{replacement_msg}")
super().__init__()
self.num_output_channels = num_output_channels
def _transform(
self, inpt: Union[datapoints.ImageType, datapoints.VideoType], params: Dict[str, Any]
) -> Union[datapoints.ImageType, datapoints.VideoType]:
output = _F.rgb_to_grayscale(inpt, num_output_channels=self.num_output_channels)
if isinstance(inpt, (datapoints.Image, datapoints.Video)):
output = inpt.wrap_like(inpt, output) # type: ignore[arg-type]
return output
class RandomGrayscale(_RandomApplyTransform):
_transformed_types = (
datapoints.Image,
PIL.Image.Image,
is_simple_tensor,
datapoints.Video,
)
def __init__(self, p: float = 0.1) -> None:
warnings.warn(
"The transform `RandomGrayscale(p=...)` is deprecated and will be removed in a future release. "
"Instead, please use\n\n"
"transforms.RandomApply(\n"
" transforms.Compose(\n"
" transforms.ConvertImageColorSpace(old_color_space=ColorSpace.RGB, color_space=ColorSpace.GRAY),\n"
" transforms.ConvertImageColorSpace(old_color_space=ColorSpace.GRAY, color_space=ColorSpace.RGB),\n"
" )\n"
" p=...,\n"
")"
)
super().__init__(p=p)
def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
num_input_channels, *_ = query_chw(flat_inputs)
return dict(num_input_channels=num_input_channels)
def _transform(
self, inpt: Union[datapoints.ImageType, datapoints.VideoType], params: Dict[str, Any]
) -> Union[datapoints.ImageType, datapoints.VideoType]:
output = _F.rgb_to_grayscale(inpt, num_output_channels=params["num_input_channels"])
if isinstance(inpt, (datapoints.Image, datapoints.Video)):
output = inpt.wrap_like(inpt, output) # type: ignore[arg-type]
return output
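# Hypothetical usage sketch (illustration only; assumes just the imports already
# present above). Shows that the deprecated transforms still work end-to-end:
# each emits its deprecation warning on construction. The 8x8 red image is made up.
if __name__ == "__main__":
    img = PIL.Image.new("RGB", (8, 8), color=(255, 0, 0))
    tensor = ToTensor()(img)  # float tensor of shape (3, 8, 8)
    gray = Grayscale(num_output_channels=1)(img)  # single-channel ("L" mode) PIL image
    print(tensor.shape, gray.mode)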
|
import warnings
from typing import Any, Dict, List, Union
import numpy as np
import PIL.Image
import torch
from torchvision.prototype import datapoints
from torchvision.prototype.transforms import Transform
from torchvision.transforms import functional as _F
from typing_extensions import Literal
from ._transform import _RandomApplyTransform
from .utils import is_simple_tensor, query_chw
class ToTensor(Transform):
_transformed_types = (PIL.Image.Image, np.ndarray)
def __init__(self) -> None:
warnings.warn(
"The transform `ToTensor()` is deprecated and will be removed in a future release. "
"Instead, please use `transforms.Compose([transforms.ToImageTensor(), transforms.ConvertImageDtype()])`."
)
super().__init__()
def _transform(self, inpt: Union[PIL.Image.Image, np.ndarray], params: Dict[str, Any]) -> torch.Tensor:
return _F.to_tensor(inpt)
class Grayscale(Transform):
_transformed_types = (
datapoints.Image,
PIL.Image.Image,
is_simple_tensor,
datapoints.Video,
)
def __init__(self, num_output_channels: Literal[1, 3] = 1) -> None:
deprecation_msg = (
f"The transform `Grayscale(num_output_channels={num_output_channels})` "
f"is deprecated and will be removed in a future release."
)
if num_output_channels == 1:
replacement_msg = (
"transforms.ConvertImageColorSpace(old_color_space=ColorSpace.RGB, color_space=ColorSpace.GRAY)"
)
else:
replacement_msg = (
"transforms.Compose(\n"
" transforms.ConvertImageColorSpace(old_color_space=ColorSpace.RGB, color_space=ColorSpace.GRAY),\n"
" transforms.ConvertImageColorSpace(old_color_space=ColorSpace.GRAY, color_space=ColorSpace.RGB),\n"
")"
)
warnings.warn(f"{deprecation_msg} Instead, please use\n\n{replacement_msg}")
super().__init__()
self.num_output_channels = num_output_channels
def _transform(
self, inpt: Union[datapoints.ImageType, datapoints.VideoType], params: Dict[str, Any]
) -> Union[datapoints.ImageType, datapoints.VideoType]:
output = _F.rgb_to_grayscale(inpt, num_output_channels=self.num_output_channels)
if isinstance(inpt, (datapoints.Image, datapoints.Video)):
output = inpt.wrap_like(inpt, output, color_space=datapoints.ColorSpace.GRAY) # type: ignore[arg-type]
return output
class RandomGrayscale(_RandomApplyTransform):
_transformed_types = (
datapoints.Image,
PIL.Image.Image,
is_simple_tensor,
datapoints.Video,
)
def __init__(self, p: float = 0.1) -> None:
warnings.warn(
"The transform `RandomGrayscale(p=...)` is deprecated and will be removed in a future release. "
"Instead, please use\n\n"
"transforms.RandomApply(\n"
" transforms.Compose(\n"
" transforms.ConvertImageColorSpace(old_color_space=ColorSpace.RGB, color_space=ColorSpace.GRAY),\n"
" transforms.ConvertImageColorSpace(old_color_space=ColorSpace.GRAY, color_space=ColorSpace.RGB),\n"
" )\n"
" p=...,\n"
")"
)
super().__init__(p=p)
def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
num_input_channels, *_ = query_chw(flat_inputs)
return dict(num_input_channels=num_input_channels)
def _transform(
self, inpt: Union[datapoints.ImageType, datapoints.VideoType], params: Dict[str, Any]
) -> Union[datapoints.ImageType, datapoints.VideoType]:
output = _F.rgb_to_grayscale(inpt, num_output_channels=params["num_input_channels"])
if isinstance(inpt, (datapoints.Image, datapoints.Video)):
output = inpt.wrap_like(inpt, output, color_space=datapoints.ColorSpace.GRAY) # type: ignore[arg-type]
return output
|
import os
import pytest
import time
import uuid
import pinecone.db_data
from pinecone import Pinecone, ServerlessSpec
from typing import List
from llama_index.core import StorageContext, VectorStoreIndex
from llama_index.core.embeddings import MockEmbedding
from llama_index.core.schema import TextNode
from llama_index.core.vector_stores.types import (
BasePydanticVectorStore,
MetadataFilter,
MetadataFilters,
FilterCondition,
FilterOperator,
)
from llama_index.vector_stores.pinecone import PineconeVectorStore
MAX_WAIT_TIME = 60
EMBED_DIM = 1536
PINECONE_API_KEY = os.environ.get(
"PINECONE_API_KEY",
None,
)
should_skip = not all((PINECONE_API_KEY,))
def test_class():
names_of_base_classes = [b.__name__ for b in PineconeVectorStore.__mro__]
assert BasePydanticVectorStore.__name__ in names_of_base_classes
@pytest.fixture
def nodes():
return [
TextNode(
text="Hello, world 1!",
metadata={"some_key": 1},
embedding=[0.3] * EMBED_DIM,
),
TextNode(
text="Hello, world 2!",
metadata={"some_key": 2},
embedding=[0.5] * EMBED_DIM,
),
TextNode(
text="Hello, world 3!",
metadata={"some_key": "3"},
embedding=[0.7] * EMBED_DIM,
),
]
@pytest.fixture
def pinecone_index():
index_name = f"{uuid.uuid4()}"
pc = Pinecone(api_key=PINECONE_API_KEY)
if not pc.has_index(index_name):
pc.create_index(
name=index_name,
dimension=EMBED_DIM,
metric="euclidean",
spec=ServerlessSpec(cloud="aws", region="us-east-1"),
)
pc_index = pc.Index(index_name)
yield pc_index
pc.delete_index(index_name)
@pytest.fixture
def index_with_nodes(
pinecone_index: pinecone.db_data.index.Index, nodes: List[TextNode]
):
vector_store = PineconeVectorStore(pinecone_index=pinecone_index)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex(
nodes=nodes,
storage_context=storage_context,
embed_model=MockEmbedding(embed_dim=EMBED_DIM),
)
# Note: not ideal, but pinecone takes a while to index the nodes
start_time = time.time()
while True:
stats = pinecone_index.describe_index_stats()
if stats["total_vector_count"] != len(nodes):
if time.time() - start_time > MAX_WAIT_TIME:
raise Exception("Index not ready after 60 seconds")
time.sleep(1)
else:
break
return index
@pytest.mark.skipif(
    should_skip, reason="PINECONE_API_KEY not set"
)
def test_basic_e2e(index_with_nodes: VectorStoreIndex):
nodes = index_with_nodes.as_retriever().retrieve("Hello, world 1!")
assert len(nodes) == 2
@pytest.mark.skipif(
    should_skip, reason="PINECONE_API_KEY not set"
)
def test_retrieval_with_filters(index_with_nodes: VectorStoreIndex):
filters = MetadataFilters(
filters=[
MetadataFilter(
key="some_key",
value=1,
operator=FilterOperator.EQ,
),
MetadataFilter(
key="some_key",
value=2,
operator=FilterOperator.EQ,
),
],
condition=FilterCondition.OR,
)
nodes = index_with_nodes.as_retriever(filters=filters).retrieve("Hello, world 1!")
assert len(nodes) == 2
filters = MetadataFilters(
filters=[
MetadataFilter(
key="some_key",
value=1,
operator=FilterOperator.GT,
),
],
)
nodes = index_with_nodes.as_retriever(filters=filters).retrieve("Hello, world 1!")
assert len(nodes) == 1
filters = MetadataFilters(
filters=[
MetadataFilter(
key="some_key",
value=[1, 2],
operator=FilterOperator.IN,
),
],
)
nodes = index_with_nodes.as_retriever(filters=filters).retrieve("Hello, world 1!")
assert len(nodes) == 2
filters = MetadataFilters(
filters=[
MetadataFilter(
key="some_key",
value="3",
operator=FilterOperator.EQ,
),
],
)
nodes = index_with_nodes.as_retriever(filters=filters).retrieve("Hello, world 1!")
assert len(nodes) == 1
|
from llama_index.core.vector_stores.types import BasePydanticVectorStore
from llama_index.vector_stores.pinecone import PineconeVectorStore
def test_class():
names_of_base_classes = [b.__name__ for b in PineconeVectorStore.__mro__]
assert BasePydanticVectorStore.__name__ in names_of_base_classes
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.typing import AnyEmbedding, AudioUrl
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.audio.audio_tensor import AudioTensor
try:
import torch
torch_available = True
except ImportError:
torch_available = False
T = TypeVar('T', bound='Audio')
class Audio(BaseDocument):
"""
Document for handling audios.
The Audio Document can contain an AudioUrl (`Audio.url`), an AudioTensor
(`Audio.tensor`), and an AnyEmbedding (`Audio.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import Audio
# use it directly
audio = Audio(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
)
audio.tensor = audio.url.load()
model = MyEmbeddingModel()
audio.embedding = model(audio.tensor)
You can extend this Document:
.. code-block:: python
from docarray.documents import Audio, Text
from typing import Optional
# extend it
class MyAudio(Audio):
name: Optional[Text]
audio = MyAudio(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
)
audio.tensor = audio.url.load()
model = MyEmbeddingModel()
audio.embedding = model(audio.tensor)
audio.name = Text(text='my first audio')
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import Audio, Text
# compose it
        class MultiModalDoc(BaseDocument):
audio: Audio
text: Text
mmdoc = MultiModalDoc(
audio=Audio(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.audio.tensor = mmdoc.audio.url.load()
"""
url: Optional[AudioUrl]
tensor: Optional[AudioTensor]
embedding: Optional[AnyEmbedding]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available and isinstance(value, torch.Tensor)
):
value = cls(tensor=value)
return super().validate(value)
|
from typing import Optional, TypeVar
from docarray.base_document import BaseDocument
from docarray.typing import AnyEmbedding, AudioUrl
from docarray.typing.tensor.audio.audio_tensor import AudioTensor
T = TypeVar('T', bound='Audio')
class Audio(BaseDocument):
"""
Document for handling audios.
The Audio Document can contain an AudioUrl (`Audio.url`), an AudioTensor
(`Audio.tensor`), and an AnyEmbedding (`Audio.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import Audio
# use it directly
audio = Audio(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
)
audio.tensor = audio.url.load()
model = MyEmbeddingModel()
audio.embedding = model(audio.tensor)
You can extend this Document:
.. code-block:: python
from docarray.documents import Audio, Text
from typing import Optional
# extend it
class MyAudio(Audio):
name: Optional[Text]
audio = MyAudio(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
)
audio.tensor = audio.url.load()
model = MyEmbeddingModel()
audio.embedding = model(audio.tensor)
audio.name = Text(text='my first audio')
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import Audio, Text
# compose it
        class MultiModalDoc(BaseDocument):
audio: Audio
text: Text
mmdoc = MultiModalDoc(
audio=Audio(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.audio.tensor = mmdoc.audio.url.load()
"""
url: Optional[AudioUrl]
tensor: Optional[AudioTensor]
embedding: Optional[AnyEmbedding]
|
import itertools
import torch
from parameterized import parameterized
from torchaudio_unittest.common_utils import get_asset_path, skipIfNoCtcDecoder, TempDirMixin, TorchaudioTestCase
NUM_TOKENS = 8
@skipIfNoCtcDecoder
class CTCDecoderTest(TempDirMixin, TorchaudioTestCase):
def _get_decoder(self, tokens=None, use_lm=True, use_lexicon=True, **kwargs):
from torchaudio.models.decoder import ctc_decoder
if use_lexicon:
lexicon_file = get_asset_path("decoder/lexicon.txt")
kenlm_file = get_asset_path("decoder/kenlm.arpa") if use_lm else None
else:
lexicon_file = None
kenlm_file = get_asset_path("decoder/kenlm_char.arpa") if use_lm else None
if tokens is None:
tokens = get_asset_path("decoder/tokens.txt")
return ctc_decoder(
lexicon=lexicon_file,
tokens=tokens,
lm=kenlm_file,
**kwargs,
)
def _get_emissions(self):
B, T, N = 4, 15, NUM_TOKENS
emissions = torch.rand(B, T, N)
return emissions
@parameterized.expand(
list(
itertools.product(
[get_asset_path("decoder/tokens.txt"), ["-", "|", "f", "o", "b", "a", "r"]],
[True, False],
[True, False],
)
),
)
def test_construct_decoder(self, tokens, use_lm, use_lexicon):
self._get_decoder(tokens=tokens, use_lm=use_lm, use_lexicon=use_lexicon)
@parameterized.expand(
[(True,), (False,)],
)
def test_shape(self, use_lexicon):
emissions = self._get_emissions()
decoder = self._get_decoder(use_lexicon=use_lexicon)
results = decoder(emissions)
self.assertEqual(len(results), emissions.shape[0])
@parameterized.expand(
[(True,), (False,)],
)
def test_timesteps_shape(self, use_lexicon):
"""Each token should correspond with a timestep"""
emissions = self._get_emissions()
decoder = self._get_decoder(use_lexicon=use_lexicon)
results = decoder(emissions)
for i in range(emissions.shape[0]):
result = results[i][0]
self.assertEqual(result.tokens.shape, result.timesteps.shape)
def test_no_lm_decoder(self):
"""Check that using no LM produces the same result as using an LM with 0 lm_weight"""
kenlm_decoder = self._get_decoder(lm_weight=0)
zerolm_decoder = self._get_decoder(use_lm=False)
emissions = self._get_emissions()
kenlm_results = kenlm_decoder(emissions)
zerolm_results = zerolm_decoder(emissions)
self.assertEqual(kenlm_results, zerolm_results)
def test_get_timesteps(self):
unprocessed_tokens = torch.tensor([2, 2, 0, 3, 3, 3, 0, 3])
decoder = self._get_decoder()
timesteps = decoder._get_timesteps(unprocessed_tokens)
expected = [0, 3, 7]
self.assertEqual(timesteps, expected)
def test_get_tokens_and_idxs(self):
unprocessed_tokens = torch.tensor([2, 2, 0, 3, 3, 3, 0, 3]) # ["f", "f", "-", "o", "o", "o", "-", "o"]
decoder = self._get_decoder()
token_ids = decoder._get_tokens(unprocessed_tokens)
tokens = decoder.idxs_to_tokens(token_ids)
expected_ids = [2, 3, 3]
self.assertEqual(token_ids, expected_ids)
expected_tokens = ["f", "o", "o"]
self.assertEqual(tokens, expected_tokens)
@parameterized.expand([(get_asset_path("decoder/tokens.txt"),), (["-", "|", "f", "o", "b", "a", "r"],)])
def test_index_to_tokens(self, tokens):
# decoder tokens: '-' '|' 'f' 'o' 'b' 'a' 'r'
decoder = self._get_decoder(tokens)
idxs = torch.LongTensor((1, 2, 1, 3, 5))
tokens = decoder.idxs_to_tokens(idxs)
expected_tokens = ["|", "f", "|", "o", "a"]
self.assertEqual(tokens, expected_tokens)
|
import itertools
import torch
from parameterized import parameterized
from torchaudio_unittest.common_utils import (
get_asset_path,
skipIfNoCtcDecoder,
TempDirMixin,
TorchaudioTestCase,
)
NUM_TOKENS = 8
@skipIfNoCtcDecoder
class CTCDecoderTest(TempDirMixin, TorchaudioTestCase):
def _get_decoder(self, tokens=None, use_lm=True, use_lexicon=True, **kwargs):
from torchaudio.models.decoder import ctc_decoder
if use_lexicon:
lexicon_file = get_asset_path("decoder/lexicon.txt")
kenlm_file = get_asset_path("decoder/kenlm.arpa") if use_lm else None
else:
lexicon_file = None
kenlm_file = get_asset_path("decoder/kenlm_char.arpa") if use_lm else None
if tokens is None:
tokens = get_asset_path("decoder/tokens.txt")
return ctc_decoder(
lexicon=lexicon_file,
tokens=tokens,
lm=kenlm_file,
**kwargs,
)
def _get_emissions(self):
B, T, N = 4, 15, NUM_TOKENS
emissions = torch.rand(B, T, N)
return emissions
@parameterized.expand(
list(
itertools.product(
[get_asset_path("decoder/tokens.txt"), ["-", "|", "f", "o", "b", "a", "r"]],
[True, False],
[True, False],
)
),
)
def test_construct_decoder(self, tokens, use_lm, use_lexicon):
self._get_decoder(tokens=tokens, use_lm=use_lm, use_lexicon=use_lexicon)
@parameterized.expand(
[(True,), (False,)],
)
def test_shape(self, use_lexicon):
emissions = self._get_emissions()
decoder = self._get_decoder(use_lexicon=use_lexicon)
results = decoder(emissions)
self.assertEqual(len(results), emissions.shape[0])
@parameterized.expand(
[(True,), (False,)],
)
def test_timesteps_shape(self, use_lexicon):
"""Each token should correspond with a timestep"""
emissions = self._get_emissions()
decoder = self._get_decoder(use_lexicon=use_lexicon)
results = decoder(emissions)
for i in range(emissions.shape[0]):
result = results[i][0]
self.assertEqual(result.tokens.shape, result.timesteps.shape)
def test_no_lm_decoder(self):
"""Check that using no LM produces the same result as using an LM with 0 lm_weight"""
kenlm_decoder = self._get_decoder(lm_weight=0)
zerolm_decoder = self._get_decoder(use_lm=False)
emissions = self._get_emissions()
kenlm_results = kenlm_decoder(emissions)
zerolm_results = zerolm_decoder(emissions)
self.assertEqual(kenlm_results, zerolm_results)
def test_get_timesteps(self):
unprocessed_tokens = torch.tensor([2, 2, 0, 3, 3, 3, 0, 3])
decoder = self._get_decoder()
timesteps = decoder._get_timesteps(unprocessed_tokens)
expected = [0, 3, 7]
self.assertEqual(timesteps, expected)
def test_get_tokens_and_idxs(self):
unprocessed_tokens = torch.tensor([2, 2, 0, 3, 3, 3, 0, 3]) # ["f", "f", "-", "o", "o", "o", "-", "o"]
decoder = self._get_decoder()
token_ids = decoder._get_tokens(unprocessed_tokens)
tokens = decoder.idxs_to_tokens(token_ids)
expected_ids = [2, 3, 3]
self.assertEqual(token_ids, expected_ids)
expected_tokens = ["f", "o", "o"]
self.assertEqual(tokens, expected_tokens)
@parameterized.expand([(get_asset_path("decoder/tokens.txt"),), (["-", "|", "f", "o", "b", "a", "r"],)])
def test_index_to_tokens(self, tokens):
# decoder tokens: '-' '|' 'f' 'o' 'b' 'a' 'r'
decoder = self._get_decoder(tokens)
idxs = torch.LongTensor((1, 2, 1, 3, 5))
tokens = decoder.idxs_to_tokens(idxs)
expected_tokens = ["|", "f", "|", "o", "a"]
self.assertEqual(tokens, expected_tokens)
|
# THIS FILE HAS BEEN AUTOGENERATED. To update:
# 1. modify the `_deps` dict in setup.py
# 2. run `make deps_table_update`
deps = {
"Pillow": "Pillow>=10.0.1,<=15.0",
"accelerate": "accelerate>=0.26.0",
"av": "av",
"beautifulsoup4": "beautifulsoup4",
"blobfile": "blobfile",
"codecarbon": "codecarbon>=2.8.1",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"hf_xet": "hf_xet",
"huggingface-hub": "huggingface-hub>=0.30.0,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"jax": "jax>=0.4.1,<=0.4.13",
"jaxlib": "jaxlib>=0.4.1,<=0.4.13",
"jieba": "jieba",
"jinja2": "jinja2>=3.1.0",
"kenlm": "kenlm",
"keras": "keras>2.9,<2.16",
"keras-nlp": "keras-nlp>=0.3.1,<0.14.0",
"kernels": "kernels>=0.6.1,<0.7",
"librosa": "librosa",
"natten": "natten>=0.14.6,<0.15.0",
"nltk": "nltk<=3.8.1",
"num2words": "num2words",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optimum-benchmark": "optimum-benchmark>=0.3.0",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"pandas": "pandas<2.3.0",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic>=2",
"pytest": "pytest>=7.2.0",
"pytest-asyncio": "pytest-asyncio",
"pytest-rerunfailures": "pytest-rerunfailures",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"pytest-order": "pytest-order",
"python": "python>=3.9.0",
"ray[tune]": "ray[tune]>=2.7.0",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff==0.11.2",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.4.3",
"sagemaker": "sagemaker>=2.31.0",
"schedulefree": "schedulefree>=1.2.6",
"scikit-learn": "scikit-learn",
"scipy": "scipy<1.13.0",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorboard": "tensorboard",
"tensorflow-cpu": "tensorflow-cpu>2.9,<2.16",
"tensorflow": "tensorflow>2.9,<2.16",
"tensorflow-text": "tensorflow-text<2.16",
"tensorflow-probability": "tensorflow-probability<0.24",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"tiktoken": "tiktoken",
"timm": "timm<=1.0.11",
"tokenizers": "tokenizers>=0.21,<0.22",
"torch": "torch>=2.1",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
"pytest-rich": "pytest-rich",
"libcst": "libcst",
"rich": "rich",
"opentelemetry-api": "opentelemetry-api",
"opentelemetry-exporter-otlp": "opentelemetry-exporter-otlp",
"opentelemetry-sdk": "opentelemetry-sdk",
}
|
# THIS FILE HAS BEEN AUTOGENERATED. To update:
# 1. modify the `_deps` dict in setup.py
# 2. run `make deps_table_update`
deps = {
"Pillow": "Pillow>=10.0.1,<=15.0",
"accelerate": "accelerate>=0.26.0",
"av": "av",
"beautifulsoup4": "beautifulsoup4",
"blobfile": "blobfile",
"codecarbon": "codecarbon>=2.8.1",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"hf_xet": "hf_xet",
"huggingface-hub": "huggingface-hub>=0.30.0,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"jax": "jax>=0.4.1,<=0.4.13",
"jaxlib": "jaxlib>=0.4.1,<=0.4.13",
"jieba": "jieba",
"jinja2": "jinja2>=3.1.0",
"kenlm": "kenlm",
"keras": "keras>2.9,<2.16",
"keras-nlp": "keras-nlp>=0.3.1,<0.14.0",
"kernels": "kernels>=0.6.1,<0.7",
"librosa": "librosa",
"natten": "natten>=0.14.6,<0.15.0",
"nltk": "nltk<=3.8.1",
"num2words": "num2words",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optimum-benchmark": "optimum-benchmark>=0.3.0",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"pandas": "pandas<2.3.0",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic",
"pytest": "pytest>=7.2.0",
"pytest-asyncio": "pytest-asyncio",
"pytest-rerunfailures": "pytest-rerunfailures",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"pytest-order": "pytest-order",
"python": "python>=3.9.0",
"ray[tune]": "ray[tune]>=2.7.0",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff==0.11.2",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.4.3",
"sagemaker": "sagemaker>=2.31.0",
"schedulefree": "schedulefree>=1.2.6",
"scikit-learn": "scikit-learn",
"scipy": "scipy<1.13.0",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorboard": "tensorboard",
"tensorflow-cpu": "tensorflow-cpu>2.9,<2.16",
"tensorflow": "tensorflow>2.9,<2.16",
"tensorflow-text": "tensorflow-text<2.16",
"tensorflow-probability": "tensorflow-probability<0.24",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"tiktoken": "tiktoken",
"timm": "timm<=1.0.11",
"tokenizers": "tokenizers>=0.21,<0.22",
"torch": "torch>=2.1",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
"pytest-rich": "pytest-rich",
"libcst": "libcst",
"rich": "rich",
"opentelemetry-api": "opentelemetry-api",
"opentelemetry-exporter-otlp": "opentelemetry-exporter-otlp",
"opentelemetry-sdk": "opentelemetry-sdk",
}
|
"""
This script contains an example of how to perform semantic search with OpenSearch.
You need OpenSearch up and running locally:
https://docs.opensearch.org/docs/latest/getting-started/quickstart/
Further, you need the Python OpenSearch Client installed: https://docs.opensearch.org/docs/latest/clients/python-low-level/, e.g.:
```
pip install opensearch-py
```
This script was created for `opensearch` v2.15.0+.
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.models import Router
from sentence_transformers.sparse_encoder.models import IDF, MLMTransformer, SpladePooling
from sentence_transformers.sparse_encoder.search_engines import semantic_search_opensearch
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train")
num_docs = 10_000
corpus = dataset["answer"][:num_docs]
print(f"Finish loading data. Corpus size: {len(corpus)}")
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
model_id = "opensearch-project/opensearch-neural-sparse-encoding-doc-v3-distill"
doc_encoder = MLMTransformer(model_id)
router = Router.for_query_document(
query_modules=[
IDF.from_json(
model_id,
tokenizer=doc_encoder.tokenizer,
frozen=True,
),
],
document_modules=[
doc_encoder,
SpladePooling("max", activation_function="log1p_relu"),
],
)
sparse_model = SparseEncoder(modules=[router], similarity_fn_name="dot")
print("Start encoding corpus...")
start_time = time.time()
# 4. Encode the corpus
corpus_embeddings = sparse_model.encode_document(
corpus, convert_to_sparse_tensor=True, batch_size=32, show_progress_bar=True
)
corpus_embeddings_decoded = sparse_model.decode(corpus_embeddings)
print(f"Corpus encoding time: {time.time() - start_time:.6f} seconds")
corpus_index = None
while True:
# 5. Encode the queries using inference-free mode
start_time = time.time()
query_embeddings = sparse_model.encode_query(queries, convert_to_sparse_tensor=True)
query_embeddings_decoded = sparse_model.decode(query_embeddings)
print(f"Query encoding time: {time.time() - start_time:.6f} seconds")
# 6. Perform semantic search using OpenSearch
results, search_time, corpus_index = semantic_search_opensearch(
query_embeddings_decoded,
corpus_embeddings_decoded=corpus_embeddings_decoded if corpus_index is None else None,
corpus_index=corpus_index,
top_k=5,
output_index=True,
)
# 7. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 8. Prompt for more queries
queries = [input("Please enter a question: ")]
|
"""
This script contains an example of how to perform semantic search with OpenSearch.
You need OpenSearch up and running locally:
https://docs.opensearch.org/docs/latest/getting-started/quickstart/
Further, you need the Python OpenSearch Client installed: https://docs.opensearch.org/docs/latest/clients/python-low-level/, e.g.:
```
pip install opensearch-py
```
This script was created for `opensearch` v2.15.0+.
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder, models
from sentence_transformers.sparse_encoder.models import IDF, MLMTransformer, SpladePooling
from sentence_transformers.sparse_encoder.search_engines import semantic_search_opensearch
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train")
num_docs = 10_000
corpus = dataset["answer"][:num_docs]
print(f"Finish loading data. Corpus size: {len(corpus)}")
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
model_id = "opensearch-project/opensearch-neural-sparse-encoding-doc-v3-distill"
doc_encoder = MLMTransformer(model_id)
asym = models.Asym(
{
"query": [
IDF.from_json(
model_id,
tokenizer=doc_encoder.tokenizer,
frozen=True,
),
],
"doc": [
doc_encoder,
SpladePooling("max", activation_function="log1p_relu"),
],
}
)
sparse_model = SparseEncoder(
modules=[asym],
similarity_fn_name="dot",
)
print("Start encoding corpus...")
start_time = time.time()
# 4. Encode the corpus
corpus_embeddings = sparse_model.encode(
[{"doc": doc} for doc in corpus], convert_to_sparse_tensor=True, batch_size=32, show_progress_bar=True
)
corpus_embeddings_decoded = sparse_model.decode(corpus_embeddings)
print(f"Corpus encoding time: {time.time() - start_time:.6f} seconds")
corpus_index = None
while True:
# 5. Encode the queries using inference-free mode
start_time = time.time()
query_embeddings = sparse_model.encode([{"query": query} for query in queries], convert_to_sparse_tensor=True)
query_embeddings_decoded = sparse_model.decode(query_embeddings)
print(f"Query encoding time: {time.time() - start_time:.6f} seconds")
# 6. Perform semantic search using OpenSearch
results, search_time, corpus_index = semantic_search_opensearch(
query_embeddings_decoded,
corpus_embeddings_decoded=corpus_embeddings_decoded if corpus_index is None else None,
corpus_index=corpus_index,
top_k=5,
output_index=True,
)
# 7. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 8. Prompt for more queries
queries = [input("Please enter a question: ")]
|
import math
import random
class NoDuplicatesDataLoader:
def __init__(self, train_examples, batch_size):
"""
A special data loader to be used with MultipleNegativesRankingLoss.
The data loader ensures that there are no duplicate sentences within the same batch
"""
self.batch_size = batch_size
self.data_pointer = 0
self.collate_fn = None
self.train_examples = train_examples
random.shuffle(self.train_examples)
def __iter__(self):
for _ in range(self.__len__()):
batch = []
texts_in_batch = set()
while len(batch) < self.batch_size:
example = self.train_examples[self.data_pointer]
valid_example = True
for text in example.texts:
if text.strip().lower() in texts_in_batch:
valid_example = False
break
if valid_example:
batch.append(example)
for text in example.texts:
texts_in_batch.add(text.strip().lower())
self.data_pointer += 1
if self.data_pointer >= len(self.train_examples):
self.data_pointer = 0
random.shuffle(self.train_examples)
yield self.collate_fn(batch) if self.collate_fn is not None else batch
def __len__(self):
return math.floor(len(self.train_examples) / self.batch_size)
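# Hypothetical usage sketch (illustration only; the stand-in example class and the
# toy sentences are assumptions). Any object exposing a ``texts`` attribute works,
# sentence_transformers.InputExample being the usual choice; the stand-in keeps the
# sketch self-contained.
if __name__ == "__main__":
    class _Example:
        def __init__(self, texts):
            self.texts = texts

    examples = [_Example([f"sentence {i}", f"paraphrase {i}"]) for i in range(10)]
    loader = NoDuplicatesDataLoader(examples, batch_size=4)
    first_batch = next(iter(loader))
    # Each yielded batch holds ``batch_size`` examples with no repeated sentences.
    print(len(first_batch))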
|
import random
import math
class NoDuplicatesDataLoader:
def __init__(self, train_examples, batch_size):
"""
A special data loader to be used with MultipleNegativesRankingLoss.
The data loader ensures that there are no duplicate sentences within the same batch
"""
self.batch_size = batch_size
self.data_pointer = 0
self.collate_fn = None
self.train_examples = train_examples
random.shuffle(self.train_examples)
def __iter__(self):
for _ in range(self.__len__()):
batch = []
texts_in_batch = set()
while len(batch) < self.batch_size:
example = self.train_examples[self.data_pointer]
valid_example = True
for text in example.texts:
if text.strip().lower() in texts_in_batch:
valid_example = False
break
if valid_example:
batch.append(example)
for text in example.texts:
texts_in_batch.add(text.strip().lower())
self.data_pointer += 1
if self.data_pointer >= len(self.train_examples):
self.data_pointer = 0
random.shuffle(self.train_examples)
yield self.collate_fn(batch) if self.collate_fn is not None else batch
def __len__(self):
return math.floor(len(self.train_examples) / self.batch_size)
|
from __future__ import annotations
try:
from typing import Self
except ImportError:
from typing_extensions import Self
import torch
from torch import nn
from sentence_transformers.models.Module import Module
class CNN(Module):
"""CNN-layer with multiple kernel-sizes over the word embeddings"""
config_keys: list[str] = ["in_word_embedding_dimension", "out_channels", "kernel_sizes"]
config_file_name: str = "cnn_config.json"
def __init__(
self,
in_word_embedding_dimension: int,
out_channels: int = 256,
kernel_sizes: list[int] = [1, 3, 5],
stride_sizes: list[int] = None,
):
nn.Module.__init__(self)
self.in_word_embedding_dimension = in_word_embedding_dimension
self.out_channels = out_channels
self.kernel_sizes = kernel_sizes
self.embeddings_dimension = out_channels * len(kernel_sizes)
self.convs = nn.ModuleList()
in_channels = in_word_embedding_dimension
if stride_sizes is None:
stride_sizes = [1] * len(kernel_sizes)
for kernel_size, stride in zip(kernel_sizes, stride_sizes):
padding_size = int((kernel_size - 1) / 2)
conv = nn.Conv1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding_size,
)
self.convs.append(conv)
def forward(self, features):
token_embeddings = features["token_embeddings"]
token_embeddings = token_embeddings.transpose(1, -1)
vectors = [conv(token_embeddings) for conv in self.convs]
out = torch.cat(vectors, 1).transpose(1, -1)
features.update({"token_embeddings": out})
return features
def get_word_embedding_dimension(self) -> int:
return self.embeddings_dimension
def save(self, output_path: str, *args, safe_serialization: bool = True, **kwargs) -> None:
self.save_config(output_path)
self.save_torch_weights(output_path, safe_serialization=safe_serialization)
@classmethod
def load(
cls,
model_name_or_path: str,
subfolder: str = "",
token: bool | str | None = None,
cache_folder: str | None = None,
revision: str | None = None,
local_files_only: bool = False,
**kwargs,
) -> Self:
hub_kwargs = {
"subfolder": subfolder,
"token": token,
"cache_folder": cache_folder,
"revision": revision,
"local_files_only": local_files_only,
}
config = cls.load_config(model_name_or_path=model_name_or_path, **hub_kwargs)
model = cls(**config)
model = cls.load_torch_weights(model_name_or_path=model_name_or_path, model=model, **hub_kwargs)
return model
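# Hypothetical usage sketch (illustration only; the dimensions are made up and
# only the imports already present above are assumed). Runs a batch of random
# word embeddings through the layer; with the default kernel sizes [1, 3, 5],
# each token ends up with 3 * out_channels features.
if __name__ == "__main__":
    cnn = CNN(in_word_embedding_dimension=128, out_channels=32)
    features = {"token_embeddings": torch.rand(2, 10, 128)}  # (batch, tokens, dim)
    out = cnn(features)["token_embeddings"]
    print(out.shape)  # expected: torch.Size([2, 10, 96])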
|
from __future__ import annotations
import json
import os
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import nn
class CNN(nn.Module):
"""CNN-layer with multiple kernel-sizes over the word embeddings"""
def __init__(
self,
in_word_embedding_dimension: int,
out_channels: int = 256,
kernel_sizes: list[int] = [1, 3, 5],
stride_sizes: list[int] = None,
):
nn.Module.__init__(self)
self.config_keys = ["in_word_embedding_dimension", "out_channels", "kernel_sizes"]
self.in_word_embedding_dimension = in_word_embedding_dimension
self.out_channels = out_channels
self.kernel_sizes = kernel_sizes
self.embeddings_dimension = out_channels * len(kernel_sizes)
self.convs = nn.ModuleList()
in_channels = in_word_embedding_dimension
if stride_sizes is None:
stride_sizes = [1] * len(kernel_sizes)
for kernel_size, stride in zip(kernel_sizes, stride_sizes):
padding_size = int((kernel_size - 1) / 2)
conv = nn.Conv1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding_size,
)
self.convs.append(conv)
def forward(self, features):
token_embeddings = features["token_embeddings"]
token_embeddings = token_embeddings.transpose(1, -1)
vectors = [conv(token_embeddings) for conv in self.convs]
out = torch.cat(vectors, 1).transpose(1, -1)
features.update({"token_embeddings": out})
return features
def get_word_embedding_dimension(self) -> int:
return self.embeddings_dimension
def tokenize(self, text: str, **kwargs) -> list[int]:
raise NotImplementedError()
def save(self, output_path: str, safe_serialization: bool = True):
with open(os.path.join(output_path, "cnn_config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
if safe_serialization:
save_safetensors_model(self, os.path.join(output_path, "model.safetensors"))
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "cnn_config.json")) as fIn:
config = json.load(fIn)
model = CNN(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(
os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"), weights_only=True
)
)
return model
|
import re
from typing import Any, Optional
from langchain_text_splitters import RecursiveCharacterTextSplitter
class JSFrameworkTextSplitter(RecursiveCharacterTextSplitter):
"""Text splitter that handles React (JSX), Vue, and Svelte code.
This splitter extends RecursiveCharacterTextSplitter to handle
React (JSX), Vue, and Svelte code by:
1. Detecting and extracting custom component tags from the text
2. Using those tags as additional separators along with standard JS syntax
The splitter combines:
- Custom component tags as separators (e.g. <Component, <div)
- JavaScript syntax elements (function, const, if, etc)
- Standard text splitting on newlines
This allows chunks to break at natural boundaries in
React, Vue, and Svelte component code.
"""
def __init__(
self,
separators: Optional[list[str]] = None,
chunk_size: int = 2000,
chunk_overlap: int = 0,
**kwargs: Any,
) -> None:
"""Initialize the JS Framework text splitter.
Args:
separators: Optional list of custom separator strings to use
chunk_size: Maximum size of chunks to return
chunk_overlap: Overlap in characters between chunks
**kwargs: Additional arguments to pass to parent class
"""
super().__init__(chunk_size=chunk_size, chunk_overlap=chunk_overlap, **kwargs)
self._separators = separators or []
def split_text(self, text: str) -> list[str]:
"""Split text into chunks.
This method splits the text into chunks by:
- Extracting unique opening component tags using regex
- Creating separators list with extracted tags and JS separators
- Splitting the text using the separators by calling the parent class method
Args:
text: String containing code to split
Returns:
List of text chunks split on component and JS boundaries
"""
# Extract unique opening component tags using regex
# Regex to match opening tags, excluding self-closing tags
opening_tags = re.findall(r"<\s*([a-zA-Z0-9]+)[^>]*>", text)
component_tags = []
for tag in opening_tags:
if tag not in component_tags:
component_tags.append(tag)
component_separators = [f"<{tag}" for tag in component_tags]
js_separators = [
"\nexport ",
" export ",
"\nfunction ",
"\nasync function ",
" async function ",
"\nconst ",
"\nlet ",
"\nvar ",
"\nclass ",
" class ",
"\nif ",
" if ",
"\nfor ",
" for ",
"\nwhile ",
" while ",
"\nswitch ",
" switch ",
"\ncase ",
" case ",
"\ndefault ",
" default ",
]
separators = (
self._separators
+ js_separators
+ component_separators
+ ["<>", "\n\n", "&&\n", "||\n"]
)
self._separators = separators
chunks = super().split_text(text)
return chunks
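# Hypothetical usage sketch (illustration only; the JSX snippet and chunk_size are
# assumptions). Splits a small React component; chunks break on the extracted
# component tags (<div, <h1) and on the JS separators defined above.
if __name__ == "__main__":
    jsx = (
        "import React from 'react';\n"
        "function App() {\n"
        "  return (\n"
        "    <div>\n"
        "      <h1>Hello</h1>\n"
        "    </div>\n"
        "  );\n"
        "}\n"
        "export default App;\n"
    )
    splitter = JSFrameworkTextSplitter(chunk_size=50, chunk_overlap=0)
    for chunk in splitter.split_text(jsx):
        print(repr(chunk))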
|
import re
from typing import Any, List, Optional
from langchain_text_splitters import RecursiveCharacterTextSplitter
class JSFrameworkTextSplitter(RecursiveCharacterTextSplitter):
"""Text splitter that handles React (JSX), Vue, and Svelte code.
This splitter extends RecursiveCharacterTextSplitter to handle
React (JSX), Vue, and Svelte code by:
1. Detecting and extracting custom component tags from the text
2. Using those tags as additional separators along with standard JS syntax
The splitter combines:
- Custom component tags as separators (e.g. <Component, <div)
- JavaScript syntax elements (function, const, if, etc)
- Standard text splitting on newlines
This allows chunks to break at natural boundaries in
React, Vue, and Svelte component code.
"""
def __init__(
self,
separators: Optional[List[str]] = None,
chunk_size: int = 2000,
chunk_overlap: int = 0,
**kwargs: Any,
) -> None:
"""Initialize the JS Framework text splitter.
Args:
separators: Optional list of custom separator strings to use
chunk_size: Maximum size of chunks to return
chunk_overlap: Overlap in characters between chunks
**kwargs: Additional arguments to pass to parent class
"""
super().__init__(chunk_size=chunk_size, chunk_overlap=chunk_overlap, **kwargs)
self._separators = separators or []
def split_text(self, text: str) -> List[str]:
"""Split text into chunks.
This method splits the text into chunks by:
- Extracting unique opening component tags using regex
- Creating separators list with extracted tags and JS separators
- Splitting the text using the separators by calling the parent class method
Args:
text: String containing code to split
Returns:
List of text chunks split on component and JS boundaries
"""
# Extract unique opening component tags using regex
# Regex to match opening tags, excluding self-closing tags
opening_tags = re.findall(r"<\s*([a-zA-Z0-9]+)[^>]*>", text)
component_tags = []
for tag in opening_tags:
if tag not in component_tags:
component_tags.append(tag)
component_separators = [f"<{tag}" for tag in component_tags]
js_separators = [
"\nexport ",
" export ",
"\nfunction ",
"\nasync function ",
" async function ",
"\nconst ",
"\nlet ",
"\nvar ",
"\nclass ",
" class ",
"\nif ",
" if ",
"\nfor ",
" for ",
"\nwhile ",
" while ",
"\nswitch ",
" switch ",
"\ncase ",
" case ",
"\ndefault ",
" default ",
]
separators = (
self._separators
+ js_separators
+ component_separators
+ ["<>", "\n\n", "&&\n", "||\n"]
)
self._separators = separators
chunks = super().split_text(text)
return chunks
|
# Copyright (c) OpenMMLab. All rights reserved.
import glob
import os
import os.path as osp
import urllib
import warnings
from typing import Union
import torch
from mmengine.config import Config, ConfigDict
from mmengine.logging import print_log
from mmengine.utils import scandir
IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif',
'.tiff', '.webp')
def find_latest_checkpoint(path, suffix='pth'):
"""Find the latest checkpoint from the working directory.
Args:
path(str): The path to find checkpoints.
suffix(str): File extension.
Defaults to pth.
Returns:
latest_path(str | None): File path of the latest checkpoint.
References:
.. [1] https://github.com/microsoft/SoftTeacher
/blob/main/ssod/utils/patch.py
"""
if not osp.exists(path):
warnings.warn('The path of checkpoints does not exist.')
return None
if osp.exists(osp.join(path, f'latest.{suffix}')):
return osp.join(path, f'latest.{suffix}')
checkpoints = glob.glob(osp.join(path, f'*.{suffix}'))
if len(checkpoints) == 0:
warnings.warn('There are no checkpoints in the path.')
return None
latest = -1
latest_path = None
for checkpoint in checkpoints:
count = int(osp.basename(checkpoint).split('_')[-1].split('.')[0])
if count > latest:
latest = count
latest_path = checkpoint
return latest_path
def update_data_root(cfg, logger=None):
"""Update data root according to env MMDET_DATASETS.
If set env MMDET_DATASETS, update cfg.data_root according to
MMDET_DATASETS. Otherwise, using cfg.data_root as default.
Args:
cfg (:obj:`Config`): The model config need to modify
logger (logging.Logger | str | None): the way to print msg
"""
assert isinstance(cfg, Config), \
f'cfg got wrong type: {type(cfg)}, expected mmengine.Config'
if 'MMDET_DATASETS' in os.environ:
dst_root = os.environ['MMDET_DATASETS']
        print_log(f'MMDET_DATASETS has been set to be {dst_root}. '
                  f'Using {dst_root} as data root.')
else:
return
assert isinstance(cfg, Config), \
f'cfg got wrong type: {type(cfg)}, expected mmengine.Config'
def update(cfg, src_str, dst_str):
for k, v in cfg.items():
if isinstance(v, ConfigDict):
update(cfg[k], src_str, dst_str)
if isinstance(v, str) and src_str in v:
cfg[k] = v.replace(src_str, dst_str)
update(cfg.data, cfg.data_root, dst_root)
cfg.data_root = dst_root
def get_test_pipeline_cfg(cfg: Union[str, ConfigDict]) -> ConfigDict:
"""Get the test dataset pipeline from entire config.
Args:
cfg (str or :obj:`ConfigDict`): the entire config. Can be a config
file or a ``ConfigDict``.
Returns:
:obj:`ConfigDict`: the config of test dataset.
"""
if isinstance(cfg, str):
cfg = Config.fromfile(cfg)
def _get_test_pipeline_cfg(dataset_cfg):
if 'pipeline' in dataset_cfg:
return dataset_cfg.pipeline
# handle dataset wrapper
elif 'dataset' in dataset_cfg:
return _get_test_pipeline_cfg(dataset_cfg.dataset)
# handle dataset wrappers like ConcatDataset
elif 'datasets' in dataset_cfg:
return _get_test_pipeline_cfg(dataset_cfg.datasets[0])
raise RuntimeError('Cannot find `pipeline` in `test_dataloader`')
return _get_test_pipeline_cfg(cfg.test_dataloader.dataset)
def get_file_list(source_root: str) -> [list, dict]:
"""Get file list.
Args:
source_root (str): image or video source path
Return:
source_file_path_list (list): A list for all source file.
source_type (dict): Source type: file or url or dir.
"""
is_dir = os.path.isdir(source_root)
is_url = source_root.startswith(('http:/', 'https:/'))
is_file = os.path.splitext(source_root)[-1].lower() in IMG_EXTENSIONS
source_file_path_list = []
if is_dir:
# when input source is dir
for file in scandir(source_root, IMG_EXTENSIONS, recursive=True):
source_file_path_list.append(os.path.join(source_root, file))
elif is_url:
# when input source is url
filename = os.path.basename(
urllib.parse.unquote(source_root).split('?')[0])
file_save_path = os.path.join(os.getcwd(), filename)
print(f'Downloading source file to {file_save_path}')
torch.hub.download_url_to_file(source_root, file_save_path)
source_file_path_list = [file_save_path]
elif is_file:
# when input source is single image
source_file_path_list = [source_root]
else:
print('Cannot find image file.')
source_type = dict(is_dir=is_dir, is_url=is_url, is_file=is_file)
return source_file_path_list, source_type
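# Hypothetical usage sketch (illustration only; the work directory path is a
# placeholder). Picks the newest "*_<epoch>.pth" checkpoint from a directory and
# simply returns None (after a warning) when nothing is found.
if __name__ == "__main__":
    ckpt = find_latest_checkpoint('./work_dirs/example_run')
    if ckpt is not None:
        print(f'Resuming from {ckpt}')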
|
# Copyright (c) OpenMMLab. All rights reserved.
import glob
import os
import os.path as osp
import warnings
from typing import Union
from mmengine.config import Config, ConfigDict
from mmengine.logging import print_log
def find_latest_checkpoint(path, suffix='pth'):
"""Find the latest checkpoint from the working directory.
Args:
path(str): The path to find checkpoints.
suffix(str): File extension.
Defaults to pth.
Returns:
latest_path(str | None): File path of the latest checkpoint.
References:
.. [1] https://github.com/microsoft/SoftTeacher
/blob/main/ssod/utils/patch.py
"""
if not osp.exists(path):
warnings.warn('The path of checkpoints does not exist.')
return None
if osp.exists(osp.join(path, f'latest.{suffix}')):
return osp.join(path, f'latest.{suffix}')
checkpoints = glob.glob(osp.join(path, f'*.{suffix}'))
if len(checkpoints) == 0:
warnings.warn('There are no checkpoints in the path.')
return None
latest = -1
latest_path = None
for checkpoint in checkpoints:
count = int(osp.basename(checkpoint).split('_')[-1].split('.')[0])
if count > latest:
latest = count
latest_path = checkpoint
return latest_path
def update_data_root(cfg, logger=None):
"""Update data root according to env MMDET_DATASETS.
If set env MMDET_DATASETS, update cfg.data_root according to
MMDET_DATASETS. Otherwise, using cfg.data_root as default.
Args:
cfg (:obj:`Config`): The model config need to modify
logger (logging.Logger | str | None): the way to print msg
"""
assert isinstance(cfg, Config), \
f'cfg got wrong type: {type(cfg)}, expected mmengine.Config'
if 'MMDET_DATASETS' in os.environ:
dst_root = os.environ['MMDET_DATASETS']
        print_log(f'MMDET_DATASETS has been set to be {dst_root}. '
                  f'Using {dst_root} as data root.')
else:
return
assert isinstance(cfg, Config), \
f'cfg got wrong type: {type(cfg)}, expected mmengine.Config'
def update(cfg, src_str, dst_str):
for k, v in cfg.items():
if isinstance(v, ConfigDict):
update(cfg[k], src_str, dst_str)
if isinstance(v, str) and src_str in v:
cfg[k] = v.replace(src_str, dst_str)
update(cfg.data, cfg.data_root, dst_root)
cfg.data_root = dst_root
def get_test_pipeline_cfg(cfg: Union[str, ConfigDict]) -> ConfigDict:
"""Get the test dataset pipeline from entire config.
Args:
cfg (str or :obj:`ConfigDict`): the entire config. Can be a config
file or a ``ConfigDict``.
Returns:
:obj:`ConfigDict`: the config of test dataset.
"""
if isinstance(cfg, str):
cfg = Config.fromfile(cfg)
def _get_test_pipeline_cfg(dataset_cfg):
if 'pipeline' in dataset_cfg:
return dataset_cfg.pipeline
# handle dataset wrapper
elif 'dataset' in dataset_cfg:
return _get_test_pipeline_cfg(dataset_cfg.dataset)
# handle dataset wrappers like ConcatDataset
elif 'datasets' in dataset_cfg:
return _get_test_pipeline_cfg(dataset_cfg.datasets[0])
raise RuntimeError('Cannot find `pipeline` in `test_dataloader`')
return _get_test_pipeline_cfg(cfg.test_dataloader.dataset)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_data_element import BaseDataElement
from .instance_data import InstanceData
from .label_data import LabelData
from .pixel_data import PixelData
from .sampler import DefaultSampler, InfiniteSampler
from .utils import pseudo_collate, worker_init_fn
__all__ = [
'BaseDataElement', 'DefaultSampler', 'InfiniteSampler', 'worker_init_fn',
'pseudo_collate', 'InstanceData', 'LabelData', 'PixelData'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_data_element import BaseDataElement
from .instance_data import InstanceData
from .sampler import DefaultSampler, InfiniteSampler
from .utils import pseudo_collate, worker_init_fn
__all__ = [
'BaseDataElement', 'DefaultSampler', 'InfiniteSampler', 'worker_init_fn',
'pseudo_collate', 'InstanceData'
]
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_shrink
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_silu as hard_swish
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_sigmoid
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import silu as swish
from keras.src.activations.activations import soft_shrink
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import sparse_plus
from keras.src.activations.activations import sparse_sigmoid
from keras.src.activations.activations import sparsemax
from keras.src.activations.activations import squareplus
from keras.src.activations.activations import tanh
from keras.src.activations.activations import tanh_shrink
from keras.src.activations.activations import threshold
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_shrink
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_silu as hard_swish
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_sigmoid
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import silu as swish
from keras.src.activations.activations import soft_shrink
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import sparse_plus
from keras.src.activations.activations import sparsemax
from keras.src.activations.activations import squareplus
from keras.src.activations.activations import tanh
from keras.src.activations.activations import tanh_shrink
from keras.src.activations.activations import threshold
|
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers import util
from sentence_transformers.losses.MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseMultipleNegativesRankingLoss(MultipleNegativesRankingLoss):
def __init__(self, model: SparseEncoder, scale: float = 1.0, similarity_fct=util.dot_score) -> None:
"""
Given a list of (anchor, positive) pairs or (anchor, positive, negative) triplets, this loss optimizes the following:
1. Given an anchor (e.g. a question), assign the highest similarity to the corresponding positive (i.e. answer)
out of every single positive and negative (e.g. all answers) in the batch.
If you provide the optional negatives, they will all be used as extra options from which the model must pick the
correct positive. Within reason, the harder this "picking" is, the stronger the model will become. Because of
this, a higher batch size results in more in-batch negatives, which then increases performance (to a point).
This loss function works great to train embeddings for retrieval setups where you have positive pairs
(e.g. (query, answer)) as it will sample in each batch ``n-1`` negative docs randomly.
This loss is also known as InfoNCE loss, SimCSE loss, Cross-Entropy Loss with in-batch negatives, or simply
in-batch negatives loss.
Args:
model: SparseEncoder model
scale: Output of similarity function is multiplied by scale
value
similarity_fct: similarity function between sentence
embeddings. By default, dot product. Can also be set to cosine
similarity (and then set scale to 20)
Requirements:
            1. Needs to be used within SpladeLoss or CSRLoss as a loss function.
2. (anchor, positive) pairs or (anchor, positive, negative) triplets
Inputs:
+-------------------------------------------------+--------+
| Texts | Labels |
+=================================================+========+
| (anchor, positive) pairs | none |
+-------------------------------------------------+--------+
| (anchor, positive, negative) triplets | none |
+-------------------------------------------------+--------+
| (anchor, positive, negative_1, ..., negative_n) | none |
+-------------------------------------------------+--------+
Recommendations:
- Use ``BatchSamplers.NO_DUPLICATES`` (:class:`docs <sentence_transformers.training_args.BatchSamplers>`) to
ensure that no in-batch negatives are duplicates of the anchor or positive samples.
Relations:
- :class:`SparseCachedMultipleNegativesRankingLoss` is equivalent to this loss, but it uses caching that allows for
much higher batch sizes (and thus better performance) without extra memory usage. However, it is slightly
slower.
- :class:`SparseGISTEmbedLoss` is equivalent to this loss, but uses a guide model to guide the in-batch negative
sample selection. `SparseGISTEmbedLoss` yields a stronger training signal at the cost of some training overhead.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
}
)
loss = losses.SpladeLoss(
model=model, loss=losses.SparseMultipleNegativesRankingLoss(model), document_regularizer_weight=3e-5, query_regularizer_weight=5e-5
)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
        super().__init__(model, scale=scale, similarity_fct=similarity_fct)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError(
"SparseMultipleNegativesRankingLoss should not be used alone. Use it with SpladeLoss or CSRLoss."
)
|
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers import util
from sentence_transformers.losses.MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseMultipleNegativesRankingLoss(MultipleNegativesRankingLoss):
def __init__(self, model: SparseEncoder, scale: float = 1.0, similarity_fct=util.dot_score) -> None:
"""
Given a list of (anchor, positive) pairs or (anchor, positive, negative) triplets, this loss optimizes the following:
1. Given an anchor (e.g. a question), assign the highest similarity to the corresponding positive (i.e. answer)
out of every single positive and negative (e.g. all answers) in the batch.
If you provide the optional negatives, they will all be used as extra options from which the model must pick the
correct positive. Within reason, the harder this "picking" is, the stronger the model will become. Because of
this, a higher batch size results in more in-batch negatives, which then increases performance (to a point).
This loss function works great to train embeddings for retrieval setups where you have positive pairs
(e.g. (query, answer)) as it will sample in each batch ``n-1`` negative docs randomly.
This loss is also known as InfoNCE loss, SimCSE loss, Cross-Entropy Loss with in-batch negatives, or simply
in-batch negatives loss.
Args:
model: SparseEncoder model
scale: Output of similarity function is multiplied by scale
value
similarity_fct: similarity function between sentence
embeddings. By default, dot product. Can also be set to cosine
similarity (and then set scale to 20)
Requirements:
            1. Needs to be used within SpladeLoss or CSRLoss as a loss function.
2. (anchor, positive) pairs or (anchor, positive, negative) triplets
Inputs:
+-------------------------------------------------+--------+
| Texts | Labels |
+=================================================+========+
| (anchor, positive) pairs | none |
+-------------------------------------------------+--------+
| (anchor, positive, negative) triplets | none |
+-------------------------------------------------+--------+
| (anchor, positive, negative_1, ..., negative_n) | none |
+-------------------------------------------------+--------+
Recommendations:
- Use ``BatchSamplers.NO_DUPLICATES`` (:class:`docs <sentence_transformers.training_args.BatchSamplers>`) to
ensure that no in-batch negatives are duplicates of the anchor or positive samples.
Relations:
- :class:`SparseCachedMultipleNegativesRankingLoss` is equivalent to this loss, but it uses caching that allows for
much higher batch sizes (and thus better performance) without extra memory usage. However, it is slightly
slower.
- :class:`SparseGISTEmbedLoss` is equivalent to this loss, but uses a guide model to guide the in-batch negative
sample selection. `SparseGISTEmbedLoss` yields a stronger training signal at the cost of some training overhead.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
}
)
loss = losses.SpladeLoss(
model=model, loss=losses.SparseMultipleNegativesRankingLoss(model), corpus_regularizer_weight=3e-5, query_regularizer_weight=5e-5
)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
        super().__init__(model, scale=scale, similarity_fct=similarity_fct)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError(
"SparseMultipleNegativesRankingLoss should not be used alone. Use it with SpladeLoss or CSRLoss."
)
|
import os
import warnings
from modulefinder import Module
import torch
from torchvision import datasets, io, models, ops, transforms, utils
from .extension import _HAS_OPS, _load_library
try:
from .version import __version__ # noqa: F401
except ImportError:
pass
try:
_load_library("Decoder")
_HAS_GPU_VIDEO_DECODER = True
except (ImportError, OSError, ModuleNotFoundError):
_HAS_GPU_VIDEO_DECODER = False
# Check if torchvision is being imported within the root folder
if not _HAS_OPS and os.path.dirname(os.path.realpath(__file__)) == os.path.join(
os.path.realpath(os.getcwd()), "torchvision"
):
message = (
"You are importing torchvision within its own root folder ({}). "
"This is not expected to work and may give errors. Please exit the "
"torchvision project source and relaunch your python interpreter."
)
warnings.warn(message.format(os.getcwd()))
_image_backend = "PIL"
_video_backend = "pyav"
def set_image_backend(backend):
"""
Specifies the package used to load images.
Args:
backend (string): Name of the image backend. one of {'PIL', 'accimage'}.
The :mod:`accimage` package uses the Intel IPP library. It is
generally faster than PIL, but does not support as many operations.
"""
global _image_backend
if backend not in ["PIL", "accimage"]:
raise ValueError(f"Invalid backend '{backend}'. Options are 'PIL' and 'accimage'")
_image_backend = backend
def get_image_backend():
"""
Gets the name of the package used to load images
"""
return _image_backend
def set_video_backend(backend):
"""
Specifies the package used to decode videos.
Args:
backend (string): Name of the video backend. one of {'pyav', 'video_reader'}.
The :mod:`pyav` package uses the 3rd party PyAv library. It is a Pythonic
binding for the FFmpeg libraries.
The :mod:`video_reader` package includes a native C++ implementation on
top of FFMPEG libraries, and a python API of TorchScript custom operator.
It generally decodes faster than :mod:`pyav`, but is perhaps less robust.
.. note::
Building with FFMPEG is disabled by default in the latest `main`. If you want to use the 'video_reader'
backend, please compile torchvision from source.
"""
global _video_backend
if backend not in ["pyav", "video_reader", "cuda"]:
raise ValueError("Invalid video backend '%s'. Options are 'pyav', 'video_reader' and 'cuda'" % backend)
if backend == "video_reader" and not io._HAS_VIDEO_OPT:
# TODO: better messages
message = "video_reader video backend is not available. Please compile torchvision from source and try again"
raise RuntimeError(message)
elif backend == "cuda" and not _HAS_GPU_VIDEO_DECODER:
# TODO: better messages
message = "cuda video backend is not available."
raise RuntimeError(message)
else:
_video_backend = backend
def get_video_backend():
"""
Returns the currently active video backend used to decode videos.
Returns:
str: Name of the video backend. one of {'pyav', 'video_reader'}.
"""
return _video_backend
def _is_tracing():
return torch._C._get_tracing_state()
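# --- Minimal usage sketch (not part of the original module): the setters/getters above are
# re-exported at the package level, so typical callers go through `torchvision.set_image_backend(...)`.
# Here the local functions are exercised directly.
if __name__ == "__main__":
    set_image_backend("PIL")
    assert get_image_backend() == "PIL"
    set_video_backend("pyav")
    print(get_video_backend())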
|
import os
import warnings
import torch
from torchvision import datasets, io, models, ops, transforms, utils
from .extension import _HAS_OPS
try:
from .version import __version__ # noqa: F401
except ImportError:
pass
# Check if torchvision is being imported within the root folder
if not _HAS_OPS and os.path.dirname(os.path.realpath(__file__)) == os.path.join(
os.path.realpath(os.getcwd()), "torchvision"
):
message = (
"You are importing torchvision within its own root folder ({}). "
"This is not expected to work and may give errors. Please exit the "
"torchvision project source and relaunch your python interpreter."
)
warnings.warn(message.format(os.getcwd()))
_image_backend = "PIL"
_video_backend = "pyav"
def set_image_backend(backend):
"""
Specifies the package used to load images.
Args:
backend (string): Name of the image backend. one of {'PIL', 'accimage'}.
The :mod:`accimage` package uses the Intel IPP library. It is
generally faster than PIL, but does not support as many operations.
"""
global _image_backend
if backend not in ["PIL", "accimage"]:
raise ValueError(f"Invalid backend '{backend}'. Options are 'PIL' and 'accimage'")
_image_backend = backend
def get_image_backend():
"""
Gets the name of the package used to load images
"""
return _image_backend
def set_video_backend(backend):
"""
Specifies the package used to decode videos.
Args:
backend (string): Name of the video backend. one of {'pyav', 'video_reader'}.
The :mod:`pyav` package uses the 3rd party PyAv library. It is a Pythonic
binding for the FFmpeg libraries.
The :mod:`video_reader` package includes a native C++ implementation on
top of FFMPEG libraries, and a python API of TorchScript custom operator.
It generally decodes faster than :mod:`pyav`, but is perhaps less robust.
.. note::
Building with FFMPEG is disabled by default in the latest `main`. If you want to use the 'video_reader'
backend, please compile torchvision from source.
"""
global _video_backend
if backend not in ["pyav", "video_reader"]:
raise ValueError("Invalid video backend '%s'. Options are 'pyav' and 'video_reader'" % backend)
if backend == "video_reader" and not io._HAS_VIDEO_OPT:
message = "video_reader video backend is not available. Please compile torchvision from source and try again"
warnings.warn(message)
else:
_video_backend = backend
def get_video_backend():
"""
Returns the currently active video backend used to decode videos.
Returns:
str: Name of the video backend. one of {'pyav', 'video_reader'}.
"""
return _video_backend
def _is_tracing():
return torch._C._get_tracing_state()
|
import pytest
from absl.testing import parameterized
from keras.src import backend
from keras.src import layers
from keras.src import testing
from keras.src.backend.common.keras_tensor import KerasTensor
class ReshapeTest(testing.TestCase):
@parameterized.named_parameters(
[
{"testcase_name": "dense", "sparse": False},
{"testcase_name": "sparse", "sparse": True},
]
)
@pytest.mark.requires_trainable_backend
def test_reshape(self, sparse):
if sparse and not backend.SUPPORTS_SPARSE_TENSORS:
pytest.skip("Backend does not support sparse tensors.")
self.run_layer_test(
layers.Reshape,
init_kwargs={"target_shape": (8, 1)},
input_shape=(3, 2, 4),
input_sparse=sparse,
expected_output_shape=(3, 8, 1),
expected_output_sparse=sparse,
run_training_check=not sparse,
)
self.run_layer_test(
layers.Reshape,
init_kwargs={"target_shape": (8,)},
input_shape=(3, 2, 4),
input_sparse=sparse,
expected_output_shape=(3, 8),
expected_output_sparse=sparse,
run_training_check=not sparse,
)
self.run_layer_test(
layers.Reshape,
init_kwargs={"target_shape": (2, 4)},
input_shape=(3, 8),
input_sparse=sparse,
expected_output_shape=(3, 2, 4),
expected_output_sparse=sparse,
run_training_check=not sparse,
)
self.run_layer_test(
layers.Reshape,
init_kwargs={"target_shape": (-1, 1)},
input_shape=(3, 2, 4),
input_sparse=sparse,
expected_output_shape=(3, 8, 1),
expected_output_sparse=sparse,
run_training_check=not sparse,
)
self.run_layer_test(
layers.Reshape,
init_kwargs={"target_shape": (1, -1)},
input_shape=(3, 2, 4),
input_sparse=sparse,
expected_output_shape=(3, 1, 8),
expected_output_sparse=sparse,
run_training_check=not sparse,
)
self.run_layer_test(
layers.Reshape,
init_kwargs={"target_shape": (-1,)},
input_shape=(3, 2, 4),
input_sparse=sparse,
expected_output_shape=(3, 8),
expected_output_sparse=sparse,
run_training_check=not sparse,
)
self.run_layer_test(
layers.Reshape,
init_kwargs={"target_shape": (2, -1)},
input_shape=(3, 2, 4),
input_sparse=sparse,
expected_output_shape=(3, 2, 4),
expected_output_sparse=sparse,
run_training_check=not sparse,
)
def test_reshape_with_dynamic_batch_size(self):
input_layer = layers.Input(shape=(2, 4))
reshaped = layers.Reshape((8,))(input_layer)
self.assertEqual(reshaped.shape, (None, 8))
def test_reshape_with_dynamic_batch_size_and_minus_one(self):
input = KerasTensor((None, 6, 4))
layer = layers.Reshape((-1, 8))
layer.build(input.shape)
reshaped = backend.compute_output_spec(layer.__call__, input)
self.assertEqual(reshaped.shape, (None, 3, 8))
def test_reshape_with_dynamic_dim_and_minus_one(self):
input = KerasTensor((4, 6, None, 3))
layer = layers.Reshape((-1, 3))
layer.build(input.shape)
reshaped = backend.compute_output_spec(layer.__call__, input)
self.assertEqual(reshaped.shape, (4, None, 3))
def test_reshape_sets_static_shape(self):
input_layer = layers.Input(batch_shape=(2, None))
reshaped = layers.Reshape((3, 5))(input_layer)
# Also make sure the batch dim is not lost after reshape.
self.assertEqual(reshaped.shape, (2, 3, 5))
|
import pytest
from absl.testing import parameterized
from keras.src import backend
from keras.src import layers
from keras.src import testing
from keras.src.backend.common.keras_tensor import KerasTensor
class ReshapeTest(testing.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
[
{"testcase_name": "dense", "sparse": False},
{"testcase_name": "sparse", "sparse": True},
]
)
@pytest.mark.requires_trainable_backend
def test_reshape(self, sparse):
if sparse and not backend.SUPPORTS_SPARSE_TENSORS:
pytest.skip("Backend does not support sparse tensors.")
self.run_layer_test(
layers.Reshape,
init_kwargs={"target_shape": (8, 1)},
input_shape=(3, 2, 4),
input_sparse=sparse,
expected_output_shape=(3, 8, 1),
expected_output_sparse=sparse,
run_training_check=not sparse,
)
self.run_layer_test(
layers.Reshape,
init_kwargs={"target_shape": (8,)},
input_shape=(3, 2, 4),
input_sparse=sparse,
expected_output_shape=(3, 8),
expected_output_sparse=sparse,
run_training_check=not sparse,
)
self.run_layer_test(
layers.Reshape,
init_kwargs={"target_shape": (2, 4)},
input_shape=(3, 8),
input_sparse=sparse,
expected_output_shape=(3, 2, 4),
expected_output_sparse=sparse,
run_training_check=not sparse,
)
self.run_layer_test(
layers.Reshape,
init_kwargs={"target_shape": (-1, 1)},
input_shape=(3, 2, 4),
input_sparse=sparse,
expected_output_shape=(3, 8, 1),
expected_output_sparse=sparse,
run_training_check=not sparse,
)
self.run_layer_test(
layers.Reshape,
init_kwargs={"target_shape": (1, -1)},
input_shape=(3, 2, 4),
input_sparse=sparse,
expected_output_shape=(3, 1, 8),
expected_output_sparse=sparse,
run_training_check=not sparse,
)
self.run_layer_test(
layers.Reshape,
init_kwargs={"target_shape": (-1,)},
input_shape=(3, 2, 4),
input_sparse=sparse,
expected_output_shape=(3, 8),
expected_output_sparse=sparse,
run_training_check=not sparse,
)
self.run_layer_test(
layers.Reshape,
init_kwargs={"target_shape": (2, -1)},
input_shape=(3, 2, 4),
input_sparse=sparse,
expected_output_shape=(3, 2, 4),
expected_output_sparse=sparse,
run_training_check=not sparse,
)
def test_reshape_with_dynamic_batch_size(self):
input_layer = layers.Input(shape=(2, 4))
reshaped = layers.Reshape((8,))(input_layer)
self.assertEqual(reshaped.shape, (None, 8))
def test_reshape_with_dynamic_batch_size_and_minus_one(self):
input = KerasTensor((None, 6, 4))
layer = layers.Reshape((-1, 8))
layer.build(input.shape)
reshaped = backend.compute_output_spec(layer.__call__, input)
self.assertEqual(reshaped.shape, (None, 3, 8))
def test_reshape_with_dynamic_dim_and_minus_one(self):
input = KerasTensor((4, 6, None, 3))
layer = layers.Reshape((-1, 3))
layer.build(input.shape)
reshaped = backend.compute_output_spec(layer.__call__, input)
self.assertEqual(reshaped.shape, (4, None, 3))
def test_reshape_sets_static_shape(self):
input_layer = layers.Input(batch_shape=(2, None))
reshaped = layers.Reshape((3, 5))(input_layer)
# Also make sure the batch dim is not lost after reshape.
self.assertEqual(reshaped.shape, (2, 3, 5))
|
import enum
from typing import Any, List, Optional, Union
import pydantic
import backend.data.graph
from backend.data.api_key import APIKeyPermission, APIKeyWithoutHash
class Methods(enum.Enum):
SUBSCRIBE = "subscribe"
UNSUBSCRIBE = "unsubscribe"
EXECUTION_EVENT = "execution_event"
ERROR = "error"
HEARTBEAT = "heartbeat"
class WsMessage(pydantic.BaseModel):
method: Methods
data: Optional[Union[dict[str, Any], list[Any], str]] = None
success: bool | None = None
channel: str | None = None
error: str | None = None
class ExecutionSubscription(pydantic.BaseModel):
graph_id: str
class SubscriptionDetails(pydantic.BaseModel):
event_type: str
channel: str
graph_id: str
class CreateGraph(pydantic.BaseModel):
template_id: str | None = None
template_version: int | None = None
graph: backend.data.graph.Graph | None = None
class CreateAPIKeyRequest(pydantic.BaseModel):
name: str
permissions: List[APIKeyPermission]
description: Optional[str] = None
class CreateAPIKeyResponse(pydantic.BaseModel):
api_key: APIKeyWithoutHash
plain_text_key: str
class SetGraphActiveVersion(pydantic.BaseModel):
active_graph_version: int
class UpdatePermissionsRequest(pydantic.BaseModel):
permissions: List[APIKeyPermission]
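# --- Minimal usage sketch (not part of the original module; "g-123" and the channel name are
# purely illustrative values). Builds a subscription message from the models above.
if __name__ == "__main__":
    msg = WsMessage(
        method=Methods.SUBSCRIBE,
        data={"graph_id": "g-123"},
        channel="graph-executions",
    )
    print(msg)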
|
import enum
import typing
import pydantic
import backend.data.graph
from backend.data.api_key import APIKeyPermission, APIKeyWithoutHash
class Methods(enum.Enum):
SUBSCRIBE = "subscribe"
UNSUBSCRIBE = "unsubscribe"
EXECUTION_EVENT = "execution_event"
ERROR = "error"
class WsMessage(pydantic.BaseModel):
method: Methods
data: typing.Dict[str, typing.Any] | list[typing.Any] | None = None
success: bool | None = None
channel: str | None = None
error: str | None = None
class ExecutionSubscription(pydantic.BaseModel):
graph_id: str
class SubscriptionDetails(pydantic.BaseModel):
event_type: str
channel: str
graph_id: str
class CreateGraph(pydantic.BaseModel):
template_id: str | None = None
template_version: int | None = None
graph: backend.data.graph.Graph | None = None
class CreateAPIKeyRequest(pydantic.BaseModel):
name: str
permissions: typing.List[APIKeyPermission]
description: typing.Optional[str] = None
class CreateAPIKeyResponse(pydantic.BaseModel):
api_key: APIKeyWithoutHash
plain_text_key: str
class SetGraphActiveVersion(pydantic.BaseModel):
active_graph_version: int
class UpdatePermissionsRequest(pydantic.BaseModel):
permissions: typing.List[APIKeyPermission]
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
from unittest import TestCase
from unittest.mock import Mock
import torch
import torch.nn as nn
from torch.utils.data import Dataset
from mmengine.hooks import EMAHook
from mmengine.model import BaseModel, ExponentialMovingAverage
from mmengine.optim import OptimWrapper
from mmengine.registry import DATASETS, MODEL_WRAPPERS
from mmengine.runner import Runner
class ToyModel(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(2, 1)
def forward(self, batch_inputs, labels, mode='tensor'):
labels = torch.stack(labels)
outputs = self.linear(batch_inputs)
if mode == 'tensor':
return outputs
elif mode == 'loss':
loss = (labels - outputs).sum()
outputs = dict(loss=loss)
return outputs
else:
return outputs
class ToyModel1(BaseModel, ToyModel):
def __init__(self):
super().__init__()
def forward(self, *args, **kwargs):
return super(BaseModel, self).forward(*args, **kwargs)
@DATASETS.register_module()
class DummyDataset(Dataset):
METAINFO = dict() # type: ignore
data = torch.randn(12, 2)
label = torch.ones(12)
@property
def metainfo(self):
return self.METAINFO
def __len__(self):
return self.data.size(0)
def __getitem__(self, index):
return dict(inputs=self.data[index], data_sample=self.label[index])
class TestEMAHook(TestCase):
def setUp(self):
self.temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.temp_dir.cleanup()
def test_ema_hook(self):
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
model = ToyModel1().to(device)
evaluator = Mock()
evaluator.evaluate = Mock(return_value=dict(acc=0.5))
runner = Runner(
model=model,
train_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=True),
batch_size=3,
num_workers=0),
val_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=False),
batch_size=3,
num_workers=0),
val_evaluator=evaluator,
work_dir=self.temp_dir.name,
optim_wrapper=OptimWrapper(
torch.optim.Adam(ToyModel().parameters())),
train_cfg=dict(by_epoch=True, max_epochs=2, val_interval=1),
val_cfg=dict(),
default_hooks=dict(logger=None),
custom_hooks=[dict(type='EMAHook', )],
experiment_name='test1')
runner.train()
for hook in runner.hooks:
if isinstance(hook, EMAHook):
self.assertTrue(
isinstance(hook.ema_model, ExponentialMovingAverage))
self.assertTrue(
osp.exists(osp.join(self.temp_dir.name, 'epoch_2.pth')))
checkpoint = torch.load(osp.join(self.temp_dir.name, 'epoch_2.pth'))
self.assertTrue('ema_state_dict' in checkpoint)
self.assertTrue(checkpoint['ema_state_dict']['steps'] == 8)
# load and testing
runner = Runner(
model=model,
test_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=True),
batch_size=3,
num_workers=0),
test_evaluator=evaluator,
test_cfg=dict(),
work_dir=self.temp_dir.name,
load_from=osp.join(self.temp_dir.name, 'epoch_2.pth'),
default_hooks=dict(logger=None),
custom_hooks=[dict(type='EMAHook')],
experiment_name='test2')
runner.test()
@MODEL_WRAPPERS.register_module()
class DummyWrapper(BaseModel):
def __init__(self, model):
super().__init__()
self.module = model
def forward(self, *args, **kwargs):
return self.module(*args, **kwargs)
# with model wrapper
runner = Runner(
model=DummyWrapper(ToyModel()),
test_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=True),
batch_size=3,
num_workers=0),
test_evaluator=evaluator,
test_cfg=dict(),
work_dir=self.temp_dir.name,
load_from=osp.join(self.temp_dir.name, 'epoch_2.pth'),
default_hooks=dict(logger=None),
custom_hooks=[dict(type='EMAHook')],
experiment_name='test3')
runner.test()
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
from unittest import TestCase
from unittest.mock import Mock
import torch
import torch.nn as nn
from torch.utils.data import Dataset
from mmengine.hooks import EMAHook
from mmengine.model import ExponentialMovingAverage
from mmengine.optim import OptimWrapper
from mmengine.registry import DATASETS, MODEL_WRAPPERS
from mmengine.runner import Runner
class ToyModel(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(2, 1)
def forward(self, data_batch, return_loss=False):
inputs, labels = [], []
for x in data_batch:
inputs.append(x['inputs'])
labels.append(x['data_sample'])
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
inputs = torch.stack(inputs).to(device)
labels = torch.stack(labels).to(device)
outputs = self.linear(inputs)
if return_loss:
loss = (labels - outputs).sum()
outputs = dict(loss=loss, log_vars=dict(loss=loss.item()))
return outputs
else:
outputs = dict(log_vars=dict(a=1, b=0.5))
return outputs
@DATASETS.register_module()
class DummyDataset(Dataset):
METAINFO = dict() # type: ignore
data = torch.randn(12, 2)
label = torch.ones(12)
@property
def metainfo(self):
return self.METAINFO
def __len__(self):
return self.data.size(0)
def __getitem__(self, index):
return dict(inputs=self.data[index], data_sample=self.label[index])
class TestEMAHook(TestCase):
def setUp(self):
self.temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.temp_dir.cleanup()
def test_ema_hook(self):
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
model = ToyModel().to(device)
evaluator = Mock()
evaluator.evaluate = Mock(return_value=dict(acc=0.5))
runner = Runner(
model=model,
train_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=True),
batch_size=3,
num_workers=0),
val_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=False),
batch_size=3,
num_workers=0),
val_evaluator=evaluator,
work_dir=self.temp_dir.name,
optim_wrapper=OptimWrapper(
torch.optim.Adam(ToyModel().parameters())),
train_cfg=dict(by_epoch=True, max_epochs=2, val_interval=1),
val_cfg=dict(),
default_hooks=dict(logger=None),
custom_hooks=[dict(type='EMAHook', )],
experiment_name='test1')
runner.train()
for hook in runner.hooks:
if isinstance(hook, EMAHook):
self.assertTrue(
isinstance(hook.ema_model, ExponentialMovingAverage))
self.assertTrue(
osp.exists(osp.join(self.temp_dir.name, 'epoch_2.pth')))
checkpoint = torch.load(osp.join(self.temp_dir.name, 'epoch_2.pth'))
self.assertTrue('ema_state_dict' in checkpoint)
self.assertTrue(checkpoint['ema_state_dict']['steps'] == 8)
# load and testing
runner = Runner(
model=model,
test_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=True),
batch_size=3,
num_workers=0),
test_evaluator=evaluator,
test_cfg=dict(),
work_dir=self.temp_dir.name,
load_from=osp.join(self.temp_dir.name, 'epoch_2.pth'),
default_hooks=dict(logger=None),
custom_hooks=[dict(type='EMAHook')],
experiment_name='test2')
runner.test()
@MODEL_WRAPPERS.register_module()
class DummyWrapper(nn.Module):
def __init__(self, model):
super().__init__()
self.module = model
def forward(self, *args, **kwargs):
return self.module(*args, **kwargs)
# with model wrapper
runner = Runner(
model=DummyWrapper(model),
test_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=True),
batch_size=3,
num_workers=0),
test_evaluator=evaluator,
test_cfg=dict(),
work_dir=self.temp_dir.name,
load_from=osp.join(self.temp_dir.name, 'epoch_2.pth'),
default_hooks=dict(logger=None),
custom_hooks=[dict(type='EMAHook')],
experiment_name='test3')
runner.test()
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
from typing import Optional
import torch
try:
import torch_npu # noqa: F401
import torch_npu.npu.utils as npu_utils
# Enable operator support for dynamic shape and
# binary operator support on the NPU.
npu_jit_compile = bool(os.getenv('NPUJITCompile', False))
torch.npu.set_compile_mode(jit_compile=npu_jit_compile)
IS_NPU_AVAILABLE = hasattr(torch, 'npu') and torch.npu.is_available()
except Exception:
IS_NPU_AVAILABLE = False
try:
import torch_dipu # noqa: F401
IS_DIPU_AVAILABLE = True
except Exception:
IS_DIPU_AVAILABLE = False
try:
import torch_musa # noqa: F401
IS_MUSA_AVAILABLE = True
except Exception:
IS_MUSA_AVAILABLE = False
def get_max_cuda_memory(device: Optional[torch.device] = None) -> int:
"""Returns the maximum GPU memory occupied by tensors in megabytes (MB) for
a given device. By default, this returns the peak allocated memory since
the beginning of this program.
Args:
device (torch.device, optional): selected device. Returns
statistic for the current device, given by
:func:`~torch.cuda.current_device`, if ``device`` is None.
Defaults to None.
Returns:
int: The maximum GPU memory occupied by tensors in megabytes
for a given device.
"""
mem = torch.cuda.max_memory_allocated(device=device)
mem_mb = torch.tensor([int(mem) // (1024 * 1024)],
dtype=torch.int,
device=device)
torch.cuda.reset_peak_memory_stats()
return int(mem_mb.item())
def is_cuda_available() -> bool:
"""Returns True if cuda devices exist."""
return torch.cuda.is_available()
def is_npu_available() -> bool:
"""Returns True if Ascend PyTorch and npu devices exist."""
return IS_NPU_AVAILABLE
def is_mlu_available() -> bool:
"""Returns True if Cambricon PyTorch and mlu devices exist."""
return hasattr(torch, 'is_mlu_available') and torch.is_mlu_available()
def is_mps_available() -> bool:
"""Return True if mps devices exist.
    It's specialized for Mac M1 chips and requires torch version 1.12 or higher.
"""
return hasattr(torch.backends, 'mps') and torch.backends.mps.is_available()
def is_dipu_available() -> bool:
return IS_DIPU_AVAILABLE
def get_max_musa_memory(device: Optional[torch.device] = None) -> int:
"""Returns the maximum GPU memory occupied by tensors in megabytes (MB) for
a given device. By default, this returns the peak allocated memory since
the beginning of this program.
Args:
device (torch.device, optional): selected device. Returns
statistic for the current device, given by
:func:`~torch.musa.current_device`, if ``device`` is None.
Defaults to None.
Returns:
int: The maximum GPU memory occupied by tensors in megabytes
for a given device.
"""
mem = torch.musa.max_memory_allocated(device=device)
mem_mb = torch.tensor([int(mem) // (1024 * 1024)],
dtype=torch.int,
device=device)
# TODO:[email protected]: This function is not supported by musa yet.
# torch.musa.reset_peak_memory_stats()
return int(mem_mb.item())
def is_musa_available() -> bool:
return IS_MUSA_AVAILABLE
def is_npu_support_full_precision() -> bool:
"""Returns True if npu devices support full precision training."""
version_of_support_full_precision = 220
return IS_NPU_AVAILABLE and npu_utils.get_soc_version(
) >= version_of_support_full_precision
DEVICE = 'cpu'
if is_npu_available():
DEVICE = 'npu'
elif is_cuda_available():
DEVICE = 'cuda'
elif is_mlu_available():
DEVICE = 'mlu'
elif is_mps_available():
DEVICE = 'mps'
elif is_dipu_available():
DEVICE = 'dipu'
elif is_musa_available():
DEVICE = 'musa'
def get_device() -> str:
"""Returns the currently existing device type.
Returns:
str: cuda | npu | mlu | mps | musa | cpu.
"""
return DEVICE
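# --- Minimal usage sketch (not part of the original module): get_device() returns the backend
# string selected at import time; for the cpu/cuda cases it can be passed straight to torch
# factory functions.
if __name__ == '__main__':
    dev = get_device()
    print('selected device backend:', dev)
    if is_cuda_available():
        _ = torch.zeros(2, 2, device='cuda')
        print('peak CUDA memory (MB):', get_max_cuda_memory())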
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
from typing import Optional
import torch
try:
import torch_npu # noqa: F401
import torch_npu.npu.utils as npu_utils
# Enable operator support for dynamic shape and
# binary operator support on the NPU.
npu_jit_compile = bool(os.getenv('NPUJITCompile', False))
torch.npu.set_compile_mode(jit_compile=npu_jit_compile)
IS_NPU_AVAILABLE = hasattr(torch, 'npu') and torch.npu.is_available()
except Exception:
IS_NPU_AVAILABLE = False
try:
import torch_dipu # noqa: F401
IS_DIPU_AVAILABLE = True
except Exception:
IS_DIPU_AVAILABLE = False
def get_max_cuda_memory(device: Optional[torch.device] = None) -> int:
"""Returns the maximum GPU memory occupied by tensors in megabytes (MB) for
a given device. By default, this returns the peak allocated memory since
the beginning of this program.
Args:
device (torch.device, optional): selected device. Returns
statistic for the current device, given by
:func:`~torch.cuda.current_device`, if ``device`` is None.
Defaults to None.
Returns:
int: The maximum GPU memory occupied by tensors in megabytes
for a given device.
"""
mem = torch.cuda.max_memory_allocated(device=device)
mem_mb = torch.tensor([int(mem) // (1024 * 1024)],
dtype=torch.int,
device=device)
torch.cuda.reset_peak_memory_stats()
return int(mem_mb.item())
def is_cuda_available() -> bool:
"""Returns True if cuda devices exist."""
return torch.cuda.is_available()
def is_npu_available() -> bool:
"""Returns True if Ascend PyTorch and npu devices exist."""
return IS_NPU_AVAILABLE
def is_mlu_available() -> bool:
"""Returns True if Cambricon PyTorch and mlu devices exist."""
return hasattr(torch, 'is_mlu_available') and torch.is_mlu_available()
def is_mps_available() -> bool:
"""Return True if mps devices exist.
    It's specialized for Mac M1 chips and requires torch version 1.12 or higher.
"""
return hasattr(torch.backends, 'mps') and torch.backends.mps.is_available()
def is_dipu_available() -> bool:
return IS_DIPU_AVAILABLE
def is_npu_support_full_precision() -> bool:
"""Returns True if npu devices support full precision training."""
version_of_support_full_precision = 220
return IS_NPU_AVAILABLE and npu_utils.get_soc_version(
) >= version_of_support_full_precision
DEVICE = 'cpu'
if is_npu_available():
DEVICE = 'npu'
elif is_cuda_available():
DEVICE = 'cuda'
elif is_mlu_available():
DEVICE = 'mlu'
elif is_mps_available():
DEVICE = 'mps'
elif is_dipu_available():
DEVICE = 'dipu'
def get_device() -> str:
"""Returns the currently existing device type.
Returns:
str: cuda | npu | mlu | mps | cpu.
"""
return DEVICE
|
from argparse import Namespace
from copy import deepcopy
from typing import TYPE_CHECKING, Type
from hubble.executor.helper import is_valid_huburi
from hubble.executor.hubio import HubIO
from jina.enums import PodRoleType
from jina.orchestrate.pods import Pod
from jina.orchestrate.pods.container import ContainerPod
if TYPE_CHECKING: # pragma: no cover
from jina.orchestrate.pods import BasePod
class PodFactory:
"""
A PodFactory is a factory class, abstracting the Pod creation
"""
@staticmethod
def build_pod(
args: 'Namespace', gateway_load_balancer: bool = False
) -> Type['BasePod']:
"""Build an implementation of a `BasePod` interface
:param args: deployment arguments parsed from the CLI.
:param gateway_load_balancer: flag indicating if this Pod is supposed to be a Gateway Load Balancer
:return: the created Deployment
"""
# copy to update but forward original
cargs = deepcopy(args)
cargs.gateway_load_balancer = gateway_load_balancer
if is_valid_huburi(cargs.uses):
_hub_args = deepcopy(args)
_hub_args.uri = args.uses
_hub_args.no_usage = True
cargs.uses = HubIO(_hub_args).pull()
if (
cargs.pod_role != PodRoleType.HEAD
and cargs.uses
and cargs.uses.startswith('docker://')
):
return ContainerPod(cargs)
else:
return Pod(cargs)
|
from argparse import Namespace
from copy import deepcopy
from typing import TYPE_CHECKING, Type
from hubble.executor.helper import is_valid_huburi
from hubble.executor.hubio import HubIO
from jina.enums import PodRoleType
from jina.orchestrate.pods import Pod
from jina.orchestrate.pods.container import ContainerPod
if TYPE_CHECKING: # pragma: no cover
from jina.orchestrate.pods import BasePod
class PodFactory:
"""
A PodFactory is a factory class, abstracting the Pod creation
"""
@staticmethod
def build_pod(args: 'Namespace', gateway_load_balancer: bool = False) -> Type['BasePod']:
"""Build an implementation of a `BasePod` interface
:param args: deployment arguments parsed from the CLI.
:param gateway_load_balancer: flag indicating if this Pod is supposed to be a Gateway Load Balancer
:return: the created Deployment
"""
# copy to update but forward original
cargs = deepcopy(args)
cargs.gateway_load_balancer = gateway_load_balancer
if is_valid_huburi(cargs.uses):
_hub_args = deepcopy(args)
_hub_args.uri = args.uses
_hub_args.no_usage = True
cargs.uses = HubIO(_hub_args).pull()
if (
cargs.pod_role != PodRoleType.HEAD
and cargs.uses
and cargs.uses.startswith('docker://')
):
return ContainerPod(cargs)
else:
return Pod(cargs)
|
import json
from typing import Any, Type, TypeVar, overload
import jsonschema
from fastapi.encoders import jsonable_encoder
from .type import type_match
def to_dict(data) -> dict:
return jsonable_encoder(data)
def dumps(data) -> str:
return json.dumps(jsonable_encoder(data))
T = TypeVar("T")
@overload
def loads(data: str, *args, target_type: Type[T], **kwargs) -> T: ...
@overload
def loads(data: str, *args, **kwargs) -> Any: ...
def loads(data: str, *args, target_type: Type[T] | None = None, **kwargs) -> Any:
parsed = json.loads(data, *args, **kwargs)
if target_type:
return type_match(parsed, target_type)
return parsed
def validate_with_jsonschema(
schema: dict[str, Any], data: dict[str, Any]
) -> str | None:
"""
Validate the data against the schema.
Returns the validation error message if the data does not match the schema.
"""
try:
jsonschema.validate(data, schema)
return None
except jsonschema.ValidationError as e:
return str(e)
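# --- Minimal usage sketch (not part of the original module; assumes it runs in a context where
# the relative import above resolves). Round-trips a payload and validates it against a
# throwaway schema defined here for illustration.
if __name__ == "__main__":
    payload = {"name": "example", "count": 3}
    assert loads(dumps(payload)) == payload
    schema = {"type": "object", "properties": {"count": {"type": "integer"}}}
    assert validate_with_jsonschema(schema, payload) is None  # None means the data is valid
    print(validate_with_jsonschema(schema, {"count": "not-an-int"}))  # prints the error message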
|
import json
from typing import Any, Type, TypeVar, overload
from fastapi.encoders import jsonable_encoder
from .type import type_match
def to_dict(data) -> dict:
return jsonable_encoder(data)
def dumps(data) -> str:
return json.dumps(jsonable_encoder(data))
T = TypeVar("T")
@overload
def loads(data: str, *args, target_type: Type[T], **kwargs) -> T: ...
@overload
def loads(data: str, *args, **kwargs) -> Any: ...
def loads(data: str, *args, target_type: Type[T] | None = None, **kwargs) -> Any:
parsed = json.loads(data, *args, **kwargs)
if target_type:
return type_match(parsed, target_type)
return parsed
|
import warnings
from typing import Callable, Union
from torch.ao.pruning.sparsifier.base_sparsifier import BaseSparsifier
from .base_scheduler import BaseScheduler
__all__ = ["LambdaSL"]
class LambdaSL(BaseScheduler):
"""Sets the sparsity level of each parameter group to the final sl
times a given function. When last_epoch=-1, sets initial sl as zero.
Args:
sparsifier (BaseSparsifier): Wrapped sparsifier.
sl_lambda (function or list): A function which computes a multiplicative
factor given an integer parameter epoch, or a list of such
functions, one for each group in sparsifier.param_groups.
last_epoch (int): The index of last epoch. Default: -1.
verbose (bool): If ``True``, prints a message to stdout for
each update. Default: ``False``.
Example:
>>> # Assuming sparsifier has two groups.
>>> lambda1 = lambda epoch: epoch // 30
>>> lambda2 = lambda epoch: 0.95**epoch
>>> # xdoctest: +SKIP
>>> scheduler = LambdaSL(sparsifier, sl_lambda=[lambda1, lambda2])
>>> for epoch in range(100):
>>> train(...)
>>> validate(...)
>>> scheduler.step()
"""
def __init__(
self,
sparsifier: BaseSparsifier,
sl_lambda: Union[Callable[[int], float], list[Callable[[int], float]]],
last_epoch: int = -1,
verbose: bool = False,
) -> None:
self.sparsifier = sparsifier
if not isinstance(sl_lambda, list) and not isinstance(sl_lambda, tuple):
self.sl_lambdas = [sl_lambda] * len(sparsifier.groups)
else:
if len(sl_lambda) != len(sparsifier.groups):
raise ValueError(
f"Expected {len(sparsifier.groups)} lr_lambdas, but got {len(sl_lambda)}"
)
self.sl_lambdas = list(sl_lambda)
super().__init__(sparsifier, last_epoch, verbose) # type: ignore[no-untyped-call]
def get_sl(self) -> list[float]:
if not self._get_sl_called_within_step:
warnings.warn(
"To get the last sparsity level computed by the scheduler, "
"please use `get_last_sl()`."
)
return [
base_sl * lmbda(self.last_epoch)
for lmbda, base_sl in zip(self.sl_lambdas, self.base_sl)
]
|
# mypy: allow-untyped-defs
import warnings
from .base_scheduler import BaseScheduler
__all__ = ["LambdaSL"]
class LambdaSL(BaseScheduler):
"""Sets the sparsity level of each parameter group to the final sl
times a given function. When last_epoch=-1, sets initial sl as zero.
Args:
sparsifier (BaseSparsifier): Wrapped sparsifier.
sl_lambda (function or list): A function which computes a multiplicative
factor given an integer parameter epoch, or a list of such
functions, one for each group in sparsifier.param_groups.
last_epoch (int): The index of last epoch. Default: -1.
verbose (bool): If ``True``, prints a message to stdout for
each update. Default: ``False``.
Example:
>>> # Assuming sparsifier has two groups.
>>> lambda1 = lambda epoch: epoch // 30
>>> lambda2 = lambda epoch: 0.95**epoch
>>> # xdoctest: +SKIP
>>> scheduler = LambdaSL(sparsifier, sl_lambda=[lambda1, lambda2])
>>> for epoch in range(100):
>>> train(...)
>>> validate(...)
>>> scheduler.step()
"""
def __init__(self, sparsifier, sl_lambda, last_epoch=-1, verbose=False):
self.sparsifier = sparsifier
if not isinstance(sl_lambda, list) and not isinstance(sl_lambda, tuple):
self.sl_lambdas = [sl_lambda] * len(sparsifier.groups)
else:
if len(sl_lambda) != len(sparsifier.groups):
raise ValueError(
f"Expected {len(sparsifier.groups)} lr_lambdas, but got {len(sl_lambda)}"
)
self.sl_lambdas = list(sl_lambda)
super().__init__(sparsifier, last_epoch, verbose)
def get_sl(self):
if not self._get_sl_called_within_step:
warnings.warn(
"To get the last sparsity level computed by the scheduler, "
"please use `get_last_sl()`."
)
return [
base_sl * lmbda(self.last_epoch)
for lmbda, base_sl in zip(self.sl_lambdas, self.base_sl)
]
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '2.15.1'
short_version = __version__
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
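# --- Minimal sanity-check sketch (not part of the original module), covering both branches of
# parse_version_info above:
if __name__ == '__main__':
    assert parse_version_info('2.15.1') == (2, 15, 1)
    assert parse_version_info('2.15.1rc2') == (2, 15, 1, 'rc2')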
|
# Copyright (c) Open-MMLab. All rights reserved.
__version__ = '2.15.1'
short_version = __version__
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
import pytest
import torch
import numpy as np
import torchvision.models.video as models
from torchvision import transforms
from jina import Document, DocumentArray, Executor
from ...video_torch_encoder import (
VideoTorchEncoder,
ConvertFHWCtoFCHW,
ConvertFCHWtoCFHW,
)
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.default_batch_size == 32
@pytest.mark.parametrize('model_name', ['r3d_18', 'mc3_18', 'r2plus1d_18'])
def test_video_torch_encoder(model_name):
ex = VideoTorchEncoder(model_name=model_name, use_default_preprocessing=False)
da = DocumentArray(
[Document(blob=np.random.random((3, 2, 224, 224))) for _ in range(10)]
)
ex.encode(da, {})
assert len(da) == 10
for doc in da:
assert doc.embedding.shape == (512,)
@pytest.mark.parametrize('batch_size', [1, 3, 10])
def test_video_torch_encoder_traversal_paths(batch_size):
ex = VideoTorchEncoder(use_default_preprocessing=False)
def _create_doc_with_video_chunks():
d = Document(blob=np.random.random((3, 2, 112, 112)))
d.chunks = [Document(blob=np.random.random((3, 2, 112, 112))) for _ in range(5)]
return d
da = DocumentArray([_create_doc_with_video_chunks() for _ in range(10)])
ex.encode(da, {'traversal_paths': ['r', 'c'], 'batch_size': batch_size})
assert len(da) == 10
for doc in da:
assert doc.embedding.shape == (512,)
assert len(doc.chunks) == 5
for chunk in doc.chunks:
assert chunk.embedding.shape == (512,)
@pytest.mark.parametrize('model_name', ['r3d_18', 'mc3_18', 'r2plus1d_18'])
def test_video_torch_encoder_use_default_preprocessing(model_name):
ex = VideoTorchEncoder(model_name=model_name, use_default_preprocessing=True)
da = DocumentArray(
[Document(blob=np.random.random((10, 270, 480, 3))) for _ in range(10)]
)
ex.encode(da, {})
assert len(da) == 10
for doc in da:
assert doc.embedding.shape == (512,)
@pytest.fixture()
def kinects_videos():
from torchvision.datasets import Kinetics400
dataset = Kinetics400(
root=Path(__file__).parents[1] / 'data/kinetics400', frames_per_clip=20
)
return [dataset[0][0], dataset[0][0]]
@pytest.mark.parametrize('model_name', ['mc3_18', 'r2plus1d_18', 'r3d_18'])
def test_with_dataset_video(model_name, kinects_videos):
da = DocumentArray(
[Document(blob=video.detach().numpy()) for video in kinects_videos]
)
ex = VideoTorchEncoder(use_default_preprocessing=True, model_name=model_name)
ex.encode(da, {})
assert len(da) == 2
for doc in da:
assert doc.embedding.shape == (512,)
model = getattr(models, model_name)(pretrained=True).eval()
mean = (0.43216, 0.394666, 0.37645)
std = (0.22803, 0.22145, 0.216989)
resize_size = (128, 171)
crop_size = (112, 112)
t = transforms.Compose(
[
ConvertFHWCtoFCHW(),
transforms.ConvertImageDtype(torch.float32),
transforms.Resize(resize_size),
transforms.Normalize(mean=mean, std=std),
transforms.CenterCrop(crop_size),
ConvertFCHWtoCFHW(),
]
)
tensor = torch.stack([t(video) for video in kinects_videos])
def _get_embeddings(x) -> torch.Tensor:
embeddings = torch.Tensor()
def get_activation(model, model_input, output):
nonlocal embeddings
embeddings = output
handle = model.avgpool.register_forward_hook(get_activation)
model(x)
handle.remove()
return embeddings.flatten(1)
embedding_batch = _get_embeddings(tensor)
for doc, expected_torch_embedding in zip(da, embedding_batch):
np.testing.assert_almost_equal(
doc.embedding, expected_torch_embedding.detach().numpy()
)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import pytest
import torch
import numpy as np
import torchvision.models.video as models
from torchvision import transforms
from jina import Document, DocumentArray
try:
from video_torch_encoder import VideoTorchEncoder, ConvertFHWCtoFCHW, ConvertFCHWtoCFHW
except ImportError:
from ...video_torch_encoder import VideoTorchEncoder, ConvertFHWCtoFCHW, ConvertFCHWtoCFHW
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.mark.parametrize('model_name', ['r3d_18', 'mc3_18', 'r2plus1d_18'])
def test_video_torch_encoder(model_name):
ex = VideoTorchEncoder(model_name=model_name, use_default_preprocessing=False)
da = DocumentArray([Document(blob=np.random.random((3, 2, 224, 224))) for _ in range(10)])
ex.encode(da, {})
assert len(da) == 10
for doc in da:
assert doc.embedding.shape == (512,)
@pytest.mark.parametrize('batch_size', [1, 3, 10])
def test_video_torch_encoder_traversal_paths(batch_size):
ex = VideoTorchEncoder(use_default_preprocessing=False)
def _create_doc_with_video_chunks():
d = Document(blob=np.random.random((3, 2, 112, 112)))
d.chunks = [Document(blob=np.random.random((3, 2, 112, 112))) for _ in range(5)]
return d
da = DocumentArray([_create_doc_with_video_chunks() for _ in range(10)])
ex.encode(da, {'traversal_paths': ['r', 'c'], 'batch_size': batch_size})
assert len(da) == 10
for doc in da:
assert doc.embedding.shape == (512,)
assert len(doc.chunks) == 5
for chunk in doc.chunks:
assert chunk.embedding.shape == (512,)
@pytest.mark.parametrize('model_name', ['r3d_18', 'mc3_18', 'r2plus1d_18'])
def test_video_torch_encoder_use_default_preprocessing(model_name):
ex = VideoTorchEncoder(model_name=model_name, use_default_preprocessing=True)
da = DocumentArray([Document(blob=np.random.random((10, 270, 480, 3))) for _ in range(10)])
ex.encode(da, {})
assert len(da) == 10
for doc in da:
assert doc.embedding.shape == (512,)
@pytest.fixture()
def kinects_videos():
from torchvision.datasets import Kinetics400
dataset = Kinetics400(root=os.path.join(cur_dir, '../data/kinetics400'), frames_per_clip=20)
return [dataset[0][0], dataset[0][0]]
@pytest.mark.parametrize('model_name', ['mc3_18', 'r2plus1d_18', 'r3d_18'])
def test_with_dataset_video(model_name, kinects_videos):
da = DocumentArray([Document(blob=video.detach().numpy()) for video in kinects_videos])
ex = VideoTorchEncoder(use_default_preprocessing=True, model_name=model_name)
ex.encode(da, {})
assert len(da) == 2
for doc in da:
assert doc.embedding.shape == (512,)
model = getattr(models, model_name)(pretrained=True).eval()
mean = (0.43216, 0.394666, 0.37645)
std = (0.22803, 0.22145, 0.216989)
resize_size = (128, 171)
crop_size = (112, 112)
t = transforms.Compose([
ConvertFHWCtoFCHW(),
transforms.ConvertImageDtype(torch.float32),
transforms.Resize(resize_size),
transforms.Normalize(mean=mean, std=std),
transforms.CenterCrop(crop_size),
ConvertFCHWtoCFHW()
])
tensor = torch.stack([t(video) for video in kinects_videos])
def _get_embeddings(x) -> torch.Tensor:
embeddings = torch.Tensor()
def get_activation(model, model_input, output):
nonlocal embeddings
embeddings = output
handle = model.avgpool.register_forward_hook(get_activation)
model(x)
handle.remove()
return embeddings.flatten(1)
embedding_batch = _get_embeddings(tensor)
for doc, expected_torch_embedding in zip(da, embedding_batch):
np.testing.assert_almost_equal(doc.embedding, expected_torch_embedding.detach().numpy())
|
import types
from typing import TYPE_CHECKING
from docarray.utils._internal.misc import (
_get_path_from_docarray_root_level,
import_library,
)
if TYPE_CHECKING:
from docarray.index.backends.elastic import ElasticDocIndex # noqa: F401
from docarray.index.backends.elasticv7 import ElasticV7DocIndex # noqa: F401
from docarray.index.backends.hnswlib import HnswDocumentIndex # noqa: F401
from docarray.index.backends.qdrant import QdrantDocumentIndex # noqa: F401
from docarray.index.backends.weaviate import WeaviateDocumentIndex # noqa: F401
__all__ = []
def __getattr__(name: str):
lib: types.ModuleType
if name == 'HnswDocumentIndex':
import_library('hnswlib', raise_error=True)
import docarray.index.backends.hnswlib as lib
elif name == 'ElasticDocIndex':
import_library('elasticsearch', raise_error=True)
import docarray.index.backends.elastic as lib
elif name == 'ElasticV7DocIndex':
import_library('elasticsearch', raise_error=True)
import docarray.index.backends.elasticv7 as lib
elif name == 'QdrantDocumentIndex':
import_library('qdrant_client', raise_error=True)
import docarray.index.backends.qdrant as lib
elif name == 'WeaviateDocumentIndex':
import_library('weaviate', raise_error=True)
import docarray.index.backends.weaviate as lib
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
index_cls = getattr(lib, name)
if name not in __all__:
__all__.append(name)
return index_cls
|
import types
from typing import TYPE_CHECKING
from docarray.utils._internal.misc import (
_get_path_from_docarray_root_level,
import_library,
)
if TYPE_CHECKING:
from docarray.index.backends.elastic import ElasticDocIndex # noqa: F401
from docarray.index.backends.elasticv7 import ElasticV7DocIndex # noqa: F401
from docarray.index.backends.hnswlib import HnswDocumentIndex # noqa: F401
from docarray.index.backends.qdrant import QdrantDocumentIndex # noqa: F401
__all__ = []
def __getattr__(name: str):
lib: types.ModuleType
if name == 'HnswDocumentIndex':
import_library('hnswlib', raise_error=True)
import docarray.index.backends.hnswlib as lib
elif name == 'ElasticDocIndex':
import_library('elasticsearch', raise_error=True)
import docarray.index.backends.elastic as lib
elif name == 'ElasticV7DocIndex':
import_library('elasticsearch', raise_error=True)
import docarray.index.backends.elasticv7 as lib
elif name == 'QdrantDocumentIndex':
import_library('qdrant_client', raise_error=True)
import docarray.index.backends.qdrant as lib
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
index_cls = getattr(lib, name)
if name not in __all__:
__all__.append(name)
return index_cls
|
_base_ = './decoupled-solo_r50_fpn_3x_coco.py'
# model settings
model = dict(
mask_head=dict(
type='DecoupledSOLOLightHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 8, 16, 32, 32],
scale_ranges=((1, 64), (32, 128), (64, 256), (128, 512), (256, 2048)),
pos_scale=0.2,
num_grids=[40, 36, 24, 16, 12],
cls_down_index=0,
loss_mask=dict(
type='DiceLoss', use_sigmoid=True, activate=False,
loss_weight=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)))
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(852, 512), (852, 480), (852, 448), (852, 416), (852, 384),
(852, 352)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='Resize', scale=(852, 512), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
|
_base_ = './decoupled-solo_r50_fpn_3x_coco.py'
# model settings
model = dict(
mask_head=dict(
type='DecoupledSOLOLightHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 8, 16, 32, 32],
scale_ranges=((1, 64), (32, 128), (64, 256), (128, 512), (256, 2048)),
pos_scale=0.2,
num_grids=[40, 36, 24, 16, 12],
cls_down_index=0,
loss_mask=dict(
type='DiceLoss', use_sigmoid=True, activate=False,
loss_weight=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(852, 512), (852, 480), (852, 448), (852, 416), (852, 384),
(852, 352)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=(852, 512), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
|
import warnings
from typing import Any, List
import torch
from torchvision.transforms import functional as _F
@torch.jit.unused
def to_tensor(inpt: Any) -> torch.Tensor:
"""[DEPREACTED] Use to_image() and to_dtype() instead."""
warnings.warn(
"The function `to_tensor(...)` is deprecated and will be removed in a future release. "
"Instead, please use `to_image(...)` followed by `to_dtype(..., dtype=torch.float32, scale=True)`."
)
return _F.to_tensor(inpt)
def get_image_size(inpt: torch.Tensor) -> List[int]:
warnings.warn(
"The function `get_image_size(...)` is deprecated and will be removed in a future release. "
"Instead, please use `get_size(...)` which returns `[h, w]` instead of `[w, h]`."
)
return _F.get_image_size(inpt)
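# --- Minimal sketch of the replacement path named in the deprecation warnings above
# (assumption: this torchvision build ships the v2 transforms API).
if __name__ == "__main__":
    from torchvision.transforms.v2 import functional as F_v2

    dummy = torch.zeros(3, 4, 4, dtype=torch.uint8)             # CHW uint8 image
    img = F_v2.to_image(dummy)                                   # wrap as an Image tv_tensor
    img = F_v2.to_dtype(img, dtype=torch.float32, scale=True)    # scale to [0, 1] floats
    print(img.dtype, list(img.shape))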
|
import warnings
from typing import Any, List
import torch
from torchvision.transforms import functional as _F
@torch.jit.unused
def to_tensor(inpt: Any) -> torch.Tensor:
"""[BETA] [DEPREACTED] Use to_image() and to_dtype() instead."""
warnings.warn(
"The function `to_tensor(...)` is deprecated and will be removed in a future release. "
"Instead, please use `to_image(...)` followed by `to_dtype(..., dtype=torch.float32, scale=True)`."
)
return _F.to_tensor(inpt)
def get_image_size(inpt: torch.Tensor) -> List[int]:
warnings.warn(
"The function `get_image_size(...)` is deprecated and will be removed in a future release. "
"Instead, please use `get_size(...)` which returns `[h, w]` instead of `[w, h]`."
)
return _F.get_image_size(inpt)
|
"""JSON Reader."""
import re
import defusedxml.ElementTree as ET # safe XML parsing
import xml.etree.ElementTree as _XmlET # for type annotations only
from pathlib import Path
from typing import Dict, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
def _get_leaf_nodes_up_to_level(
root: _XmlET.Element, level: int
) -> List[_XmlET.Element]:
"""
    Get the collection of nodes up to a certain level, including leaf nodes.
Args:
root (ET.Element): XML Root Element
level (int): Levels to traverse in the tree
Returns:
List[ET.Element]: List of target nodes
"""
def traverse(current_node, current_level):
if len(current_node) == 0 or level == current_level:
# Keep leaf nodes and target level nodes
nodes.append(current_node)
elif current_level < level:
# Move to the next level
for child in current_node:
traverse(child, current_level + 1)
nodes = []
traverse(root, 0)
return nodes
class XMLReader(BaseReader):
"""
XML reader.
Reads XML documents with options to help suss out relationships between nodes.
Args:
        tree_level_split (int): Level in the XML tree at which documents are split;
            the default is the root, which is level 0.
"""
def __init__(self, tree_level_split: Optional[int] = 0) -> None:
"""Initialize with arguments."""
super().__init__()
self.tree_level_split = tree_level_split
def _parse_xmlelt_to_document(
self, root: _XmlET.Element, extra_info: Optional[Dict] = None
) -> List[Document]:
"""
Parse the xml object into a list of Documents.
Args:
root: The XML Element to be converted.
extra_info (Optional[Dict]): Additional information. Default is None.
Returns:
        List[Document]: The documents.
"""
nodes = _get_leaf_nodes_up_to_level(root, self.tree_level_split)
documents = []
for node in nodes:
content = ET.tostring(node, encoding="utf8").decode("utf-8")
content = re.sub(r"^<\?xml.*", "", content)
content = content.strip()
documents.append(Document(text=content, extra_info=extra_info or {}))
return documents
def load_data(
self,
file: Path,
extra_info: Optional[Dict] = None,
) -> List[Document]:
"""
Load data from the input file.
Args:
file (Path): Path to the input file.
extra_info (Optional[Dict]): Additional information. Default is None.
Returns:
List[Document]: List of documents.
"""
if not isinstance(file, Path):
file = Path(file)
tree = ET.parse(file)
return self._parse_xmlelt_to_document(tree.getroot(), extra_info)
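# --- Usage sketch (editor's addition, illustrative only) ---
# A minimal example of driving the reader above; "example.xml" is an assumed path
# and the tree_level_split value is arbitrary.
if __name__ == "__main__":
    reader = XMLReader(tree_level_split=1)  # split documents one level below the root
    docs = reader.load_data(Path("example.xml"), extra_info={"source": "example.xml"})
    print(f"Loaded {len(docs)} document(s)")
    if docs:
        print(docs[0].text[:80])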
|
"""JSON Reader."""
import re
import xml.etree.ElementTree as ET
from pathlib import Path
from typing import Dict, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
def _get_leaf_nodes_up_to_level(root: ET.Element, level: int) -> List[ET.Element]:
"""
    Get the collection of nodes up to a certain level, including leaf nodes.
Args:
root (ET.Element): XML Root Element
level (int): Levels to traverse in the tree
Returns:
List[ET.Element]: List of target nodes
"""
def traverse(current_node, current_level):
if len(current_node) == 0 or level == current_level:
# Keep leaf nodes and target level nodes
nodes.append(current_node)
elif current_level < level:
# Move to the next level
for child in current_node:
traverse(child, current_level + 1)
nodes = []
traverse(root, 0)
return nodes
class XMLReader(BaseReader):
"""
XML reader.
Reads XML documents with options to help suss out relationships between nodes.
Args:
        tree_level_split (int): Level in the XML tree at which documents are split;
            the default is the root, which is level 0.
"""
def __init__(self, tree_level_split: Optional[int] = 0) -> None:
"""Initialize with arguments."""
super().__init__()
self.tree_level_split = tree_level_split
def _parse_xmlelt_to_document(
self, root: ET.Element, extra_info: Optional[Dict] = None
) -> List[Document]:
"""
Parse the xml object into a list of Documents.
Args:
root: The XML Element to be converted.
extra_info (Optional[Dict]): Additional information. Default is None.
Returns:
        List[Document]: The documents.
"""
nodes = _get_leaf_nodes_up_to_level(root, self.tree_level_split)
documents = []
for node in nodes:
content = ET.tostring(node, encoding="utf8").decode("utf-8")
content = re.sub(r"^<\?xml.*", "", content)
content = content.strip()
documents.append(Document(text=content, extra_info=extra_info or {}))
return documents
def load_data(
self,
file: Path,
extra_info: Optional[Dict] = None,
) -> List[Document]:
"""
Load data from the input file.
Args:
file (Path): Path to the input file.
extra_info (Optional[Dict]): Additional information. Default is None.
Returns:
List[Document]: List of documents.
"""
if not isinstance(file, Path):
file = Path(file)
tree = ET.parse(file)
return self._parse_xmlelt_to_document(tree.getroot(), extra_info)
|
from __future__ import annotations
from copy import deepcopy
import pytest
from sentence_transformers import CrossEncoder
@pytest.fixture()
def distilroberta_base_ce_model() -> CrossEncoder:
return CrossEncoder("distilroberta-base", num_labels=1)
@pytest.fixture(scope="session")
def _reranker_bert_tiny_model() -> CrossEncoder:
return CrossEncoder("cross-encoder-testing/reranker-bert-tiny-gooaq-bce")
@pytest.fixture()
def reranker_bert_tiny_model(_reranker_bert_tiny_model) -> CrossEncoder:
return deepcopy(_reranker_bert_tiny_model)
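# A minimal sketch (editor's addition) of how the fixtures above might be consumed in a
# test; the query/passage pair is illustrative only and the score is checked for shape.
def test_reranker_bert_tiny_model_predict(reranker_bert_tiny_model: CrossEncoder) -> None:
    scores = reranker_bert_tiny_model.predict(
        [("What is the capital of France?", "Paris is the capital of France.")]
    )
    assert len(scores) == 1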
|
from __future__ import annotations
import pytest
from sentence_transformers import CrossEncoder
@pytest.fixture()
def distilroberta_base_ce_model() -> CrossEncoder:
return CrossEncoder("distilroberta-base", num_labels=1)
@pytest.fixture()
def reranker_bert_tiny_model() -> CrossEncoder:
return CrossEncoder("cross-encoder-testing/reranker-bert-tiny-gooaq-bce")
@pytest.fixture(scope="session")
def reranker_bert_tiny_model_reused() -> CrossEncoder:
return CrossEncoder("cross-encoder-testing/reranker-bert-tiny-gooaq-bce")
|
from __future__ import annotations
from typing import Any, List, Optional, Tuple, Union
import PIL.Image
import torch
from torchvision.transforms import InterpolationMode
from ._datapoint import _FillTypeJIT, Datapoint
class Mask(Datapoint):
@classmethod
def _wrap(cls, tensor: torch.Tensor) -> Mask:
return tensor.as_subclass(cls)
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> Mask:
if isinstance(data, PIL.Image.Image):
from torchvision.transforms.v2 import functional as F
data = F.pil_to_tensor(data)
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
return cls._wrap(tensor)
@classmethod
def wrap_like(
cls,
other: Mask,
tensor: torch.Tensor,
) -> Mask:
return cls._wrap(tensor)
@property
def spatial_size(self) -> Tuple[int, int]:
return tuple(self.shape[-2:]) # type: ignore[return-value]
def horizontal_flip(self) -> Mask:
output = self._F.horizontal_flip_mask(self.as_subclass(torch.Tensor))
return Mask.wrap_like(self, output)
def vertical_flip(self) -> Mask:
output = self._F.vertical_flip_mask(self.as_subclass(torch.Tensor))
return Mask.wrap_like(self, output)
def resize( # type: ignore[override]
self,
size: List[int],
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
max_size: Optional[int] = None,
antialias: Optional[Union[str, bool]] = "warn",
) -> Mask:
output = self._F.resize_mask(self.as_subclass(torch.Tensor), size, max_size=max_size)
return Mask.wrap_like(self, output)
def crop(self, top: int, left: int, height: int, width: int) -> Mask:
output = self._F.crop_mask(self.as_subclass(torch.Tensor), top, left, height, width)
return Mask.wrap_like(self, output)
def center_crop(self, output_size: List[int]) -> Mask:
output = self._F.center_crop_mask(self.as_subclass(torch.Tensor), output_size=output_size)
return Mask.wrap_like(self, output)
def resized_crop(
self,
top: int,
left: int,
height: int,
width: int,
size: List[int],
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
antialias: Optional[Union[str, bool]] = "warn",
) -> Mask:
output = self._F.resized_crop_mask(self.as_subclass(torch.Tensor), top, left, height, width, size=size)
return Mask.wrap_like(self, output)
def pad(
self,
padding: List[int],
fill: Optional[Union[int, float, List[float]]] = None,
padding_mode: str = "constant",
) -> Mask:
output = self._F.pad_mask(self.as_subclass(torch.Tensor), padding, padding_mode=padding_mode, fill=fill)
return Mask.wrap_like(self, output)
def rotate(
self,
angle: float,
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
expand: bool = False,
center: Optional[List[float]] = None,
fill: _FillTypeJIT = None,
) -> Mask:
output = self._F.rotate_mask(self.as_subclass(torch.Tensor), angle, expand=expand, center=center, fill=fill)
return Mask.wrap_like(self, output)
def affine(
self,
angle: Union[int, float],
translate: List[float],
scale: float,
shear: List[float],
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
fill: _FillTypeJIT = None,
center: Optional[List[float]] = None,
) -> Mask:
output = self._F.affine_mask(
self.as_subclass(torch.Tensor),
angle,
translate=translate,
scale=scale,
shear=shear,
fill=fill,
center=center,
)
return Mask.wrap_like(self, output)
def perspective(
self,
startpoints: Optional[List[List[int]]],
endpoints: Optional[List[List[int]]],
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
fill: _FillTypeJIT = None,
coefficients: Optional[List[float]] = None,
) -> Mask:
output = self._F.perspective_mask(
self.as_subclass(torch.Tensor), startpoints, endpoints, fill=fill, coefficients=coefficients
)
return Mask.wrap_like(self, output)
def elastic(
self,
displacement: torch.Tensor,
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
fill: _FillTypeJIT = None,
) -> Mask:
output = self._F.elastic_mask(self.as_subclass(torch.Tensor), displacement, fill=fill)
return Mask.wrap_like(self, output)
|
from __future__ import annotations
from typing import Any, List, Optional, Tuple, Union
import PIL.Image
import torch
from torchvision.transforms import InterpolationMode
from ._datapoint import Datapoint, FillTypeJIT
class Mask(Datapoint):
@classmethod
def _wrap(cls, tensor: torch.Tensor) -> Mask:
return tensor.as_subclass(cls)
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> Mask:
if isinstance(data, PIL.Image.Image):
from torchvision.transforms.v2 import functional as F
data = F.pil_to_tensor(data)
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
return cls._wrap(tensor)
@classmethod
def wrap_like(
cls,
other: Mask,
tensor: torch.Tensor,
) -> Mask:
return cls._wrap(tensor)
@property
def spatial_size(self) -> Tuple[int, int]:
return tuple(self.shape[-2:]) # type: ignore[return-value]
def horizontal_flip(self) -> Mask:
output = self._F.horizontal_flip_mask(self.as_subclass(torch.Tensor))
return Mask.wrap_like(self, output)
def vertical_flip(self) -> Mask:
output = self._F.vertical_flip_mask(self.as_subclass(torch.Tensor))
return Mask.wrap_like(self, output)
def resize( # type: ignore[override]
self,
size: List[int],
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
max_size: Optional[int] = None,
antialias: Optional[Union[str, bool]] = "warn",
) -> Mask:
output = self._F.resize_mask(self.as_subclass(torch.Tensor), size, max_size=max_size)
return Mask.wrap_like(self, output)
def crop(self, top: int, left: int, height: int, width: int) -> Mask:
output = self._F.crop_mask(self.as_subclass(torch.Tensor), top, left, height, width)
return Mask.wrap_like(self, output)
def center_crop(self, output_size: List[int]) -> Mask:
output = self._F.center_crop_mask(self.as_subclass(torch.Tensor), output_size=output_size)
return Mask.wrap_like(self, output)
def resized_crop(
self,
top: int,
left: int,
height: int,
width: int,
size: List[int],
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
antialias: Optional[Union[str, bool]] = "warn",
) -> Mask:
output = self._F.resized_crop_mask(self.as_subclass(torch.Tensor), top, left, height, width, size=size)
return Mask.wrap_like(self, output)
def pad(
self,
padding: List[int],
fill: Optional[Union[int, float, List[float]]] = None,
padding_mode: str = "constant",
) -> Mask:
output = self._F.pad_mask(self.as_subclass(torch.Tensor), padding, padding_mode=padding_mode, fill=fill)
return Mask.wrap_like(self, output)
def rotate(
self,
angle: float,
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
expand: bool = False,
center: Optional[List[float]] = None,
fill: FillTypeJIT = None,
) -> Mask:
output = self._F.rotate_mask(self.as_subclass(torch.Tensor), angle, expand=expand, center=center, fill=fill)
return Mask.wrap_like(self, output)
def affine(
self,
angle: Union[int, float],
translate: List[float],
scale: float,
shear: List[float],
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
center: Optional[List[float]] = None,
) -> Mask:
output = self._F.affine_mask(
self.as_subclass(torch.Tensor),
angle,
translate=translate,
scale=scale,
shear=shear,
fill=fill,
center=center,
)
return Mask.wrap_like(self, output)
def perspective(
self,
startpoints: Optional[List[List[int]]],
endpoints: Optional[List[List[int]]],
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
coefficients: Optional[List[float]] = None,
) -> Mask:
output = self._F.perspective_mask(
self.as_subclass(torch.Tensor), startpoints, endpoints, fill=fill, coefficients=coefficients
)
return Mask.wrap_like(self, output)
def elastic(
self,
displacement: torch.Tensor,
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
) -> Mask:
output = self._F.elastic_mask(self.as_subclass(torch.Tensor), displacement, fill=fill)
return Mask.wrap_like(self, output)
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "2.20.1.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# isort: split
# Deprecated modules
from . import arrow_dataset as _arrow_dataset
from . import utils as _utils
from .exceptions import ExpectedMoreDownloadedFiles, ExpectedMoreSplits, UnexpectedDownloadedFile, UnexpectedSplits
from .utils import download_manager as _deprecated_download_manager
from .utils import info_utils as _deprecated_info_utils
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
_deprecated_info_utils.ExpectedMoreDownloadedFiles = ExpectedMoreDownloadedFiles
_deprecated_info_utils.ExpectedMoreSplits = ExpectedMoreSplits
_deprecated_info_utils.UnexpectedDownloadedFile = UnexpectedDownloadedFile
_deprecated_info_utils.UnexpectedSplits = UnexpectedSplits
del _arrow_dataset, _utils, _deprecated_download_manager
del _deprecated_info_utils, ExpectedMoreDownloadedFiles, ExpectedMoreSplits, UnexpectedDownloadedFile, UnexpectedSplits
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "2.20.1.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# isort: split
# Deprecated modules
from . import arrow_dataset as _arrow_dataset
from . import utils as _utils
from .exceptions import ExpectedMoreDownloadedFiles, ExpectedMoreSplits, UnexpectedDownloadedFile, UnexpectedSplits
from .utils import download_manager as _deprecated_download_manager
from .utils import info_utils as _deprecated_info_utils
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
_deprecated_info_utils.ExpectedMoreDownloadedFiles = ExpectedMoreDownloadedFiles
_deprecated_info_utils.ExpectedMoreSplits = ExpectedMoreSplits
_deprecated_info_utils.UnexpectedDownloadedFile = UnexpectedDownloadedFile
_deprecated_info_utils.UnexpectedSplits = UnexpectedSplits
del _arrow_dataset, _utils, _deprecated_download_manager
del _deprecated_info_utils, ExpectedMoreDownloadedFiles, ExpectedMoreSplits, UnexpectedDownloadedFile, UnexpectedSplits
|
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Nystromformer checkpoints from the original repository."""
import argparse
import torch
from transformers import NystromformerConfig, NystromformerForMaskedLM
def rename_key(orig_key):
if "model" in orig_key:
orig_key = orig_key.replace("model.", "")
if "norm1" in orig_key:
orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
if "norm2" in orig_key:
orig_key = orig_key.replace("norm2", "output.LayerNorm")
if "norm" in orig_key:
orig_key = orig_key.replace("norm", "LayerNorm")
if "transformer" in orig_key:
layer_num = orig_key.split(".")[0].split("_")[-1]
orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
if "mha.attn" in orig_key:
orig_key = orig_key.replace("mha.attn", "attention.self")
if "mha" in orig_key:
orig_key = orig_key.replace("mha", "attention")
if "W_q" in orig_key:
orig_key = orig_key.replace("W_q", "self.query")
if "W_k" in orig_key:
orig_key = orig_key.replace("W_k", "self.key")
if "W_v" in orig_key:
orig_key = orig_key.replace("W_v", "self.value")
if "ff1" in orig_key:
orig_key = orig_key.replace("ff1", "intermediate.dense")
if "ff2" in orig_key:
orig_key = orig_key.replace("ff2", "output.dense")
if "ff" in orig_key:
orig_key = orig_key.replace("ff", "output.dense")
if "mlm_class" in orig_key:
orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
if "mlm" in orig_key:
orig_key = orig_key.replace("mlm", "cls.predictions.transform")
if "cls" not in orig_key:
orig_key = "nystromformer." + orig_key
return orig_key
def convert_checkpoint_helper(config, orig_state_dict):
for key in orig_state_dict.copy().keys():
val = orig_state_dict.pop(key)
if ("pooler" in key) or ("sen_class" in key) or ("conv.bias" in key):
continue
else:
orig_state_dict[rename_key(key)] = val
orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
orig_state_dict["nystromformer.embeddings.position_ids"] = (
torch.arange(config.max_position_embeddings).expand((1, -1)) + 2
)
return orig_state_dict
def convert_nystromformer_checkpoint(checkpoint_path, nystromformer_config_file, pytorch_dump_path):
orig_state_dict = torch.load(checkpoint_path, map_location="cpu", weights_only=True)["model_state_dict"]
config = NystromformerConfig.from_json_file(nystromformer_config_file)
model = NystromformerForMaskedLM(config)
new_state_dict = convert_checkpoint_helper(config, orig_state_dict)
model.load_state_dict(new_state_dict)
model.eval()
model.save_pretrained(pytorch_dump_path)
print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path", default=None, type=str, required=True, help="Path to Nystromformer pytorch checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for Nystromformer model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
args = parser.parse_args()
convert_nystromformer_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
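# Example invocation (editor's addition; paths and the script filename are illustrative):
#   python convert_nystromformer_checkpoint.py \
#       --pytorch_model_path /path/to/nystromformer_checkpoint.p \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output_dir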
|
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Nystromformer checkpoints from the original repository."""
import argparse
import torch
from transformers import NystromformerConfig, NystromformerForMaskedLM
def rename_key(orig_key):
if "model" in orig_key:
orig_key = orig_key.replace("model.", "")
if "norm1" in orig_key:
orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
if "norm2" in orig_key:
orig_key = orig_key.replace("norm2", "output.LayerNorm")
if "norm" in orig_key:
orig_key = orig_key.replace("norm", "LayerNorm")
if "transformer" in orig_key:
layer_num = orig_key.split(".")[0].split("_")[-1]
orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
if "mha.attn" in orig_key:
orig_key = orig_key.replace("mha.attn", "attention.self")
if "mha" in orig_key:
orig_key = orig_key.replace("mha", "attention")
if "W_q" in orig_key:
orig_key = orig_key.replace("W_q", "self.query")
if "W_k" in orig_key:
orig_key = orig_key.replace("W_k", "self.key")
if "W_v" in orig_key:
orig_key = orig_key.replace("W_v", "self.value")
if "ff1" in orig_key:
orig_key = orig_key.replace("ff1", "intermediate.dense")
if "ff2" in orig_key:
orig_key = orig_key.replace("ff2", "output.dense")
if "ff" in orig_key:
orig_key = orig_key.replace("ff", "output.dense")
if "mlm_class" in orig_key:
orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
if "mlm" in orig_key:
orig_key = orig_key.replace("mlm", "cls.predictions.transform")
if "cls" not in orig_key:
orig_key = "nystromformer." + orig_key
return orig_key
def convert_checkpoint_helper(config, orig_state_dict):
for key in orig_state_dict.copy().keys():
val = orig_state_dict.pop(key)
if ("pooler" in key) or ("sen_class" in key) or ("conv.bias" in key):
continue
else:
orig_state_dict[rename_key(key)] = val
orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
orig_state_dict["nystromformer.embeddings.position_ids"] = (
torch.arange(config.max_position_embeddings).expand((1, -1)) + 2
)
return orig_state_dict
def convert_nystromformer_checkpoint(checkpoint_path, nystromformer_config_file, pytorch_dump_path):
orig_state_dict = torch.load(checkpoint_path, map_location="cpu", weights_only=True)["model_state_dict"]
config = NystromformerConfig.from_json_file(nystromformer_config_file)
model = NystromformerForMaskedLM(config)
new_state_dict = convert_checkpoint_helper(config, orig_state_dict)
model.load_state_dict(new_state_dict)
model.eval()
model.save_pretrained(pytorch_dump_path)
print(f"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path", default=None, type=str, required=True, help="Path to Nystromformer pytorch checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for Nystromformer model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
args = parser.parse_args()
convert_nystromformer_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
|
from typing import Optional
import numpy as np
import pytest
from pydantic import BaseModel, ValidationError
from typing_extensions import TypedDict
from docarray import BaseDoc, DocArray
from docarray.documents import AudioDoc, ImageDoc, TextDoc
from docarray.documents.helper import (
create_doc,
create_doc_from_dict,
create_doc_from_typeddict,
)
from docarray.typing import AudioNdArray
def test_multi_modal_doc():
class MyMultiModalDoc(BaseDoc):
image: ImageDoc
text: TextDoc
doc = MyMultiModalDoc(
image=ImageDoc(tensor=np.zeros((3, 224, 224))), text=TextDoc(text='hello')
)
assert isinstance(doc.image, BaseDoc)
assert isinstance(doc.image, ImageDoc)
assert isinstance(doc.text, TextDoc)
assert doc.text.text == 'hello'
assert (doc.image.tensor == np.zeros((3, 224, 224))).all()
def test_nested_chunks_document():
class ChunksDocument(BaseDoc):
text: str
images: DocArray[ImageDoc]
doc = ChunksDocument(
text='hello',
images=DocArray[ImageDoc]([ImageDoc() for _ in range(10)]),
)
assert isinstance(doc.images, DocArray)
def test_create_doc():
with pytest.raises(ValueError):
_ = create_doc(
'MyMultiModalDoc',
__base__=BaseModel,
image=(ImageDoc, ...),
text=(TextDoc, ...),
)
MyMultiModalDoc = create_doc(
'MyMultiModalDoc', image=(ImageDoc, ...), text=(TextDoc, ...)
)
assert issubclass(MyMultiModalDoc, BaseDoc)
doc = MyMultiModalDoc(
image=ImageDoc(tensor=np.zeros((3, 224, 224))), text=TextDoc(text='hello')
)
assert isinstance(doc.image, BaseDoc)
assert isinstance(doc.image, ImageDoc)
assert isinstance(doc.text, TextDoc)
assert doc.text.text == 'hello'
assert (doc.image.tensor == np.zeros((3, 224, 224))).all()
MyAudio = create_doc(
'MyAudio',
__base__=AudioDoc,
title=(str, ...),
tensor=(Optional[AudioNdArray], ...),
)
assert issubclass(MyAudio, BaseDoc)
assert issubclass(MyAudio, AudioDoc)
def test_create_doc_from_typeddict():
class MyMultiModalDoc(TypedDict):
image: ImageDoc
text: TextDoc
with pytest.raises(ValueError):
_ = create_doc_from_typeddict(MyMultiModalDoc, __base__=BaseModel)
Doc = create_doc_from_typeddict(MyMultiModalDoc)
assert issubclass(Doc, BaseDoc)
class MyAudio(TypedDict):
title: str
tensor: Optional[AudioNdArray]
Doc = create_doc_from_typeddict(MyAudio, __base__=AudioDoc)
assert issubclass(Doc, BaseDoc)
assert issubclass(Doc, AudioDoc)
def test_create_doc_from_dict():
data_dict = {
'image': ImageDoc(tensor=np.random.rand(3, 224, 224)),
'text': TextDoc(text='hello'),
'id': 123,
}
MyDoc = create_doc_from_dict(model_name='MyDoc', data_dict=data_dict)
assert issubclass(MyDoc, BaseDoc)
doc = MyDoc(
image=ImageDoc(tensor=np.random.rand(3, 224, 224)),
text=TextDoc(text='hey'),
id=111,
)
assert isinstance(doc, BaseDoc)
assert isinstance(doc.text, TextDoc)
assert isinstance(doc.image, ImageDoc)
assert isinstance(doc.id, int)
# Create a doc with an incorrect type
with pytest.raises(ValidationError):
doc = MyDoc(
image=ImageDoc(tensor=np.random.rand(3, 224, 224)),
text=['some', 'text'], # should be TextDoc
id=111,
)
# Handle empty data_dict
with pytest.raises(ValueError):
MyDoc = create_doc_from_dict(model_name='MyDoc', data_dict={})
# Data with a None value
data_dict = {'text': 'some text', 'other': None}
MyDoc = create_doc_from_dict(model_name='MyDoc', data_dict=data_dict)
assert issubclass(MyDoc, BaseDoc)
doc1 = MyDoc(text='txt', other=10)
doc2 = MyDoc(text='txt', other='also text')
assert isinstance(doc1, BaseDoc) and isinstance(doc2, BaseDoc)
|
from typing import Optional
import numpy as np
import pytest
from pydantic import BaseModel, ValidationError
from typing_extensions import TypedDict
from docarray import BaseDocument, DocumentArray
from docarray.documents import AudioDoc, ImageDoc, TextDoc
from docarray.documents.helper import (
create_doc,
create_doc_from_typeddict,
create_doc_from_dict,
)
from docarray.typing import AudioNdArray
def test_multi_modal_doc():
class MyMultiModalDoc(BaseDocument):
image: ImageDoc
text: TextDoc
doc = MyMultiModalDoc(
image=ImageDoc(tensor=np.zeros((3, 224, 224))), text=TextDoc(text='hello')
)
assert isinstance(doc.image, BaseDocument)
assert isinstance(doc.image, ImageDoc)
assert isinstance(doc.text, TextDoc)
assert doc.text.text == 'hello'
assert (doc.image.tensor == np.zeros((3, 224, 224))).all()
def test_nested_chunks_document():
class ChunksDocument(BaseDocument):
text: str
images: DocumentArray[ImageDoc]
doc = ChunksDocument(
text='hello',
images=DocumentArray[ImageDoc]([ImageDoc() for _ in range(10)]),
)
assert isinstance(doc.images, DocumentArray)
def test_create_doc():
with pytest.raises(ValueError):
_ = create_doc(
'MyMultiModalDoc',
__base__=BaseModel,
image=(ImageDoc, ...),
text=(TextDoc, ...),
)
MyMultiModalDoc = create_doc(
'MyMultiModalDoc', image=(ImageDoc, ...), text=(TextDoc, ...)
)
assert issubclass(MyMultiModalDoc, BaseDocument)
doc = MyMultiModalDoc(
image=ImageDoc(tensor=np.zeros((3, 224, 224))), text=TextDoc(text='hello')
)
assert isinstance(doc.image, BaseDocument)
assert isinstance(doc.image, ImageDoc)
assert isinstance(doc.text, TextDoc)
assert doc.text.text == 'hello'
assert (doc.image.tensor == np.zeros((3, 224, 224))).all()
MyAudio = create_doc(
'MyAudio',
__base__=AudioDoc,
title=(str, ...),
tensor=(Optional[AudioNdArray], ...),
)
assert issubclass(MyAudio, BaseDocument)
assert issubclass(MyAudio, AudioDoc)
def test_create_doc_from_typeddict():
class MyMultiModalDoc(TypedDict):
image: ImageDoc
text: TextDoc
with pytest.raises(ValueError):
_ = create_doc_from_typeddict(MyMultiModalDoc, __base__=BaseModel)
Doc = create_doc_from_typeddict(MyMultiModalDoc)
assert issubclass(Doc, BaseDocument)
class MyAudio(TypedDict):
title: str
tensor: Optional[AudioNdArray]
Doc = create_doc_from_typeddict(MyAudio, __base__=AudioDoc)
assert issubclass(Doc, BaseDocument)
assert issubclass(Doc, AudioDoc)
def test_create_doc_from_dict():
data_dict = {
'image': ImageDoc(tensor=np.random.rand(3, 224, 224)),
'text': TextDoc(text='hello'),
'id': 123,
}
MyDoc = create_doc_from_dict(model_name='MyDoc', data_dict=data_dict)
assert issubclass(MyDoc, BaseDocument)
doc = MyDoc(
image=ImageDoc(tensor=np.random.rand(3, 224, 224)),
text=TextDoc(text='hey'),
id=111,
)
assert isinstance(doc, BaseDocument)
assert isinstance(doc.text, TextDoc)
assert isinstance(doc.image, ImageDoc)
assert isinstance(doc.id, int)
# Create a doc with an incorrect type
with pytest.raises(ValidationError):
doc = MyDoc(
image=ImageDoc(tensor=np.random.rand(3, 224, 224)),
text=['some', 'text'], # should be TextDoc
id=111,
)
# Handle empty data_dict
with pytest.raises(ValueError):
MyDoc = create_doc_from_dict(model_name='MyDoc', data_dict={})
# Data with a None value
data_dict = {'text': 'some text', 'other': None}
MyDoc = create_doc_from_dict(model_name='MyDoc', data_dict=data_dict)
assert issubclass(MyDoc, BaseDocument)
doc1 = MyDoc(text='txt', other=10)
doc2 = MyDoc(text='txt', other='also text')
assert isinstance(doc1, BaseDocument) and isinstance(doc2, BaseDocument)
|
import os
import numpy as np
import pytest as pytest
from jina import Document, DocumentArray
cur_dir = os.path.dirname(os.path.abspath(__file__))
compose_yml = os.path.abspath(os.path.join(cur_dir, 'docker-compose.yml'))
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_connection(indexer, docker_compose):
assert indexer.hostname == '127.0.0.1'
assert indexer.get_query_handler().ping()
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_upsert(indexer, docs, docker_compose):
indexer.upsert(docs, parameters={})
qh = indexer.get_query_handler()
redis_keys = qh.keys()
assert all(doc.id.encode() in redis_keys for doc in docs)
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_search(indexer, docs, docker_compose):
indexer.upsert(docs, parameters={})
query = DocumentArray([Document(id=doc.id) for doc in docs])
indexer.search(query, parameters={})
assert all(query_doc.content == doc.content for query_doc, doc in zip(query, docs))
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_upsert_with_duplicates(indexer, docs, docker_compose):
# insert same docs twice
indexer.upsert(docs, parameters={})
indexer.upsert(docs, parameters={})
qh = indexer.get_query_handler()
assert len(qh.keys()) == 5
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_add(indexer, docs, docker_compose):
indexer.add(docs, parameters={})
with indexer.get_query_handler() as redis_handler:
assert all(doc.id.encode() in redis_handler.keys() for doc in docs)
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_add_existing(indexer, docs, docker_compose):
indexer.add(docs, parameters={})
existing_doc = Document(id=docs[0].id, content='new content')
indexer.add(DocumentArray([existing_doc]), parameters={})
with indexer.get_query_handler() as redis_handler:
result = redis_handler.get(existing_doc.id)
data = bytes(result)
retrieved_doc = Document(data)
assert retrieved_doc.content != existing_doc.content
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_update(indexer, docs, docker_compose):
indexer.add(docs, parameters={})
for doc in docs:
doc.content = 'new ' + doc.content
indexer.update(docs, parameters={})
with indexer.get_query_handler() as redis_handler:
for doc in docs:
result = redis_handler.get(doc.id)
data = bytes(result)
retrieved_doc = Document(data)
assert retrieved_doc.content == doc.content
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_update_non_existing(indexer, docs, docker_compose):
indexer.update(docs, parameters={})
with indexer.get_query_handler() as redis_handler:
assert all(doc.id.encode() not in redis_handler.keys() for doc in docs)
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_search_not_found(indexer, docs, docker_compose):
indexer.upsert(docs, parameters={})
query = DocumentArray([Document(id=docs[0].id), Document()])
indexer.search(query, parameters={})
assert query[0].content == docs[0].content
assert query[1].content is None
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_delete(indexer, docs, docker_compose):
indexer.upsert(docs, parameters={})
indexer.delete(docs[:2], parameters={})
query = DocumentArray([Document(id=doc.id) for doc in docs])
indexer.search(query, parameters={})
assert all(query_doc.content is None for query_doc in query[:2])
assert all(query_doc.content == doc.content for query_doc, doc in zip(query[2:], docs[2:]))
qh = indexer.get_query_handler()
assert len(qh.keys()) == 3
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_return_embeddings(indexer, docker_compose):
doc = Document(embedding=np.random.rand(1, 10))
da = DocumentArray([doc])
query1 = DocumentArray([Document(id=doc.id)])
indexer.add(da, parameters={})
indexer.search(query1, parameters={})
assert query1[0].embedding is not None
assert query1[0].embedding.shape == (1, 10)
query2 = DocumentArray([Document(id=doc.id)])
indexer.search(query2, parameters={"return_embeddings": False})
assert query2[0].embedding is None
|
import os
import pytest as pytest
from jina import Document, DocumentArray
cur_dir = os.path.dirname(os.path.abspath(__file__))
compose_yml = os.path.abspath(os.path.join(cur_dir, 'docker-compose.yml'))
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_connection(indexer, docker_compose):
assert indexer.hostname == '127.0.0.1'
assert indexer.get_query_handler().ping()
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_upsert(indexer, docs, docker_compose):
indexer.upsert(docs, parameters={})
qh = indexer.get_query_handler()
redis_keys = qh.keys()
assert all(doc.id.encode() in redis_keys for doc in docs)
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_search(indexer, docs, docker_compose):
indexer.upsert(docs, parameters={})
query = DocumentArray([Document(id=doc.id) for doc in docs])
indexer.search(query, parameters={})
assert all(query_doc.content == doc.content for query_doc, doc in zip(query, docs))
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_upsert_with_duplicates(indexer, docs, docker_compose):
# insert same docs twice
indexer.upsert(docs, parameters={})
indexer.upsert(docs, parameters={})
qh = indexer.get_query_handler()
assert len(qh.keys()) == 5
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_add(indexer, docs, docker_compose):
indexer.add(docs, parameters={})
with indexer.get_query_handler() as redis_handler:
assert all(doc.id.encode() in redis_handler.keys() for doc in docs)
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_add_existing(indexer, docs, docker_compose):
indexer.add(docs, parameters={})
existing_doc = Document(id=docs[0].id, content='new content')
indexer.add(DocumentArray([existing_doc]), parameters={})
with indexer.get_query_handler() as redis_handler:
result = redis_handler.get(existing_doc.id)
data = bytes(result)
retrieved_doc = Document(data)
assert retrieved_doc.content != existing_doc.content
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_update(indexer, docs, docker_compose):
indexer.add(docs, parameters={})
for doc in docs:
doc.content = 'new ' + doc.content
indexer.update(docs, parameters={})
with indexer.get_query_handler() as redis_handler:
for doc in docs:
result = redis_handler.get(doc.id)
data = bytes(result)
retrieved_doc = Document(data)
assert retrieved_doc.content == doc.content
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_update_non_existing(indexer, docs, docker_compose):
indexer.update(docs, parameters={})
with indexer.get_query_handler() as redis_handler:
assert all(doc.id.encode() not in redis_handler.keys() for doc in docs)
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_search_not_found(indexer, docs, docker_compose):
indexer.upsert(docs, parameters={})
query = DocumentArray([Document(id=docs[0].id), Document()])
indexer.search(query, parameters={})
assert query[0].content == docs[0].content
assert query[1].content is None
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_delete(indexer, docs, docker_compose):
indexer.upsert(docs, parameters={})
indexer.delete(docs[:2], parameters={})
query = DocumentArray([Document(id=doc.id) for doc in docs])
indexer.search(query, parameters={})
assert all(query_doc.content is None for query_doc in query[:2])
assert all(query_doc.content == doc.content for query_doc, doc in zip(query[2:], docs[2:]))
qh = indexer.get_query_handler()
assert len(qh.keys()) == 3
|
import asyncio
from math import ceil
import pytest
from docarray import Document
from jina.clients.request.asyncio import request_generator
NUM_INPUT_DOCS = 30
REQUEST_SIZE = 10
@pytest.mark.asyncio
async def test_asyncio_req_generator():
async def input_function():
data = [Document() for _ in range(NUM_INPUT_DOCS)]
for doc in data:
yield doc
generator = request_generator('/', input_function(), request_size=REQUEST_SIZE)
i = 0
async for req in generator:
i += 1
assert len(req.docs) == REQUEST_SIZE
await asyncio.sleep(0.1)
assert i == ceil(NUM_INPUT_DOCS / REQUEST_SIZE)
@pytest.mark.asyncio
async def test_asyncio_req_generator_empty_inputs():
generator = request_generator('/', None)
i = 0
async for req in generator:
i += 1
assert len(req.docs) == 0
await asyncio.sleep(0.1)
assert i == 1
def test_asyncio_bad_input_generator():
# exception not handled
data = ['text' for _ in range(20)]
request_generator('/', data, request_size=10)
@pytest.mark.asyncio
async def test_asyncio_bad_input_generator2():
async def input_function():
for _ in range(NUM_INPUT_DOCS):
yield 42
with pytest.raises(TypeError):
async for req in request_generator(
exec_endpoint='/', data=input_function(), request_size=REQUEST_SIZE
):
print(req.docs.summary())
async def input_function():
yield Document()
yield 42
with pytest.raises(ValueError):
async for req in request_generator(
exec_endpoint='/', data=input_function(), request_size=REQUEST_SIZE
):
print(req.docs.summary())
|
import asyncio
from math import ceil
import pytest
from docarray import Document
from jina.clients.request.asyncio import request_generator
NUM_INPUT_DOCS = 30
REQUEST_SIZE = 10
@pytest.mark.asyncio
async def test_asyncio_req_generator():
async def input_function():
data = [Document() for _ in range(NUM_INPUT_DOCS)]
for doc in data:
yield doc
generator = request_generator('/', input_function(), request_size=REQUEST_SIZE)
i = 0
async for req in generator:
i += 1
assert len(req.docs) == REQUEST_SIZE
await asyncio.sleep(0.1)
assert i == ceil(NUM_INPUT_DOCS / REQUEST_SIZE)
@pytest.mark.asyncio
async def test_asyncio_req_generator_empty_inputs():
generator = request_generator('/', None)
i = 0
async for req in generator:
i += 1
assert len(req.docs) == 0
await asyncio.sleep(0.1)
assert i == 1
def test_asyncio_bad_input_generator():
# exception not handled
data = ['text' for _ in range(20)]
request_generator('/', data, request_size=10)
@pytest.mark.asyncio
async def test_asyncio_bad_input_generator2():
async def input_function():
for _ in range(NUM_INPUT_DOCS):
yield 42
with pytest.raises(TypeError):
async for req in request_generator(exec_endpoint='/', data=input_function(), request_size=REQUEST_SIZE):
print(req.docs.summary())
async def input_function():
yield Document()
yield 42
with pytest.raises(ValueError):
async for req in request_generator(exec_endpoint='/', data=input_function(), request_size=REQUEST_SIZE):
print(req.docs.summary())
|
# flake8: noqa: F401
r"""
This file is in the process of migration to `torch/ao/quantization`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry or functionality, please add it to
`torch/ao/quantization/fuse_modules.py` and add an import statement here.
"""
# TODO: These functions are not used outside the `fuse_modules.py`
# Keeping here for now, need to remove them later.
from torch.ao.quantization.fuse_modules import (
_fuse_modules,
_get_module,
_set_module,
fuse_known_modules,
fuse_modules,
get_fuser_method,
)
# for backward compatibility
from torch.ao.quantization.fuser_method_mappings import fuse_conv_bn, fuse_conv_bn_relu
|
# flake8: noqa: F401
r"""
This file is in the process of migration to `torch/ao/quantization`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry or functionality, please add it to
`torch/ao/quantization/fuse_modules.py` and add an import statement here.
"""
# TODO: These functions are not used outside the `fuse_modules.py`
# Keeping here for now, need to remove them later.
from torch.ao.quantization.fuse_modules import (
_fuse_modules,
_get_module,
_set_module,
fuse_known_modules,
fuse_modules,
get_fuser_method,
)
# for backward compatibility
from torch.ao.quantization.fuser_method_mappings import fuse_conv_bn, fuse_conv_bn_relu
|
from typing import Dict, List, Optional, Set
import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from docarray.utils.reduce import reduce, reduce_all
class InnerDoc(BaseDoc):
integer: int
inner_list: List
class MMDoc(BaseDoc):
text: str = ''
price: int = 0
categories: Optional[List[str]] = None
image: Optional[ImageDoc] = None
matches: Optional[DocList] = None
matches_with_same_id: Optional[DocList] = None
opt_int: Optional[int] = None
test_set: Optional[Set] = None
inner_doc: Optional[InnerDoc] = None
test_dict: Optional[Dict] = None
@pytest.fixture
def doc1():
return MMDoc(
text='hey here',
categories=['a', 'b', 'c'],
price=10,
matches=DocList[MMDoc]([MMDoc()]),
matches_with_same_id=DocList[MMDoc](
[MMDoc(id='a', matches=DocList[MMDoc]([MMDoc()]))]
),
test_set={'a', 'a'},
inner_doc=InnerDoc(integer=2, inner_list=['c', 'd']),
test_dict={'a': 0, 'b': 2, 'd': 4, 'z': 3},
)
@pytest.fixture
def doc2(doc1):
return MMDoc(
id=doc1.id,
text='hey here 2',
categories=['d', 'e', 'f'],
price=5,
opt_int=5,
matches=DocList[MMDoc]([MMDoc()]),
matches_with_same_id=DocList[MMDoc](
[MMDoc(id='a', matches=DocList[MMDoc]([MMDoc()]))]
),
test_set={'a', 'b'},
inner_doc=InnerDoc(integer=3, inner_list=['a', 'b']),
test_dict={'a': 10, 'b': 10, 'c': 3, 'z': None},
)
def test_reduce_different_ids():
da1 = DocList[MMDoc]([MMDoc() for _ in range(10)])
da2 = DocList[MMDoc]([MMDoc() for _ in range(10)])
result = reduce(da1, da2)
assert len(result) == 20
# da1 is changed in place (no extra memory)
assert len(da1) == 20
def test_reduce(doc1, doc2):
da1 = DocList[MMDoc]([doc1, MMDoc()])
da2 = DocList[MMDoc]([MMDoc(), doc2])
result = reduce(da1, da2)
assert len(result) == 3
# da1 is changed in place (no extra memory)
assert len(da1) == 3
merged_doc = result[0]
assert merged_doc.text == 'hey here 2'
assert merged_doc.categories == ['a', 'b', 'c', 'd', 'e', 'f']
assert len(merged_doc.matches) == 2
assert merged_doc.opt_int == 5
assert merged_doc.price == 5
assert merged_doc.test_set == {'a', 'b'}
assert len(merged_doc.matches_with_same_id) == 1
assert len(merged_doc.matches_with_same_id[0].matches) == 2
assert merged_doc.inner_doc.integer == 3
assert merged_doc.inner_doc.inner_list == ['c', 'd', 'a', 'b']
def test_reduce_all(doc1, doc2):
da1 = DocList[MMDoc]([doc1, MMDoc()])
da2 = DocList[MMDoc]([MMDoc(), doc2])
da3 = DocList[MMDoc]([MMDoc(), MMDoc(), doc1])
result = reduce_all([da1, da2, da3])
assert len(result) == 5
# da1 is changed in place (no extra memory)
assert len(da1) == 5
merged_doc = result[0]
assert merged_doc.text == 'hey here 2'
assert merged_doc.categories == [
'a',
'b',
'c',
'd',
'e',
'f',
'a',
'b',
'c',
'd',
'e',
'f',
]
assert len(merged_doc.matches) == 2
assert merged_doc.opt_int == 5
assert merged_doc.price == 5
assert merged_doc.test_set == {'a', 'b'}
assert len(merged_doc.matches_with_same_id) == 1
assert len(merged_doc.matches_with_same_id[0].matches) == 2
assert merged_doc.inner_doc.integer == 3
assert merged_doc.inner_doc.inner_list == ['c', 'd', 'a', 'b', 'c', 'd', 'a', 'b']
def test_update_ndarray():
from docarray.typing import NdArray
import numpy as np
class MyDoc(BaseDoc):
embedding: NdArray[128]
embedding1 = np.random.rand(128)
embedding2 = np.random.rand(128)
doc1 = MyDoc(id='0', embedding=embedding1)
doc2 = MyDoc(id='0', embedding=embedding2)
doc1.update(doc2)
assert (doc1.embedding == embedding2).all()
|
from typing import Dict, List, Optional, Set
import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from docarray.utils.reduce import reduce, reduce_all
class InnerDoc(BaseDoc):
integer: int
inner_list: List
class MMDoc(BaseDoc):
text: str = ''
price: int = 0
categories: Optional[List[str]] = None
image: Optional[ImageDoc] = None
matches: Optional[DocList] = None
matches_with_same_id: Optional[DocList] = None
opt_int: Optional[int] = None
test_set: Optional[Set] = None
inner_doc: Optional[InnerDoc] = None
test_dict: Optional[Dict] = None
@pytest.fixture
def doc1():
return MMDoc(
text='hey here',
categories=['a', 'b', 'c'],
price=10,
matches=DocList[MMDoc]([MMDoc()]),
matches_with_same_id=DocList[MMDoc](
[MMDoc(id='a', matches=DocList[MMDoc]([MMDoc()]))]
),
test_set={'a', 'a'},
inner_doc=InnerDoc(integer=2, inner_list=['c', 'd']),
test_dict={'a': 0, 'b': 2, 'd': 4, 'z': 3},
)
@pytest.fixture
def doc2(doc1):
return MMDoc(
id=doc1.id,
text='hey here 2',
categories=['d', 'e', 'f'],
price=5,
opt_int=5,
matches=DocList[MMDoc]([MMDoc()]),
matches_with_same_id=DocList[MMDoc](
[MMDoc(id='a', matches=DocList[MMDoc]([MMDoc()]))]
),
test_set={'a', 'b'},
inner_doc=InnerDoc(integer=3, inner_list=['a', 'b']),
test_dict={'a': 10, 'b': 10, 'c': 3, 'z': None},
)
def test_reduce_different_ids():
da1 = DocList[MMDoc]([MMDoc() for _ in range(10)])
da2 = DocList[MMDoc]([MMDoc() for _ in range(10)])
result = reduce(da1, da2)
assert len(result) == 20
# da1 is changed in place (no extra memory)
assert len(da1) == 20
def test_reduce(doc1, doc2):
da1 = DocList[MMDoc]([doc1, MMDoc()])
da2 = DocList[MMDoc]([MMDoc(), doc2])
result = reduce(da1, da2)
assert len(result) == 3
# da1 is changed in place (no extra memory)
assert len(da1) == 3
merged_doc = result[0]
assert merged_doc.text == 'hey here 2'
assert merged_doc.categories == ['a', 'b', 'c', 'd', 'e', 'f']
assert len(merged_doc.matches) == 2
assert merged_doc.opt_int == 5
assert merged_doc.price == 5
assert merged_doc.test_set == {'a', 'b'}
assert len(merged_doc.matches_with_same_id) == 1
assert len(merged_doc.matches_with_same_id[0].matches) == 2
assert merged_doc.inner_doc.integer == 3
assert merged_doc.inner_doc.inner_list == ['c', 'd', 'a', 'b']
def test_reduce_all(doc1, doc2):
da1 = DocList[MMDoc]([doc1, MMDoc()])
da2 = DocList[MMDoc]([MMDoc(), doc2])
da3 = DocList[MMDoc]([MMDoc(), MMDoc(), doc1])
result = reduce_all([da1, da2, da3])
assert len(result) == 5
# da1 is changed in place (no extra memory)
assert len(da1) == 5
merged_doc = result[0]
assert merged_doc.text == 'hey here 2'
assert merged_doc.categories == [
'a',
'b',
'c',
'd',
'e',
'f',
'a',
'b',
'c',
'd',
'e',
'f',
]
assert len(merged_doc.matches) == 2
assert merged_doc.opt_int == 5
assert merged_doc.price == 5
assert merged_doc.test_set == {'a', 'b'}
assert len(merged_doc.matches_with_same_id) == 1
assert len(merged_doc.matches_with_same_id[0].matches) == 2
assert merged_doc.inner_doc.integer == 3
assert merged_doc.inner_doc.inner_list == ['c', 'd', 'a', 'b', 'c', 'd', 'a', 'b']
|
from typing import Any, Dict, Iterator
import torch
from ..utils import _log_api_usage_once
try:
from ._load_gpu_decoder import _HAS_GPU_VIDEO_DECODER
except ModuleNotFoundError:
_HAS_GPU_VIDEO_DECODER = False
from ._video_opt import (
_HAS_VIDEO_OPT,
_probe_video_from_file,
_probe_video_from_memory,
_read_video_from_file,
_read_video_from_memory,
_read_video_timestamps_from_file,
_read_video_timestamps_from_memory,
Timebase,
VideoMetaData,
)
from .image import (
decode_image,
decode_jpeg,
decode_png,
encode_jpeg,
encode_png,
ImageReadMode,
read_file,
read_image,
write_file,
write_jpeg,
write_png,
)
from .video import read_video, read_video_timestamps, write_video
from .video_reader import VideoReader
__all__ = [
"write_video",
"read_video",
"read_video_timestamps",
"_read_video_from_file",
"_read_video_timestamps_from_file",
"_probe_video_from_file",
"_read_video_from_memory",
"_read_video_timestamps_from_memory",
"_probe_video_from_memory",
"_HAS_VIDEO_OPT",
"_HAS_GPU_VIDEO_DECODER",
"_read_video_clip_from_memory",
"_read_video_meta_data",
"VideoMetaData",
"Timebase",
"ImageReadMode",
"decode_image",
"decode_jpeg",
"decode_png",
"encode_jpeg",
"encode_png",
"read_file",
"read_image",
"write_file",
"write_jpeg",
"write_png",
"Video",
"VideoReader",
]
|
from typing import Any, Dict, Iterator
import torch
from ..utils import _log_api_usage_once
from ._video_opt import (
_HAS_VIDEO_OPT,
_probe_video_from_file,
_probe_video_from_memory,
_read_video_from_file,
_read_video_from_memory,
_read_video_timestamps_from_file,
_read_video_timestamps_from_memory,
Timebase,
VideoMetaData,
)
from .image import (
decode_image,
decode_jpeg,
decode_png,
encode_jpeg,
encode_png,
ImageReadMode,
read_file,
read_image,
write_file,
write_jpeg,
write_png,
)
from .video import read_video, read_video_timestamps, write_video
from .video_reader import VideoReader
__all__ = [
"write_video",
"read_video",
"read_video_timestamps",
"_read_video_from_file",
"_read_video_timestamps_from_file",
"_probe_video_from_file",
"_read_video_from_memory",
"_read_video_timestamps_from_memory",
"_probe_video_from_memory",
"_HAS_VIDEO_OPT",
"_read_video_clip_from_memory",
"_read_video_meta_data",
"VideoMetaData",
"Timebase",
"ImageReadMode",
"decode_image",
"decode_jpeg",
"decode_png",
"encode_jpeg",
"encode_png",
"read_file",
"read_image",
"write_file",
"write_jpeg",
"write_png",
"Video",
"VideoReader",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools.dataforseo_api_search.tool import (
DataForSeoAPISearchResults,
DataForSeoAPISearchRun,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"DataForSeoAPISearchRun": "langchain_community.tools.dataforseo_api_search.tool",
"DataForSeoAPISearchResults": (
"langchain_community.tools.dataforseo_api_search.tool"
),
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"DataForSeoAPISearchResults",
"DataForSeoAPISearchRun",
]
|
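# Editor's note (not part of the original files): the module above delegates to
# langchain's create_importer helper, whose internals are not shown here. As a
# hedged sketch, the lazy-lookup pattern it provides is roughly equivalent to the
# standalone module-level __getattr__ below (PEP 562); the real helper's warning
# text and optional-import handling may differ.
import importlib
import warnings
from typing import Any

_LOOKUP = {
    "DataForSeoAPISearchRun": "langchain_community.tools.dataforseo_api_search.tool",
    "DataForSeoAPISearchResults": "langchain_community.tools.dataforseo_api_search.tool",
}

def __getattr__(name: str) -> Any:
    """Resolve deprecated names lazily, warning on first access."""
    if name in _LOOKUP:
        warnings.warn(
            f"Importing {name} from this module is deprecated; "
            f"import it from {_LOOKUP[name]} instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        module = importlib.import_module(_LOOKUP[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")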
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools.dataforseo_api_search.tool import (
DataForSeoAPISearchResults,
DataForSeoAPISearchRun,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"DataForSeoAPISearchRun": "langchain_community.tools.dataforseo_api_search.tool",
"DataForSeoAPISearchResults": (
"langchain_community.tools.dataforseo_api_search.tool"
),
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"DataForSeoAPISearchRun",
"DataForSeoAPISearchResults",
]
|
import pytest
from llama_index.postprocessor.nvidia_rerank import NVIDIARerank
import respx
@pytest.fixture(autouse=True)
def mock_local_models(respx_mock: respx.MockRouter) -> None:
respx_mock.get(
"https://test_url/v1/models",
json={
"data": [
{"id": "model1"},
]
},
)
@pytest.mark.integration()
def test_available_models(mode: dict) -> None:
models = NVIDIARerank(**mode).available_models
assert models
assert isinstance(models, list)
assert all(isinstance(model.id, str) for model in models)
|
import pytest
from llama_index.postprocessor.nvidia_rerank import NVIDIARerank
from requests_mock import Mocker
@pytest.fixture(autouse=True)
def mock_local_models(requests_mock: Mocker) -> None:
requests_mock.get(
"https://test_url/v1/models",
json={
"data": [
{"id": "model1"},
]
},
)
@pytest.mark.integration()
def test_available_models(mode: dict) -> None:
models = NVIDIARerank(**mode).available_models
assert models
assert isinstance(models, list)
assert all(isinstance(model.id, str) for model in models)
|
import os
from typing import Dict
DEPLOYMENT_FILES = [
'statefulset-executor',
'deployment-executor',
'deployment-gateway',
'deployment-uses-before',
'deployment-uses-after',
'deployment-uses-before-after',
]
cur_dir = os.path.dirname(__file__)
DEFAULT_RESOURCE_DIR = os.path.join(
cur_dir, '..', '..', '..', '..', 'resources', 'k8s', 'template'
)
def get_yaml(template: str, params: Dict) -> Dict:
"""Create a resource on Kubernetes based on the `template`. It fills the `template` using the `params`.
:param template: path to the template file.
:param params: dictionary for replacing the placeholders (keys) with the actual values.
:return: The yaml dictionary with the corresponding template filled with parameters
"""
if template == 'configmap':
yaml = _get_configmap_yaml(template, params)
elif template in DEPLOYMENT_FILES:
yaml = _get_yaml(template, params)
if params.get('device_plugins'):
yaml = _get_deployment_with_device_plugins(yaml, params)
if params.get('env_from_secret'):
yaml = _get_deployment_with_env_secret(yaml, params)
else:
yaml = _get_yaml(template, params)
return yaml
def _get_yaml(template: str, params: Dict) -> Dict:
import yaml
path = os.path.join(DEFAULT_RESOURCE_DIR, f'{template}.yml')
with open(path, encoding='utf-8') as f:
content = f.read()
for k, v in params.items():
content = content.replace(f'{{{k}}}', str(v))
d = yaml.safe_load(content)
return d
def _get_configmap_yaml(template: str, params: Dict):
import yaml
path = os.path.join(DEFAULT_RESOURCE_DIR, f'{template}.yml')
with open(path, encoding='utf-8') as f:
config_map = yaml.safe_load(f)
config_map['metadata']['name'] = params.get('name') + '-' + 'configmap'
config_map['metadata']['namespace'] = params.get('namespace')
if params.get('data'):
for key, value in params['data'].items():
config_map['data'][key] = str(value)
return config_map
def _get_device_plugins(params: Dict):
data = {'limits': {}}
for key, value in params.items():
data['limits'][key] = value
return data
def _get_deployment_with_device_plugins(deployment: Dict, params: Dict) -> Dict:
device_plugins = _get_device_plugins(params['device_plugins'])
deployment['spec']['template']['spec']['containers'][0][
'resources'
] = device_plugins
return deployment
def _get_deployment_with_env_secret(deployment: Dict, params: Dict) -> Dict:
for k,v in params['env_from_secret'].items():
env_var = {}
env_var['name'] = k
env_var['valueFrom'] = {'secretKeyRef': {'name': v['name'], 'key': v['key']}}
deployment['spec']['template']['spec']['containers'][0]['env'].append(env_var)
return deployment
|
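# Editor's note (not part of the original files): a minimal, hypothetical usage
# sketch of the get_yaml helper defined above, assumed to be in scope. The params
# keys are illustrative; the real placeholders are whatever {name}-style markers
# the template files under resources/k8s/template contain.
params = {
    'name': 'my-executor',
    'namespace': 'demo',
    'device_plugins': {'nvidia.com/gpu': 1},  # optional: becomes the container's resource limits
}
deployment = get_yaml('deployment-executor', params)  # returns the rendered manifest as a dict
print(list(deployment.keys()))  # top-level keys of the rendered deployment, e.g. metadata and spec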
import os
from typing import Dict
DEPLOYMENT_FILES = [
'statefulset-executor',
'deployment-executor',
'deployment-gateway',
'deployment-uses-before',
'deployment-uses-after',
'deployment-uses-before-after',
]
cur_dir = os.path.dirname(__file__)
DEFAULT_RESOURCE_DIR = os.path.join(
cur_dir, '..', '..', '..', '..', 'resources', 'k8s', 'template'
)
def get_yaml(template: str, params: Dict) -> Dict:
"""Create a resource on Kubernetes based on the `template`. It fills the `template` using the `params`.
:param template: path to the template file.
:param params: dictionary for replacing the placeholders (keys) with the actual values.
:return: The yaml dictionary with the corresponding template filled with parameters
"""
if template == 'configmap':
yaml = _get_configmap_yaml(template, params)
elif template in DEPLOYMENT_FILES:
yaml = _get_yaml(template, params)
if params.get('device_plugins'):
yaml = _get_deployment_with_device_plugins(yaml, params)
if params.get('env_from_secret'):
yaml = _get_deployment_with_env_secret(yaml, params)
else:
yaml = _get_yaml(template, params)
return yaml
def _get_yaml(template: str, params: Dict) -> Dict:
import yaml
path = os.path.join(DEFAULT_RESOURCE_DIR, f'{template}.yml')
with open(path) as f:
content = f.read()
for k, v in params.items():
content = content.replace(f'{{{k}}}', str(v))
d = yaml.safe_load(content)
return d
def _get_configmap_yaml(template: str, params: Dict):
import yaml
path = os.path.join(DEFAULT_RESOURCE_DIR, f'{template}.yml')
with open(path) as f:
config_map = yaml.safe_load(f)
config_map['metadata']['name'] = params.get('name') + '-' + 'configmap'
config_map['metadata']['namespace'] = params.get('namespace')
if params.get('data'):
for key, value in params['data'].items():
config_map['data'][key] = str(value)
return config_map
def _get_device_plugins(params: Dict):
data = {'limits': {}}
for key, value in params.items():
data['limits'][key] = value
return data
def _get_deployment_with_device_plugins(deployment: Dict, params: Dict) -> Dict:
device_plugins = _get_device_plugins(params['device_plugins'])
deployment['spec']['template']['spec']['containers'][0][
'resources'
] = device_plugins
return deployment
def _get_deployment_with_env_secret(deployment: Dict, params: Dict) -> Dict:
for k,v in params['env_from_secret'].items():
env_var = {}
env_var['name'] = k
env_var['valueFrom'] = {'secretKeyRef': {'name': v['name'], 'key': v['key']}}
deployment['spec']['template']['spec']['containers'][0]['env'].append(env_var)
return deployment
|
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import torch.nn as nn
from transformers.utils import logging
logger = logging.get_logger(__name__)
class GradientCheckpointingLayer(nn.Module):
"""Base class for layers with gradient checkpointing.
This class enables gradient checkpointing functionality for a layer. By default, gradient checkpointing is disabled
(`gradient_checkpointing = False`). When `model.set_gradient_checkpointing()` is called, gradient checkpointing is
enabled by setting `gradient_checkpointing = True` and assigning a checkpointing function to `_gradient_checkpointing_func`.
Important:
When using gradient checkpointing with `use_reentrant=True`, inputs that require gradients (e.g. hidden states)
must be passed as positional arguments (`*args`) rather than keyword arguments to properly propagate gradients.
Example:
```python
>>> # Correct - hidden_states passed as positional arg
>>> out = self.layer(hidden_states, attention_mask=attention_mask)
>>> # Incorrect - hidden_states passed as keyword arg
>>> out = self.layer(hidden_states=hidden_states, attention_mask=attention_mask)
```
"""
gradient_checkpointing = False
def __call__(self, *args, **kwargs):
if self.gradient_checkpointing and self.training:
do_warn = False
layer_name = self.__class__.__name__
message = f"Caching is incompatible with gradient checkpointing in {layer_name}. Setting"
if "use_cache" in kwargs and kwargs["use_cache"]:
kwargs["use_cache"] = False
message += " `use_cache=False`,"
do_warn = True
# different names for the same thing in different layers
if "past_key_value" in kwargs and kwargs["past_key_value"] is not None:
kwargs["past_key_value"] = None
message += " `past_key_value=None`,"
do_warn = True
if "past_key_values" in kwargs and kwargs["past_key_values"] is not None:
kwargs["past_key_values"] = None
message += " `past_key_values=None`,"
do_warn = True
if "layer_past" in kwargs and kwargs["layer_past"] is not None:
kwargs["layer_past"] = None
message += " `layer_past=None`,"
do_warn = True
# warn if anything was changed
if do_warn:
message = message.rstrip(",") + "."
logger.warning(message)
return self._gradient_checkpointing_func(partial(super().__call__, **kwargs), *args)
return super().__call__(*args, **kwargs)
|
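# Editor's note (not part of the original files): a minimal sketch of how a layer
# built on the GradientCheckpointingLayer class above behaves once checkpointing is
# enabled. ToyBlock is hypothetical, and the import path assumes recent transformers
# releases where the class lives in transformers.modeling_layers.
from functools import partial

import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint
from transformers.modeling_layers import GradientCheckpointingLayer  # assumed import path

class ToyBlock(GradientCheckpointingLayer):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(8, 8)

    def forward(self, hidden_states, attention_mask=None):
        return self.linear(hidden_states)

block = ToyBlock()
block.train()
block.gradient_checkpointing = True
# what model.set_gradient_checkpointing() would normally assign:
block._gradient_checkpointing_func = partial(checkpoint, use_reentrant=False)
x = torch.randn(2, 8, requires_grad=True)
out = block(x, attention_mask=None)  # hidden states passed positionally, as the docstring advises
out.sum().backward()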
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import torch.nn as nn
class GradientCheckpointingLayer(nn.Module):
"""Base class for layers with gradient checkpointing.
This class enables gradient checkpointing functionality for a layer. By default, gradient checkpointing is disabled
(`gradient_checkpointing = False`). When `model.set_gradient_checkpointing()` is called, gradient checkpointing is
enabled by setting `gradient_checkpointing = True` and assigning a checkpointing function to `_gradient_checkpointing_func`.
Important:
When using gradient checkpointing with `use_reentrant=True`, inputs that require gradients (e.g. hidden states)
must be passed as positional arguments (`*args`) rather than keyword arguments to properly propagate gradients.
Example:
```python
>>> # Correct - hidden_states passed as positional arg
>>> out = self.layer(hidden_states, attention_mask=attention_mask)
>>> # Incorrect - hidden_states passed as keyword arg
>>> out = self.layer(hidden_states=hidden_states, attention_mask=attention_mask)
```
"""
gradient_checkpointing = False
def __call__(self, *args, **kwargs):
if self.gradient_checkpointing and self.training:
return self._gradient_checkpointing_func(partial(super().__call__, **kwargs), *args)
return super().__call__(*args, **kwargs)
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.14.0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.2.dev1"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
import os
import urllib
import pytest
from pydantic import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import TextUrl
REMOTE_TXT = 'https://de.wikipedia.org/wiki/Brixen'
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
LOCAL_TXT = os.path.join(CUR_DIR, '..', '..', '..', 'toydata', 'penal_colony.txt')
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'url,expected_beginning',
[(REMOTE_TXT, '<!DOCTYPE html>'), (LOCAL_TXT, '“It’s a peculiar apparatus,”')],
)
def test_load(url, expected_beginning):
uri = parse_obj_as(TextUrl, url)
txt = uri.load()
assert txt.startswith(expected_beginning)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('url', [REMOTE_TXT, LOCAL_TXT])
def test_load_to_bytes(url):
uri = parse_obj_as(TextUrl, url)
txt_bytes = uri.load_bytes()
assert isinstance(txt_bytes, bytes)
def test_proto_text_url():
uri = parse_obj_as(TextUrl, LOCAL_TXT)
uri._to_node_protobuf()
@pytest.mark.internet
def test_load_timeout():
url = parse_obj_as(TextUrl, REMOTE_TXT)
with pytest.raises(urllib.error.URLError):
_ = url.load(timeout=0.001)
with pytest.raises(urllib.error.URLError):
_ = url.load_bytes(timeout=0.001)
def test_json_schema():
schema_json_of(TextUrl)
def test_dump_json():
url = parse_obj_as(TextUrl, REMOTE_TXT)
orjson_dumps(url)
|
import os
import urllib
import pytest
from pydantic import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import TextUrl
REMOTE_TXT = 'https://de.wikipedia.org/wiki/Brixen'
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
LOCAL_TXT = os.path.join(CUR_DIR, '..', '..', '..', 'toydata', 'penal_colony.txt')
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'url,expected_beginning',
[(REMOTE_TXT, '<!DOCTYPE html>'), (LOCAL_TXT, '“It’s a peculiar apparatus,”')],
)
def test_load(url, expected_beginning):
uri = parse_obj_as(TextUrl, url)
txt = uri.load()
assert txt.startswith(expected_beginning)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('url', [REMOTE_TXT, LOCAL_TXT])
def test_load_to_bytes(url):
uri = parse_obj_as(TextUrl, url)
txt_bytes = uri.load_to_bytes()
assert isinstance(txt_bytes, bytes)
def test_proto_text_url():
uri = parse_obj_as(TextUrl, LOCAL_TXT)
uri._to_node_protobuf()
@pytest.mark.internet
def test_load_timeout():
url = parse_obj_as(TextUrl, REMOTE_TXT)
with pytest.raises(urllib.error.URLError):
_ = url.load(timeout=0.001)
with pytest.raises(urllib.error.URLError):
_ = url.load_to_bytes(timeout=0.001)
def test_json_schema():
schema_json_of(TextUrl)
def test_dump_json():
url = parse_obj_as(TextUrl, REMOTE_TXT)
orjson_dumps(url)
|
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Gemma3 model."""
import unittest
from io import BytesIO
import requests
from PIL import Image
from transformers import is_torch_available
from transformers.testing_utils import (
cleanup,
require_torch_accelerator,
slow,
torch_device,
)
if is_torch_available():
import torch
from transformers import ShieldGemma2ForImageClassification, ShieldGemma2Processor
@slow
@require_torch_accelerator
# @require_read_token
class ShieldGemma2IntegrationTest(unittest.TestCase):
def tearDown(self):
cleanup(torch_device, gc_collect=True)
def test_model(self):
model_id = "google/shieldgemma-2-4b-it"
processor = ShieldGemma2Processor.from_pretrained(model_id, padding_side="left")
url = "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png"
response = requests.get(url)
image = Image.open(BytesIO(response.content))
model = ShieldGemma2ForImageClassification.from_pretrained(model_id, torch_dtype=torch.bfloat16).to(
torch_device
)
inputs = processor(images=[image]).to(torch_device)
output = model(**inputs)
self.assertEqual(len(output.probabilities), 3)
for element in output.probabilities:
self.assertEqual(len(element), 2)
|
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Gemma3 model."""
import unittest
from io import BytesIO
import requests
from PIL import Image
from transformers import is_torch_available
from transformers.testing_utils import (
cleanup,
require_torch_accelerator,
slow,
torch_device,
)
if is_torch_available():
import torch
from transformers import ShieldGemma2ForImageClassification, ShieldGemma2Processor
@slow
@require_torch_accelerator
# @require_read_token
class ShieldGemma2IntegrationTest(unittest.TestCase):
def tearDown(self):
cleanup(torch_device, gc_collect=True)
def test_model(self):
model_id = "google/shieldgemma-2-4b-it"
processor = ShieldGemma2Processor.from_pretrained(model_id, padding_side="left")
url = "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png"
response = requests.get(url)
image = Image.open(BytesIO(response.content))
model = ShieldGemma2ForImageClassification.from_pretrained(
model_id, low_cpu_mem_usage=True, torch_dtype=torch.bfloat16
).to(torch_device)
inputs = processor(images=[image]).to(torch_device)
output = model(**inputs)
self.assertEqual(len(output.probabilities), 3)
for element in output.probabilities:
self.assertEqual(len(element), 2)
|
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional # usort: skip
from ._transform import Transform # usort: skip
from ._augment import CutMix, JPEG, MixUp, RandomErasing
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import (
ColorJitter,
Grayscale,
RandomAdjustSharpness,
RandomAutocontrast,
RandomChannelPermutation,
RandomEqualize,
RandomGrayscale,
RandomInvert,
RandomPhotometricDistort,
RandomPosterize,
RandomSolarize,
RGB,
)
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
CenterCrop,
ElasticTransform,
FiveCrop,
Pad,
RandomAffine,
RandomCrop,
RandomHorizontalFlip,
RandomIoUCrop,
RandomPerspective,
RandomResize,
RandomResizedCrop,
RandomRotation,
RandomShortestSize,
RandomVerticalFlip,
RandomZoomOut,
Resize,
ScaleJitter,
TenCrop,
)
from ._meta import ClampBoundingBoxes, ConvertBoundingBoxFormat
from ._misc import (
ConvertImageDtype,
GaussianBlur,
GaussianNoise,
Identity,
Lambda,
LinearTransformation,
Normalize,
SanitizeBoundingBoxes,
ToDtype,
)
from ._temporal import UniformTemporalSubsample
from ._type_conversion import PILToTensor, ToImage, ToPILImage, ToPureTensor
from ._utils import check_type, get_bounding_boxes, has_all, has_any, query_chw, query_size
from ._deprecated import ToTensor # usort: skip
|
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional # usort: skip
from ._transform import Transform # usort: skip
from ._augment import CutMix, JPEG, MixUp, RandomErasing
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import (
ColorJitter,
Grayscale,
RandomAdjustSharpness,
RandomAutocontrast,
RandomChannelPermutation,
RandomEqualize,
RandomGrayscale,
RandomInvert,
RandomPhotometricDistort,
RandomPosterize,
RandomSolarize,
RGB,
)
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
CenterCrop,
ElasticTransform,
FiveCrop,
Pad,
RandomAffine,
RandomCrop,
RandomHorizontalFlip,
RandomIoUCrop,
RandomPerspective,
RandomResize,
RandomResizedCrop,
RandomRotation,
RandomShortestSize,
RandomVerticalFlip,
RandomZoomOut,
Resize,
ScaleJitter,
TenCrop,
)
from ._meta import ClampBoundingBoxes, ConvertBoundingBoxFormat
from ._misc import (
ConvertImageDtype,
GaussianBlur,
GaussianNoise,
Identity,
Lambda,
LinearTransformation,
Normalize,
SanitizeBoundingBoxes,
ToDtype,
)
from ._temporal import UniformTemporalSubsample
from ._type_conversion import PILToTensor, ToImage, ToPILImage, ToPureTensor
from ._deprecated import ToTensor # usort: skip
|
import pytest
from backend.data import db
from backend.executor import ExecutionScheduler
from backend.server.model import CreateGraph
from backend.usecases.sample import create_test_graph, create_test_user
from backend.util.service import get_service_client
from backend.util.test import SpinTestServer
@pytest.mark.asyncio(scope="session")
async def test_agent_schedule(server: SpinTestServer):
await db.connect()
test_user = await create_test_user()
test_graph = await server.agent_server.test_create_graph(
create_graph=CreateGraph(graph=create_test_graph()),
is_template=False,
user_id=test_user.id,
)
scheduler = get_service_client(ExecutionScheduler)
schedules = scheduler.get_execution_schedules(test_graph.id, test_user.id)
assert len(schedules) == 0
schedule = scheduler.add_execution_schedule(
graph_id=test_graph.id,
user_id=test_user.id,
graph_version=1,
cron="0 0 * * *",
input_data={"input": "data"},
)
assert schedule
schedules = scheduler.get_execution_schedules(test_graph.id, test_user.id)
assert len(schedules) == 1
assert schedules[0].cron == "0 0 * * *"
scheduler.delete_schedule(schedule.id, user_id=test_user.id)
schedules = scheduler.get_execution_schedules(test_graph.id, user_id=test_user.id)
assert len(schedules) == 0
|
import pytest
from backend.data import db
from backend.executor import ExecutionScheduler
from backend.server.model import CreateGraph
from backend.usecases.sample import create_test_graph, create_test_user
from backend.util.service import get_service_client
from backend.util.test import SpinTestServer
@pytest.mark.asyncio(scope="session")
async def test_agent_schedule(server: SpinTestServer):
await db.connect()
test_user = await create_test_user()
test_graph = await server.agent_server.test_create_graph(
create_graph=CreateGraph(graph=create_test_graph()),
is_template=False,
user_id=test_user.id,
)
scheduler = get_service_client(ExecutionScheduler)
schedules = scheduler.get_execution_schedules(test_graph.id, test_user.id)
assert len(schedules) == 0
schedule_id = scheduler.add_execution_schedule(
graph_id=test_graph.id,
user_id=test_user.id,
graph_version=1,
cron="0 0 * * *",
input_data={"input": "data"},
)
assert schedule_id
schedules = scheduler.get_execution_schedules(test_graph.id, test_user.id)
assert len(schedules) == 1
assert schedules[schedule_id] == "0 0 * * *"
scheduler.update_schedule(schedule_id, is_enabled=False, user_id=test_user.id)
schedules = scheduler.get_execution_schedules(test_graph.id, user_id=test_user.id)
assert len(schedules) == 0
|