| input (string, 33–5k chars) | output (string, 32–5k chars) |
| --- | --- |
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '2.19.1'
short_version = __version__
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
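A quick sanity check of how parse_version_info handles plain and release-candidate versions (a minimal sketch exercising only the function defined above):

assert parse_version_info('2.19.1') == (2, 19, 1)
# an 'rc' segment splits into a numeric patch part plus an 'rcN' suffix:
assert parse_version_info('2.19.0rc1') == (2, 19, 0, 'rc1')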
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '2.19.0'
short_version = __version__
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
_delete_=True,
type='HRNet',
extra=dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4, ),
num_channels=(64, )),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(4, 4),
num_channels=(32, 64)),
stage3=dict(
num_modules=4,
num_branches=3,
block='BASIC',
num_blocks=(4, 4, 4),
num_channels=(32, 64, 128)),
stage4=dict(
num_modules=3,
num_branches=4,
block='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(32, 64, 128, 256))),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w32')),
neck=dict(
_delete_=True,
type='HRFPN',
in_channels=[32, 64, 128, 256],
out_channels=256))
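In MMCV-style config inheritance, `_delete_=True` tells the loader to discard the base file's `backbone`/`neck` dicts instead of merging key-by-key, which is what lets the ResNet backbone from the base config be swapped for HRNet wholesale. A registry-free sketch of that merge rule (illustrative only, not MMCV's actual implementation):

def merge_cfg(base: dict, override: dict) -> dict:
    """Recursive dict merge with mmcv-style `_delete_` handling (sketch)."""
    if override.get('_delete_', False):
        # replace the base dict outright instead of merging key-by-key
        return {k: v for k, v in override.items() if k != '_delete_'}
    merged = dict(base)
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = merge_cfg(merged[key], value)
        else:
            merged[key] = value
    return merged

base = {'backbone': {'type': 'ResNet', 'depth': 50, 'num_stages': 4}}
override = {'backbone': {'_delete_': True, 'type': 'HRNet'}}
assert merge_cfg(base, override) == {'backbone': {'type': 'HRNet'}}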
|
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
_delete_=True,
type='HRNet',
extra=dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4, ),
num_channels=(64, )),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(4, 4),
num_channels=(32, 64)),
stage3=dict(
num_modules=4,
num_branches=3,
block='BASIC',
num_blocks=(4, 4, 4),
num_channels=(32, 64, 128)),
stage4=dict(
num_modules=3,
num_branches=4,
block='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(32, 64, 128, 256))),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w32')),
neck=dict(
_delete_=True,
type='HRFPN',
in_channels=[32, 64, 128, 256],
out_channels=256))
|
from __future__ import annotations
import os
from copy import deepcopy
import numpy as np
import pytest
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models import Pooling, StaticEmbedding, Transformer
from sentence_transformers.util import is_datasets_available
from tests.utils import SafeTemporaryDirectory
if is_datasets_available():
from datasets import DatasetDict, load_dataset
@pytest.fixture(scope="session")
def _stsb_bert_tiny_model() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
@pytest.fixture()
def stsb_bert_tiny_model(_stsb_bert_tiny_model: SentenceTransformer) -> SentenceTransformer:
return deepcopy(_stsb_bert_tiny_model)
@pytest.fixture()
def stsb_bert_tiny_model_onnx() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-onnx")
@pytest.fixture()
def stsb_bert_tiny_model_openvino() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-openvino")
@pytest.fixture()
def paraphrase_distilroberta_base_v1_model() -> SentenceTransformer:
return SentenceTransformer("paraphrase-distilroberta-base-v1")
@pytest.fixture()
def clip_vit_b_32_model() -> SentenceTransformer:
return SentenceTransformer("clip-ViT-B-32")
@pytest.fixture(scope="session")
def tokenizer() -> Tokenizer:
return Tokenizer.from_pretrained("bert-base-uncased")
@pytest.fixture
def embedding_weights():
return np.random.rand(30522, 768)
@pytest.fixture
def static_embedding_model(tokenizer: Tokenizer, embedding_weights) -> StaticEmbedding:
return StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
@pytest.fixture()
def distilbert_base_uncased_model() -> SentenceTransformer:
word_embedding_model = Transformer("distilbert-base-uncased")
pooling_model = Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
return model
@pytest.fixture(scope="session")
def stsb_dataset_dict() -> DatasetDict:
return load_dataset("sentence-transformers/stsb")
@pytest.fixture()
def cache_dir():
"""
In the CI environment, we use a temporary directory as `cache_dir`
to avoid keeping the downloaded models on disk after the test.
"""
if os.environ.get("CI", None):
# Note: `ignore_cleanup_errors=True` is used to avoid NotADirectoryError in Windows on GitHub Actions.
# See https://github.com/python/cpython/issues/107408, https://www.scivision.dev/python-tempfile-permission-error-windows/
with SafeTemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield None
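A test can then route model downloads through this fixture; `cache_folder` is the SentenceTransformer parameter it is meant to feed (a sketch, not part of the original conftest):

def test_encode_uses_cache_dir(cache_dir):
    # cache_dir is a fresh temporary directory on CI and None locally
    model = SentenceTransformer(
        "sentence-transformers-testing/stsb-bert-tiny-safetensors",
        cache_folder=cache_dir,
    )
    assert model.encode("hello world").shape[-1] > 0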
|
from __future__ import annotations
import os
import numpy as np
import pytest
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models import Pooling, StaticEmbedding, Transformer
from sentence_transformers.util import is_datasets_available
from tests.utils import SafeTemporaryDirectory
if is_datasets_available():
from datasets import DatasetDict, load_dataset
@pytest.fixture()
def stsb_bert_tiny_model() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
@pytest.fixture(scope="session")
def stsb_bert_tiny_model_reused() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
@pytest.fixture()
def stsb_bert_tiny_model_onnx() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-onnx")
@pytest.fixture()
def stsb_bert_tiny_model_openvino() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-openvino")
@pytest.fixture()
def paraphrase_distilroberta_base_v1_model() -> SentenceTransformer:
return SentenceTransformer("paraphrase-distilroberta-base-v1")
@pytest.fixture()
def clip_vit_b_32_model() -> SentenceTransformer:
return SentenceTransformer("clip-ViT-B-32")
@pytest.fixture(scope="session")
def tokenizer() -> Tokenizer:
return Tokenizer.from_pretrained("bert-base-uncased")
@pytest.fixture
def embedding_weights():
return np.random.rand(30522, 768)
@pytest.fixture
def static_embedding_model(tokenizer: Tokenizer, embedding_weights) -> StaticEmbedding:
return StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
@pytest.fixture()
def distilbert_base_uncased_model() -> SentenceTransformer:
word_embedding_model = Transformer("distilbert-base-uncased")
pooling_model = Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
return model
@pytest.fixture(scope="session")
def stsb_dataset_dict() -> DatasetDict:
return load_dataset("sentence-transformers/stsb")
@pytest.fixture()
def cache_dir():
"""
In the CI environment, we use a temporary directory as `cache_dir`
to avoid keeping the downloaded models on disk after the test.
"""
if os.environ.get("CI", None):
# Note: `ignore_cleanup_errors=True` is used to avoid NotADirectoryError in Windows on GitHub Actions.
# See https://github.com/python/cpython/issues/107408, https://www.scivision.dev/python-tempfile-permission-error-windows/
with SafeTemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield None
|
import json
import re
from re import Pattern
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
from langchain.agents.chat.prompt import FORMAT_INSTRUCTIONS
FINAL_ANSWER_ACTION = "Final Answer:"
class ReActJsonSingleInputOutputParser(AgentOutputParser):
"""Parses ReAct-style LLM calls that have a single tool input in json format.
Expects output to be in one of two formats.
If the output signals that an action should be taken,
should be in the below format. This will result in an AgentAction
being returned.
```
Thought: agent thought here
Action:
```
{
"action": "search",
"action_input": "what is the temperature in SF"
}
```
```
    If the output signals that a final answer should be given,
    it should be in the format below. This will result in an AgentFinish
being returned.
```
Thought: agent thought here
Final Answer: The temperature is 100 degrees
```
"""
pattern: Pattern = re.compile(r"^.*?`{3}(?:json)?\n?(.*?)`{3}.*?$", re.DOTALL)
"""Regex pattern to parse the output."""
def get_format_instructions(self) -> str:
return FORMAT_INSTRUCTIONS
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
includes_answer = FINAL_ANSWER_ACTION in text
try:
found = self.pattern.search(text)
if not found:
                # Fail fast; the except branch falls back to Final Answer parsing.
msg = "action not found"
raise ValueError(msg)
action = found.group(1)
response = json.loads(action.strip())
includes_action = "action" in response
if includes_answer and includes_action:
msg = (
"Parsing LLM output produced a final answer "
f"and a parse-able action: {text}"
)
raise OutputParserException(msg)
return AgentAction(
response["action"], response.get("action_input", {}), text
)
except Exception:
if not includes_answer:
msg = f"Could not parse LLM output: {text}"
raise OutputParserException(msg)
output = text.split(FINAL_ANSWER_ACTION)[-1].strip()
return AgentFinish({"output": output}, text)
@property
def _type(self) -> str:
return "react-json-single-input"
|
import json
import re
from re import Pattern
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
from langchain.agents.chat.prompt import FORMAT_INSTRUCTIONS
FINAL_ANSWER_ACTION = "Final Answer:"
class ReActJsonSingleInputOutputParser(AgentOutputParser):
"""Parses ReAct-style LLM calls that have a single tool input in json format.
Expects output to be in one of two formats.
If the output signals that an action should be taken,
should be in the below format. This will result in an AgentAction
being returned.
```
Thought: agent thought here
Action:
```
{
"action": "search",
"action_input": "what is the temperature in SF"
}
```
```
    If the output signals that a final answer should be given,
    it should be in the format below. This will result in an AgentFinish
being returned.
```
Thought: agent thought here
Final Answer: The temperature is 100 degrees
```
"""
pattern: Pattern = re.compile(r"^.*?`{3}(?:json)?\n?(.*?)`{3}.*?$", re.DOTALL)
"""Regex pattern to parse the output."""
def get_format_instructions(self) -> str:
return FORMAT_INSTRUCTIONS
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
includes_answer = FINAL_ANSWER_ACTION in text
try:
found = self.pattern.search(text)
if not found:
                # Fail fast; the except branch falls back to Final Answer parsing.
raise ValueError("action not found")
action = found.group(1)
response = json.loads(action.strip())
includes_action = "action" in response
if includes_answer and includes_action:
raise OutputParserException(
"Parsing LLM output produced a final answer "
f"and a parse-able action: {text}"
)
return AgentAction(
response["action"], response.get("action_input", {}), text
)
except Exception:
if not includes_answer:
raise OutputParserException(f"Could not parse LLM output: {text}")
output = text.split(FINAL_ANSWER_ACTION)[-1].strip()
return AgentFinish({"output": output}, text)
@property
def _type(self) -> str:
return "react-json-single-input"
|
from pathlib import Path
from typing import Any
from langchain_core._api.path import as_import_path
def __getattr__(name: str) -> Any:
"""Get attr name."""
if name == "create_pandas_dataframe_agent":
# Get directory of langchain package
HERE = Path(__file__).parents[3]
here = as_import_path(Path(__file__).parent, relative_to=HERE)
old_path = "langchain." + here + "." + name
new_path = "langchain_experimental." + here + "." + name
msg = (
"This agent has been moved to langchain experiment. "
"This agent relies on python REPL tool under the hood, so to use it "
"safely please sandbox the python REPL. "
"Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md "
"and https://github.com/langchain-ai/langchain/discussions/11680"
"To keep using this code as is, install langchain experimental and "
f"update your import statement from:\n `{old_path}` to `{new_path}`."
)
raise ImportError(msg)
msg = f"{name} does not exist"
raise AttributeError(msg)
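The lookup above relies on PEP 562 (Python 3.7+): when normal module attribute lookup fails, Python calls the module-level `__getattr__`. A self-contained sketch of the same pattern with hypothetical names:

# demo_mod.py -- minimal PEP 562 sketch (hypothetical module)
def __getattr__(name: str):
    if name == "moved_helper":
        raise ImportError(
            "moved_helper has moved; install the new package and import it "
            "from `another_package.moved_helper` instead."
        )
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

# >>> import demo_mod
# >>> demo_mod.moved_helper   # raises ImportError with the migration hint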
|
from pathlib import Path
from typing import Any
from langchain_core._api.path import as_import_path
def __getattr__(name: str) -> Any:
"""Get attr name."""
if name == "create_pandas_dataframe_agent":
# Get directory of langchain package
HERE = Path(__file__).parents[3]
here = as_import_path(Path(__file__).parent, relative_to=HERE)
old_path = "langchain." + here + "." + name
new_path = "langchain_experimental." + here + "." + name
raise ImportError(
"This agent has been moved to langchain experiment. "
"This agent relies on python REPL tool under the hood, so to use it "
"safely please sandbox the python REPL. "
"Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md "
"and https://github.com/langchain-ai/langchain/discussions/11680"
"To keep using this code as is, install langchain experimental and "
f"update your import statement from:\n `{old_path}` to `{new_path}`."
)
raise AttributeError(f"{name} does not exist")
|
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import pytest
from sklearn import metrics
from sklearn.ensemble import StackingClassifier, StackingRegressor
from sklearn.utils._testing import assert_docstring_consistency, skip_if_no_numpydoc
CLASS_DOCSTRING_CONSISTENCY_CASES = [
{
"objects": [StackingClassifier, StackingRegressor],
"include_params": ["cv", "n_jobs", "passthrough", "verbose"],
"exclude_params": None,
"include_attrs": True,
"exclude_attrs": ["final_estimator_"],
"include_returns": False,
"exclude_returns": None,
"descr_regex_pattern": None,
},
]
FUNCTION_DOCSTRING_CONSISTENCY_CASES = [
{
"objects": [
metrics.precision_recall_fscore_support,
metrics.f1_score,
metrics.fbeta_score,
metrics.precision_score,
metrics.recall_score,
],
"include_params": True,
"exclude_params": ["average", "zero_division"],
"include_attrs": False,
"exclude_attrs": None,
"include_returns": False,
"exclude_returns": None,
"descr_regex_pattern": None,
},
{
"objects": [
metrics.precision_recall_fscore_support,
metrics.f1_score,
metrics.fbeta_score,
metrics.precision_score,
metrics.recall_score,
],
"include_params": ["average"],
"exclude_params": None,
"include_attrs": False,
"exclude_attrs": None,
"include_returns": False,
"exclude_returns": None,
"descr_regex_pattern": " ".join(
(
r"""This parameter is required for multiclass/multilabel targets\.
If ``None``, the metrics for each class are returned\. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``\.
This is applicable only if targets \(``y_\{true,pred\}``\) are binary\.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives\.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean\. This does not take label imbalance into account\.
``'weighted'``:
Calculate metrics for each label, and find their average weighted
by support \(the number of true instances for each label\)\. This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall\."""
r"[\s\w]*\.*" # optionally match additional sentence
r"""
``'samples'``:
Calculate metrics for each instance, and find their average \(only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`\)\."""
).split()
),
},
]
@pytest.mark.parametrize("case", CLASS_DOCSTRING_CONSISTENCY_CASES)
@skip_if_no_numpydoc
def test_class_docstring_consistency(case):
"""Check docstrings parameters consistency between related classes."""
assert_docstring_consistency(**case)
@pytest.mark.parametrize("case", FUNCTION_DOCSTRING_CONSISTENCY_CASES)
@skip_if_no_numpydoc
def test_function_docstring_consistency(case):
"""Check docstrings parameters consistency between related functions."""
assert_docstring_consistency(**case)
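The `" ".join((...).split())` idiom in the second case keeps the long triple-quoted regex readable: splitting on whitespace and re-joining collapses the indentation and newlines to single spaces, so the pattern lines up with a docstring description whose whitespace has been normalized the same way. A standalone sketch of the idiom:

import re

raw = r"""Calculate metrics globally by counting the total true positives,
        false negatives and false positives\."""
pattern = " ".join(raw.split())   # one line, single-spaced
descr = " ".join(
    """Calculate metrics globally by counting the total true positives,
       false negatives and false positives.""".split()
)
assert re.search(pattern, descr)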
|
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import pytest
from sklearn import metrics
from sklearn.ensemble import StackingClassifier, StackingRegressor
from sklearn.utils._testing import assert_docstring_consistency, skip_if_no_numpydoc
CLASS_DOCSTRING_CONSISTENCY_CASES = [
{
"objects": [StackingClassifier, StackingRegressor],
"include_params": ["cv", "n_jobs", "passthrough", "verbose"],
"exclude_params": None,
"include_attrs": True,
"exclude_attrs": ["final_estimator_"],
"include_returns": False,
"exclude_returns": None,
"descr_regex_pattern": None,
},
]
FUNCTION_DOCSTRING_CONSISTENCY_CASES = [
{
"objects": [
metrics.precision_recall_fscore_support,
metrics.f1_score,
metrics.fbeta_score,
metrics.precision_score,
metrics.recall_score,
],
"include_params": True,
"exclude_params": ["average", "zero_division"],
"include_attrs": False,
"exclude_attrs": None,
"include_returns": False,
"exclude_returns": None,
"descr_regex_pattern": None,
},
{
"objects": [
metrics.precision_recall_fscore_support,
metrics.f1_score,
metrics.fbeta_score,
metrics.precision_score,
metrics.recall_score,
],
"include_params": ["average"],
"exclude_params": None,
"include_attrs": False,
"exclude_attrs": None,
"include_returns": False,
"exclude_returns": None,
"descr_regex_pattern": " ".join(
(
r"""This parameter is required for multiclass/multilabel targets\.
If ``None``, the metrics for each class are returned\. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``\.
This is applicable only if targets \(``y_\{true,pred\}``\) are binary\.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives\.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean\. This does not take label imbalance into account\.
``'weighted'``:
Calculate metrics for each label, and find their average weighted
by support \(the number of true instances for each label\)\. This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall\."""
+ r"[\s\w]*\.*" # optionally match additional sentence
+ r"""
``'samples'``:
Calculate metrics for each instance, and find their average \(only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`\)\."""
).split()
),
},
]
@pytest.mark.parametrize("case", CLASS_DOCSTRING_CONSISTENCY_CASES)
@skip_if_no_numpydoc
def test_class_docstring_consistency(case):
"""Check docstrings parameters consistency between related classes."""
assert_docstring_consistency(**case)
@pytest.mark.parametrize("case", FUNCTION_DOCSTRING_CONSISTENCY_CASES)
@skip_if_no_numpydoc
def test_function_docstring_consistency(case):
"""Check docstrings parameters consistency between related functions."""
assert_docstring_consistency(**case)
|
import hashlib
import os
from pathlib import Path
from typing import List
from urllib.parse import quote, urlencode
import requests
from docutils import nodes
from docutils.parsers.rst.directives.images import Image
from sphinx.util.docutils import SphinxDirective
_THIS_DIR = Path(__file__).parent
# Color palette from PyTorch Developer Day 2021 Presentation Template
YELLOW = "F9DB78"
GREEN = "70AD47"
BLUE = "00B0F0"
PINK = "FF71DA"
ORANGE = "FF8300"
TEAL = "00E5D1"
GRAY = "7F7F7F"
def _get_cache_path(key, ext):
filename = f"{hashlib.sha256(key).hexdigest()}{ext}"
cache_dir = _THIS_DIR / "gen_images"
cache_dir.mkdir(parents=True, exist_ok=True)
return cache_dir / filename
def _download(url, path):
response = requests.get(url)
response.raise_for_status()
with open(path, "wb") as file:
file.write(response.content)
def _fetch_image(url):
path = _get_cache_path(url.encode("utf-8"), ext=".svg")
if not path.exists():
_download(url, path)
return os.sep + str(path.relative_to(_THIS_DIR))
def _get_relpath(target, base):
target = os.sep + target
base = os.sep + base
target_path, filename = os.path.split(target)
rel_path = os.path.relpath(target_path, os.path.dirname(base))
return os.path.normpath(os.path.join(rel_path, filename))
class BaseShield(Image, SphinxDirective):
def run(self, params, alt, section) -> List[nodes.Node]:
url = f"https://img.shields.io/static/v1?{urlencode(params, quote_via=quote)}"
path = _fetch_image(url)
self.arguments = [path]
self.options["alt"] = alt
if "class" not in self.options:
self.options["class"] = []
self.options["class"].append("shield-badge")
target = _get_relpath("supported_features.html", self.env.docname)
self.options["target"] = f"{target}#{section}"
return super().run()
def _parse_devices(arg: str):
devices = sorted(arg.strip().split())
valid_values = {"CPU", "CUDA"}
if any(val not in valid_values for val in devices):
raise ValueError(
f"One or more device values are not valid. The valid values are {valid_values}. Given value: '{arg}'"
)
return ", ".join(sorted(devices))
def _parse_properties(arg: str):
properties = sorted(arg.strip().split())
valid_values = {"Autograd", "TorchScript"}
if any(val not in valid_values for val in properties):
raise ValueError(
"One or more property values are not valid. "
f"The valid values are {valid_values}. "
f"Given value: '{arg}'"
)
return ", ".join(sorted(properties))
class SupportedDevices(BaseShield):
"""List the supported devices"""
required_arguments = 1
final_argument_whitespace = True
def run(self) -> List[nodes.Node]:
devices = _parse_devices(self.arguments[0])
alt = f"This feature supports the following devices: {devices}"
params = {
"label": "Devices",
"message": devices,
"labelColor": GRAY,
"color": BLUE,
"style": "flat-square",
}
return super().run(params, alt, "devices")
class SupportedProperties(BaseShield):
"""List the supported properties"""
required_arguments = 1
final_argument_whitespace = True
def run(self) -> List[nodes.Node]:
properties = _parse_properties(self.arguments[0])
alt = f"This API supports the following properties: {properties}"
params = {
"label": "Properties",
"message": properties,
"labelColor": GRAY,
"color": GREEN,
"style": "flat-square",
}
return super().run(params, alt, "properties")
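For reference, the badge URL assembled in `BaseShield.run` comes out like this (a quick sketch of the same `urlencode` call; `quote_via=quote` percent-encodes spaces and commas instead of using `+`):

from urllib.parse import quote, urlencode

params = {
    "label": "Devices",
    "message": "CPU, CUDA",
    "labelColor": "7F7F7F",  # GRAY
    "color": "00B0F0",       # BLUE
    "style": "flat-square",
}
print(f"https://img.shields.io/static/v1?{urlencode(params, quote_via=quote)}")
# https://img.shields.io/static/v1?label=Devices&message=CPU%2C%20CUDA&labelColor=7F7F7F&color=00B0F0&style=flat-square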
|
import hashlib
from pathlib import Path
from typing import List
from urllib.parse import quote, urlencode
import requests
from docutils import nodes
from docutils.parsers.rst.directives.images import Image
_THIS_DIR = Path(__file__).parent
# Color palette from PyTorch Developer Day 2021 Presentation Template
YELLOW = "F9DB78"
GREEN = "70AD47"
BLUE = "00B0F0"
PINK = "FF71DA"
ORANGE = "FF8300"
TEAL = "00E5D1"
GRAY = "7F7F7F"
def _get_cache_path(key, ext):
filename = f"{hashlib.sha256(key).hexdigest()}{ext}"
cache_dir = _THIS_DIR / "gen_images"
cache_dir.mkdir(parents=True, exist_ok=True)
return cache_dir / filename
def _download(url, path):
response = requests.get(url)
response.raise_for_status()
with open(path, "wb") as file:
file.write(response.content)
def _fetch_image(url):
path = _get_cache_path(url.encode("utf-8"), ext=".svg")
if not path.exists():
_download(url, path)
return str(path.relative_to(_THIS_DIR))
class BaseShield(Image):
def run(self, params, alt, section) -> List[nodes.Node]:
url = f"https://img.shields.io/static/v1?{urlencode(params, quote_via=quote)}"
path = _fetch_image(url)
self.arguments = [path]
self.options["alt"] = alt
if "class" not in self.options:
self.options["class"] = []
self.options["class"].append("shield-badge")
self.options["target"] = f"supported_features.html#{section}"
return super().run()
def _parse_devices(arg: str):
devices = sorted(arg.strip().split())
valid_values = {"CPU", "CUDA"}
if any(val not in valid_values for val in devices):
raise ValueError(
f"One or more device values are not valid. The valid values are {valid_values}. Given value: '{arg}'"
)
return ", ".join(sorted(devices))
def _parse_properties(arg: str):
properties = sorted(arg.strip().split())
valid_values = {"Autograd", "TorchScript"}
if any(val not in valid_values for val in properties):
raise ValueError(
"One or more property values are not valid. "
f"The valid values are {valid_values}. "
f"Given value: '{arg}'"
)
return ", ".join(sorted(properties))
class SupportedDevices(BaseShield):
"""List the supported devices"""
required_arguments = 1
final_argument_whitespace = True
def run(self) -> List[nodes.Node]:
devices = _parse_devices(self.arguments[0])
alt = f"This feature supports the following devices: {devices}"
params = {
"label": "Devices",
"message": devices,
"labelColor": GRAY,
"color": BLUE,
"style": "flat-square",
}
return super().run(params, alt, "devices")
class SupportedProperties(BaseShield):
"""List the supported properties"""
required_arguments = 1
final_argument_whitespace = True
def run(self) -> List[nodes.Node]:
properties = _parse_properties(self.arguments[0])
alt = f"This API supports the following properties: {properties}"
params = {
"label": "Properties",
"message": properties,
"labelColor": GRAY,
"color": GREEN,
"style": "flat-square",
}
return super().run(params, alt, "properties")
|
"""Tracers that call listeners."""
from collections.abc import Awaitable
from typing import TYPE_CHECKING, Callable, Optional, Union
from langchain_core.runnables.config import (
RunnableConfig,
acall_func_with_variable_args,
call_func_with_variable_args,
)
from langchain_core.tracers.base import AsyncBaseTracer, BaseTracer
from langchain_core.tracers.schemas import Run
if TYPE_CHECKING:
from uuid import UUID
Listener = Union[Callable[[Run], None], Callable[[Run, RunnableConfig], None]]
AsyncListener = Union[
Callable[[Run], Awaitable[None]], Callable[[Run, RunnableConfig], Awaitable[None]]
]
class RootListenersTracer(BaseTracer):
"""Tracer that calls listeners on run start, end, and error.
Parameters:
log_missing_parent: Whether to log a warning if the parent is missing.
Default is False.
config: The runnable config.
on_start: The listener to call on run start.
on_end: The listener to call on run end.
on_error: The listener to call on run error.
"""
log_missing_parent = False
def __init__(
self,
*,
config: RunnableConfig,
on_start: Optional[Listener],
on_end: Optional[Listener],
on_error: Optional[Listener],
) -> None:
"""Initialize the tracer.
Args:
config: The runnable config.
on_start: The listener to call on run start.
on_end: The listener to call on run end.
            on_error: The listener to call on run error.
"""
super().__init__(_schema_format="original+chat")
self.config = config
self._arg_on_start = on_start
self._arg_on_end = on_end
self._arg_on_error = on_error
self.root_id: Optional[UUID] = None
def _persist_run(self, run: Run) -> None:
# This is a legacy method only called once for an entire run tree
# therefore not useful here
pass
def _on_run_create(self, run: Run) -> None:
if self.root_id is not None:
return
self.root_id = run.id
if self._arg_on_start is not None:
call_func_with_variable_args(self._arg_on_start, run, self.config)
def _on_run_update(self, run: Run) -> None:
if run.id != self.root_id:
return
if run.error is None:
if self._arg_on_end is not None:
call_func_with_variable_args(self._arg_on_end, run, self.config)
else:
if self._arg_on_error is not None:
call_func_with_variable_args(self._arg_on_error, run, self.config)
class AsyncRootListenersTracer(AsyncBaseTracer):
"""Async Tracer that calls listeners on run start, end, and error.
Parameters:
log_missing_parent: Whether to log a warning if the parent is missing.
Default is False.
config: The runnable config.
on_start: The listener to call on run start.
on_end: The listener to call on run end.
on_error: The listener to call on run error.
"""
log_missing_parent = False
def __init__(
self,
*,
config: RunnableConfig,
on_start: Optional[AsyncListener],
on_end: Optional[AsyncListener],
on_error: Optional[AsyncListener],
) -> None:
"""Initialize the tracer.
Args:
config: The runnable config.
on_start: The listener to call on run start.
on_end: The listener to call on run end.
            on_error: The listener to call on run error.
"""
super().__init__(_schema_format="original+chat")
self.config = config
self._arg_on_start = on_start
self._arg_on_end = on_end
self._arg_on_error = on_error
self.root_id: Optional[UUID] = None
async def _persist_run(self, run: Run) -> None:
# This is a legacy method only called once for an entire run tree
# therefore not useful here
pass
async def _on_run_create(self, run: Run) -> None:
if self.root_id is not None:
return
self.root_id = run.id
if self._arg_on_start is not None:
await acall_func_with_variable_args(self._arg_on_start, run, self.config)
async def _on_run_update(self, run: Run) -> None:
if run.id != self.root_id:
return
if run.error is None:
if self._arg_on_end is not None:
await acall_func_with_variable_args(self._arg_on_end, run, self.config)
else:
if self._arg_on_error is not None:
await acall_func_with_variable_args(
self._arg_on_error, run, self.config
)
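In practice these tracers are attached through `Runnable.with_listeners`, and `call_func_with_variable_args` is what lets a listener accept either `(run)` or `(run, config)`. A short sketch, assuming a recent langchain_core:

from langchain_core.runnables import RunnableLambda

def log_end(run: Run) -> None:  # the one-argument Listener variant
    print("run finished:", run.id)

chain = RunnableLambda(lambda x: x + 1).with_listeners(on_end=log_end)
chain.invoke(1)  # prints the run id once the root run completes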
|
from collections.abc import Awaitable
from typing import TYPE_CHECKING, Callable, Optional, Union
from langchain_core.runnables.config import (
RunnableConfig,
acall_func_with_variable_args,
call_func_with_variable_args,
)
from langchain_core.tracers.base import AsyncBaseTracer, BaseTracer
from langchain_core.tracers.schemas import Run
if TYPE_CHECKING:
from uuid import UUID
Listener = Union[Callable[[Run], None], Callable[[Run, RunnableConfig], None]]
AsyncListener = Union[
Callable[[Run], Awaitable[None]], Callable[[Run, RunnableConfig], Awaitable[None]]
]
class RootListenersTracer(BaseTracer):
"""Tracer that calls listeners on run start, end, and error.
Parameters:
log_missing_parent: Whether to log a warning if the parent is missing.
Default is False.
config: The runnable config.
on_start: The listener to call on run start.
on_end: The listener to call on run end.
on_error: The listener to call on run error.
"""
log_missing_parent = False
def __init__(
self,
*,
config: RunnableConfig,
on_start: Optional[Listener],
on_end: Optional[Listener],
on_error: Optional[Listener],
) -> None:
"""Initialize the tracer.
Args:
config: The runnable config.
on_start: The listener to call on run start.
on_end: The listener to call on run end.
            on_error: The listener to call on run error.
"""
super().__init__(_schema_format="original+chat")
self.config = config
self._arg_on_start = on_start
self._arg_on_end = on_end
self._arg_on_error = on_error
self.root_id: Optional[UUID] = None
def _persist_run(self, run: Run) -> None:
# This is a legacy method only called once for an entire run tree
# therefore not useful here
pass
def _on_run_create(self, run: Run) -> None:
if self.root_id is not None:
return
self.root_id = run.id
if self._arg_on_start is not None:
call_func_with_variable_args(self._arg_on_start, run, self.config)
def _on_run_update(self, run: Run) -> None:
if run.id != self.root_id:
return
if run.error is None:
if self._arg_on_end is not None:
call_func_with_variable_args(self._arg_on_end, run, self.config)
else:
if self._arg_on_error is not None:
call_func_with_variable_args(self._arg_on_error, run, self.config)
class AsyncRootListenersTracer(AsyncBaseTracer):
"""Async Tracer that calls listeners on run start, end, and error.
Parameters:
log_missing_parent: Whether to log a warning if the parent is missing.
Default is False.
config: The runnable config.
on_start: The listener to call on run start.
on_end: The listener to call on run end.
on_error: The listener to call on run error.
"""
log_missing_parent = False
def __init__(
self,
*,
config: RunnableConfig,
on_start: Optional[AsyncListener],
on_end: Optional[AsyncListener],
on_error: Optional[AsyncListener],
) -> None:
"""Initialize the tracer.
Args:
config: The runnable config.
on_start: The listener to call on run start.
on_end: The listener to call on run end.
            on_error: The listener to call on run error.
"""
super().__init__(_schema_format="original+chat")
self.config = config
self._arg_on_start = on_start
self._arg_on_end = on_end
self._arg_on_error = on_error
self.root_id: Optional[UUID] = None
async def _persist_run(self, run: Run) -> None:
# This is a legacy method only called once for an entire run tree
# therefore not useful here
pass
async def _on_run_create(self, run: Run) -> None:
if self.root_id is not None:
return
self.root_id = run.id
if self._arg_on_start is not None:
await acall_func_with_variable_args(self._arg_on_start, run, self.config)
async def _on_run_update(self, run: Run) -> None:
if run.id != self.root_id:
return
if run.error is None:
if self._arg_on_end is not None:
await acall_func_with_variable_args(self._arg_on_end, run, self.config)
else:
if self._arg_on_error is not None:
await acall_func_with_variable_args(
self._arg_on_error, run, self.config
)
|
_base_ = 'solov2_r50_fpn_1x_coco.py'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 800), (1333, 768), (1333, 736), (1333, 704),
(1333, 672), (1333, 640)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 3x
max_epochs = 36
train_cfg = dict(by_epoch=True, max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0 / 3,
by_epoch=False,
begin=0,
end=500),
dict(
type='MultiStepLR',
begin=0,
end=36,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
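As a rough check of the schedule above: LinearLR ramps the learning-rate multiplier from start_factor to 1 over the first 500 iterations, then MultiStepLR multiplies by gamma at epochs 27 and 33. A sketch approximating the warmup interpolation (not mmengine's exact implementation):

def warmup_multiplier(iter_idx, start_factor=1.0 / 3, warmup_iters=500):
    # linear ramp from start_factor to 1.0 over the warmup window
    t = min(iter_idx, warmup_iters) / warmup_iters
    return start_factor + (1.0 - start_factor) * t

assert warmup_multiplier(0) == 1.0 / 3
assert warmup_multiplier(500) == 1.0
# after warmup: x0.1 at epoch 27 and x0.01 from epoch 33 (gamma applied twice)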
|
_base_ = 'solov2_r50_fpn_1x_coco.py'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
# TODO: Update after mmcv.RandomChoiceResize finish refactor
type='RandomChoiceResize',
scales=[(1333, 800), (1333, 768), (1333, 736), (1333, 704),
(1333, 672), (1333, 640)],
resize_cfg=dict(type='Resize', keep_ratio=True)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 3x
max_epochs = 36
train_cfg = dict(by_epoch=True, max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0 / 3,
by_epoch=False,
begin=0,
end=500),
dict(
type='MultiStepLR',
begin=0,
end=36,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
|
import pathlib
import pytest
from mktestdocs import grab_code_blocks
from mktestdocs.__main__ import _executors, check_raw_string
file_to_skip = ['fastAPI', 'jina']
def check_raw_file_full(raw, lang="python", keyword_ignore=[]):
if lang not in _executors:
raise LookupError(
f"{lang} is not a supported language to check\n"
"\tHint: you can add support for any language by using register_executor"
)
executor = _executors[lang]
all_code = ""
add_code_block = True
for b in grab_code_blocks(raw, lang=lang):
add_code_block = True
for keyword in keyword_ignore:
if keyword in b:
add_code_block = False
break
if add_code_block:
all_code = f"{all_code}\n{b}"
executor(all_code)
def check_md_file(fpath, memory=False, lang="python", keyword_ignore=[]):
"""
    NOTE: Copied from mktestdocs.__main__, with `keyword_ignore` support added.
Given a markdown file, parse the contents for python code blocks
and check that each independent block does not cause an error.
Arguments:
fpath: path to markdown file
memory: whether or not previous code-blocks should be remembered
"""
text = pathlib.Path(fpath).read_text()
if not memory:
check_raw_string(text, lang=lang)
else:
check_raw_file_full(text, lang=lang, keyword_ignore=keyword_ignore)
files_to_check = [
*list(pathlib.Path('docs/user_guide').glob('**/*.md')),
*list(pathlib.Path('docs/data_types').glob('**/*.md')),
]
file_to_remove = []
for file in files_to_check:
for fn in file_to_skip:
if fn in str(file):
file_to_remove.append(file)
for file in file_to_remove:
files_to_check.remove(file)
@pytest.mark.parametrize('fpath', files_to_check, ids=str)
def test_files_good(fpath):
check_md_file(fpath=fpath, memory=True, keyword_ignore=['pickle'])
def test_readme():
check_md_file(
fpath='README.md', memory=True, keyword_ignore=['tensorflow', 'fastapi', 'push']
)
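A quick illustration of the `keyword_ignore` filtering in `check_raw_file_full` on an inline markdown string (hypothetical content; note that surviving blocks are concatenated into one script, so later blocks can use earlier definitions):

raw = '''
```python
import pickle  # skipped: matches keyword_ignore
```

```python
x = 1 + 1
```

```python
assert x == 2  # still sees `x` because surviving blocks run as one script
```
'''
check_raw_file_full(raw, lang="python", keyword_ignore=["pickle"])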
|
import pathlib
import pytest
from mktestdocs import grab_code_blocks
from mktestdocs.__main__ import _executors, check_raw_string
def check_raw_file_full(raw, lang="python", keyword_ignore=[]):
if lang not in _executors:
raise LookupError(
f"{lang} is not a supported language to check\n"
"\tHint: you can add support for any language by using register_executor"
)
executor = _executors[lang]
all_code = ""
add_code_block = True
for b in grab_code_blocks(raw, lang=lang):
add_code_block = True
for keyword in keyword_ignore:
if keyword in b:
add_code_block = False
break
if add_code_block:
all_code = f"{all_code}\n{b}"
executor(all_code)
def check_md_file(fpath, memory=False, lang="python", keyword_ignore=[]):
"""
    NOTE: Copied from mktestdocs.__main__, with `keyword_ignore` support added.
Given a markdown file, parse the contents for python code blocks
and check that each independent block does not cause an error.
Arguments:
fpath: path to markdown file
memory: whether or not previous code-blocks should be remembered
"""
text = pathlib.Path(fpath).read_text()
if not memory:
check_raw_string(text, lang=lang)
else:
check_raw_file_full(text, lang=lang, keyword_ignore=keyword_ignore)
@pytest.mark.parametrize(
'fpath',
[
*list(pathlib.Path('docs/user_guide').glob('**/*.md')),
*list(pathlib.Path('docs/data_types').glob('**/*.md')),
],
ids=str,
)
def test_files_good(fpath):
check_md_file(fpath=fpath, memory=True)
def test_readme():
check_md_file(
fpath='README.md', memory=True, keyword_ignore=['tensorflow', 'fastapi', 'push']
)
|
# Copyright (c) OpenMMLab. All rights reserved.
import collections
from mmcv.utils import build_from_cfg
from ..builder import PIPELINES
@PIPELINES.register_module()
class Compose:
"""Compose multiple transforms sequentially.
Args:
        transforms (Sequence[dict | callable]): Sequence of transform objects or
            config dicts to be composed.
"""
def __init__(self, transforms):
assert isinstance(transforms, collections.abc.Sequence)
self.transforms = []
for transform in transforms:
if isinstance(transform, dict):
transform = build_from_cfg(transform, PIPELINES)
self.transforms.append(transform)
elif callable(transform):
self.transforms.append(transform)
else:
raise TypeError('transform must be callable or a dict')
def __call__(self, data):
"""Call function to apply transforms sequentially.
Args:
            data (dict): A result dict containing the data to transform.
Returns:
dict: Transformed data.
"""
for t in self.transforms:
data = t(data)
if data is None:
return None
return data
def __repr__(self):
format_string = self.__class__.__name__ + '('
for t in self.transforms:
str_ = t.__repr__()
if 'Compose(' in str_:
str_ = str_.replace('\n', '\n ')
format_string += '\n'
format_string += f' {str_}'
format_string += '\n)'
return format_string
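Because `Compose` accepts plain callables alongside config dicts, its call and early-exit behaviour can be sketched without the registry:

def add_flag(results):
    results['flag'] = True
    return results

def drop_unflagged(results):
    return results if results.get('flag') else None  # None aborts the pipeline

pipeline = Compose([add_flag, drop_unflagged])
assert pipeline({'img': 'demo.jpg'}) == {'img': 'demo.jpg', 'flag': True}
assert Compose([lambda r: None, add_flag])({'img': 'demo.jpg'}) is None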
|
# Copyright (c) OpenMMLab. All rights reserved.
import collections
from mmcv.utils import build_from_cfg
from ..builder import PIPELINES
@PIPELINES.register_module()
class Compose:
"""Compose multiple transforms sequentially.
Args:
        transforms (Sequence[dict | callable]): Sequence of transform objects or
            config dicts to be composed.
"""
def __init__(self, transforms):
assert isinstance(transforms, collections.abc.Sequence)
self.transforms = []
for transform in transforms:
if isinstance(transform, dict):
transform = build_from_cfg(transform, PIPELINES)
self.transforms.append(transform)
elif callable(transform):
self.transforms.append(transform)
else:
raise TypeError('transform must be callable or a dict')
def __call__(self, data):
"""Call function to apply transforms sequentially.
Args:
            data (dict): A result dict containing the data to transform.
Returns:
dict: Transformed data.
"""
for t in self.transforms:
data = t(data)
if data is None:
return None
return data
def __repr__(self):
format_string = self.__class__.__name__ + '('
for t in self.transforms:
format_string += '\n'
format_string += f' {t}'
format_string += '\n)'
return format_string
|
import os
from typing import Optional
import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from tests import TOYDATA_DIR
@pytest.fixture()
def nested_doc_cls():
class MyDoc(BaseDoc):
count: Optional[int]
text: str
class MyDocNested(MyDoc):
image: ImageDoc
image2: ImageDoc
return MyDocNested
def test_to_from_csv(tmpdir, nested_doc_cls):
da = DocList[nested_doc_cls](
[
nested_doc_cls(
count=0,
text='hello',
image=ImageDoc(url='aux.png'),
image2=ImageDoc(url='aux.png'),
),
nested_doc_cls(text='hello world', image=ImageDoc(), image2=ImageDoc()),
]
)
tmp_file = str(tmpdir / 'tmp.csv')
da.to_csv(tmp_file)
assert os.path.isfile(tmp_file)
da_from = DocList[nested_doc_cls].from_csv(tmp_file)
for doc1, doc2 in zip(da, da_from):
assert doc1 == doc2
def test_from_csv_nested(nested_doc_cls):
da = DocList[nested_doc_cls].from_csv(
file_path=str(TOYDATA_DIR / 'docs_nested.csv')
)
assert len(da) == 3
for i, doc in enumerate(da):
assert doc.count.__class__ == int
assert doc.count == int(f'{i}{i}{i}')
assert doc.text.__class__ == str
assert doc.text == f'hello {i}'
assert doc.image.__class__ == ImageDoc
assert doc.image.tensor is None
assert doc.image.embedding is None
assert doc.image.bytes_ is None
assert doc.image2.__class__ == ImageDoc
assert doc.image2.tensor is None
assert doc.image2.embedding is None
assert doc.image2.bytes_ is None
assert da[0].image2.url == 'image_10.png'
assert da[1].image2.url is None
assert da[2].image2.url is None
@pytest.fixture()
def nested_doc():
class Inner(BaseDoc):
img: Optional[ImageDoc]
class Middle(BaseDoc):
img: Optional[ImageDoc]
inner: Optional[Inner]
class Outer(BaseDoc):
img: Optional[ImageDoc]
middle: Optional[Middle]
doc = Outer(
img=ImageDoc(), middle=Middle(img=ImageDoc(), inner=Inner(img=ImageDoc()))
)
return doc
def test_from_csv_without_schema_raise_exception():
with pytest.raises(TypeError, match='no document schema defined'):
DocList.from_csv(file_path=str(TOYDATA_DIR / 'docs_nested.csv'))
def test_from_csv_with_wrong_schema_raise_exception(nested_doc):
with pytest.raises(ValueError, match='Column names do not match the schema'):
DocList[nested_doc.__class__].from_csv(file_path=str(TOYDATA_DIR / 'docs.csv'))
def test_from_remote_csv_file():
remote_url = 'https://github.com/docarray/docarray/blob/main/tests/toydata/books.csv?raw=true'
class Book(BaseDoc):
title: str
author: str
year: int
books = DocList[Book].from_csv(file_path=remote_url)
assert len(books) == 3
def test_doc_list_error(tmpdir):
class Book(BaseDoc):
title: str
docs = DocList([Book(title='hello'), Book(title='world')])
tmp_file = str(tmpdir / 'tmp.csv')
with pytest.raises(TypeError):
docs.to_csv(tmp_file)
def test_union_type_error(tmp_path):
from typing import Union
from docarray.documents import TextDoc
class CustomDoc(BaseDoc):
ud: Union[TextDoc, ImageDoc] = TextDoc(text='union type')
docs = DocList[CustomDoc]([CustomDoc(ud=TextDoc(text='union type'))])
with pytest.raises(ValueError):
docs.to_csv(str(tmp_path) + ".csv")
DocList[CustomDoc].from_csv(str(tmp_path) + ".csv")
class BasisUnion(BaseDoc):
ud: Union[int, str]
docs_basic = DocList[BasisUnion]([BasisUnion(ud="hello")])
docs_basic.to_csv(str(tmp_path) + ".csv")
docs_copy = DocList[BasisUnion].from_csv(str(tmp_path) + ".csv")
assert docs_copy == docs_basic
|
import os
from typing import Optional
import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from tests import TOYDATA_DIR
@pytest.fixture()
def nested_doc_cls():
class MyDoc(BaseDoc):
count: Optional[int]
text: str
class MyDocNested(MyDoc):
image: ImageDoc
image2: ImageDoc
return MyDocNested
def test_to_from_csv(tmpdir, nested_doc_cls):
da = DocList[nested_doc_cls](
[
nested_doc_cls(
count=0,
text='hello',
image=ImageDoc(url='aux.png'),
image2=ImageDoc(url='aux.png'),
),
nested_doc_cls(text='hello world', image=ImageDoc(), image2=ImageDoc()),
]
)
tmp_file = str(tmpdir / 'tmp.csv')
da.to_csv(tmp_file)
assert os.path.isfile(tmp_file)
da_from = DocList[nested_doc_cls].from_csv(tmp_file)
for doc1, doc2 in zip(da, da_from):
assert doc1 == doc2
def test_from_csv_nested(nested_doc_cls):
da = DocList[nested_doc_cls].from_csv(
file_path=str(TOYDATA_DIR / 'docs_nested.csv')
)
assert len(da) == 3
for i, doc in enumerate(da):
assert doc.count.__class__ == int
assert doc.count == int(f'{i}{i}{i}')
assert doc.text.__class__ == str
assert doc.text == f'hello {i}'
assert doc.image.__class__ == ImageDoc
assert doc.image.tensor is None
assert doc.image.embedding is None
assert doc.image.bytes_ is None
assert doc.image2.__class__ == ImageDoc
assert doc.image2.tensor is None
assert doc.image2.embedding is None
assert doc.image2.bytes_ is None
assert da[0].image2.url == 'image_10.png'
assert da[1].image2.url is None
assert da[2].image2.url is None
@pytest.fixture()
def nested_doc():
class Inner(BaseDoc):
img: Optional[ImageDoc]
class Middle(BaseDoc):
img: Optional[ImageDoc]
inner: Optional[Inner]
class Outer(BaseDoc):
img: Optional[ImageDoc]
middle: Optional[Middle]
doc = Outer(
img=ImageDoc(), middle=Middle(img=ImageDoc(), inner=Inner(img=ImageDoc()))
)
return doc
def test_from_csv_without_schema_raise_exception():
with pytest.raises(TypeError, match='no document schema defined'):
DocList.from_csv(file_path=str(TOYDATA_DIR / 'docs_nested.csv'))
def test_from_csv_with_wrong_schema_raise_exception(nested_doc):
with pytest.raises(ValueError, match='Column names do not match the schema'):
DocList[nested_doc.__class__].from_csv(file_path=str(TOYDATA_DIR / 'docs.csv'))
def test_from_remote_csv_file():
remote_url = 'https://github.com/docarray/docarray/blob/main/tests/toydata/books.csv?raw=true'
class Book(BaseDoc):
title: str
author: str
year: int
books = DocList[Book].from_csv(file_path=remote_url)
assert len(books) == 3
def test_doc_list_error(tmpdir):
class Book(BaseDoc):
title: str
docs = DocList([Book(title='hello'), Book(title='world')])
tmp_file = str(tmpdir / 'tmp.csv')
with pytest.raises(TypeError):
docs.to_csv(tmp_file)
|
_base_ = './yolov3_d53_8xb8-ms-608-273e_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
    # `mean` and `to_rgb` should match the values in `preprocess_cfg`
dict(type='Expand', mean=[0, 0, 0], to_rgb=True, ratio_range=(1, 2)),
dict(
type='MinIoURandomCrop',
min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
min_crop_size=0.3),
dict(type='RandomResize', scale=[(320, 320), (416, 416)], keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='Resize', scale=(416, 416), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
|
_base_ = './yolov3_d53_8xb8-ms-608-273e_coco.py'
# dataset settings
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
    # `mean` and `to_rgb` should match the values in `preprocess_cfg`
dict(type='Expand', mean=[0, 0, 0], to_rgb=True, ratio_range=(1, 2)),
dict(
type='MinIoURandomCrop',
min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
min_crop_size=0.3),
dict(type='RandomResize', scale=[(320, 320), (416, 416)], keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(416, 416), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
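This pair reflects the rename of `file_client_args` to `backend_args` in newer MMDetection/MMEngine releases. For local files the newer equivalent would be something like the following (an assumption based on the mmengine backend naming; `backend_args=None` likewise falls back to local storage):

# local-disk storage, mmengine-style (illustrative snippet)
backend_args = dict(backend='local')
train_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    # ... remaining transforms unchanged
]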
|
import torch
from docarray import BaseDoc
from docarray.typing import TorchEmbedding, TorchTensor
def test_set_torch_tensor():
class MyDocument(BaseDoc):
tensor: TorchTensor
d = MyDocument(tensor=torch.zeros((3, 224, 224)))
assert isinstance(d.tensor, TorchTensor)
assert isinstance(d.tensor, torch.Tensor)
assert (d.tensor == torch.zeros((3, 224, 224))).all()
def test_set_torch_embedding():
class MyDocument(BaseDoc):
embedding: TorchEmbedding
d = MyDocument(embedding=torch.zeros((128,)))
assert isinstance(d.embedding, TorchTensor)
assert isinstance(d.embedding, TorchEmbedding)
assert isinstance(d.embedding, torch.Tensor)
assert (d.embedding == torch.zeros((128,))).all()
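TorchTensor can also be parametrized with an expected shape, which validates assigned tensors at construction time; a short sketch assuming the same docarray version as the test above (the document class is hypothetical):

import torch
from docarray import BaseDoc
from docarray.typing import TorchTensor

class ShapedDoc(BaseDoc):
    tensor: TorchTensor[3, 224, 224]  # shape-parametrized variant

doc = ShapedDoc(tensor=torch.zeros(3, 224, 224))
assert doc.tensor.shape == (3, 224, 224)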
|
import torch
from docarray import BaseDocument
from docarray.typing import TorchEmbedding, TorchTensor
def test_set_torch_tensor():
class MyDocument(BaseDocument):
tensor: TorchTensor
d = MyDocument(tensor=torch.zeros((3, 224, 224)))
assert isinstance(d.tensor, TorchTensor)
assert isinstance(d.tensor, torch.Tensor)
assert (d.tensor == torch.zeros((3, 224, 224))).all()
def test_set_torch_embedding():
class MyDocument(BaseDocument):
embedding: TorchEmbedding
d = MyDocument(embedding=torch.zeros((128,)))
assert isinstance(d.embedding, TorchTensor)
assert isinstance(d.embedding, TorchEmbedding)
assert isinstance(d.embedding, torch.Tensor)
assert (d.embedding == torch.zeros((128,))).all()
|
# Copyright (c) OpenMMLab. All rights reserved.
from .augment_wrappers import AutoAugment, RandAugment
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
Contrast, Equalize, Invert, Posterize, Sharpness,
Solarize, SolarizeAdd)
from .formatting import ImageToTensor, PackDetInputs, ToTensor, Transpose
from .geometric import (GeomTransform, Rotate, ShearX, ShearY, TranslateX,
TranslateY)
from .instaboost import InstaBoost
from .loading import (FilterAnnotations, LoadAnnotations, LoadEmptyAnnotations,
LoadImageFromNDArray, LoadMultiChannelImageFromFiles,
LoadPanopticAnnotations, LoadProposals)
from .transforms import (Albu, CopyPaste, CutOut, Expand, MinIoURandomCrop,
MixUp, Mosaic, Normalize, Pad, PhotoMetricDistortion,
RandomAffine, RandomCenterCropPad, RandomCrop,
RandomErasing, RandomFlip, RandomShift, Resize,
SegRescale, YOLOXHSVRandomAug)
from .wrappers import MultiBranch, RandomOrder
__all__ = [
'PackDetInputs', 'ToTensor', 'ImageToTensor', 'Transpose',
'LoadImageFromNDArray', 'LoadAnnotations', 'LoadPanopticAnnotations',
'LoadMultiChannelImageFromFiles', 'LoadProposals', 'Resize', 'RandomFlip',
'RandomCrop', 'Normalize', 'SegRescale', 'MinIoURandomCrop', 'Expand',
'PhotoMetricDistortion', 'Albu', 'InstaBoost', 'RandomCenterCropPad',
'AutoAugment', 'CutOut', 'ShearX', 'ShearY', 'Rotate', 'Color', 'Equalize',
'Brightness', 'Contrast', 'TranslateX', 'TranslateY', 'RandomShift',
'Mosaic', 'MixUp', 'RandomAffine', 'YOLOXHSVRandomAug', 'CopyPaste',
'FilterAnnotations', 'Pad', 'GeomTransform', 'ColorTransform',
'RandAugment', 'Sharpness', 'Solarize', 'SolarizeAdd', 'Posterize',
'AutoContrast', 'Invert', 'MultiBranch', 'RandomErasing',
'LoadEmptyAnnotations', 'RandomOrder'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .augment_wrappers import AutoAugment, RandAugment
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
Contrast, Equalize, Invert, Posterize, Sharpness,
Solarize, SolarizeAdd)
from .formatting import (ImageToTensor, PackDetInputs, ToDataContainer,
ToTensor, Transpose)
from .geometric import (GeomTransform, Rotate, ShearX, ShearY, TranslateX,
TranslateY)
from .instaboost import InstaBoost
from .loading import (FilterAnnotations, LoadAnnotations, LoadEmptyAnnotations,
LoadImageFromNDArray, LoadMultiChannelImageFromFiles,
LoadPanopticAnnotations, LoadProposals)
from .transforms import (Albu, CopyPaste, CutOut, Expand, MinIoURandomCrop,
MixUp, Mosaic, Normalize, Pad, PhotoMetricDistortion,
RandomAffine, RandomCenterCropPad, RandomCrop,
RandomErasing, RandomFlip, RandomShift, Resize,
SegRescale, YOLOXHSVRandomAug)
from .wrappers import MultiBranch, RandomOrder
__all__ = [
'PackDetInputs', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
'Transpose', 'LoadImageFromNDArray', 'LoadAnnotations',
'LoadPanopticAnnotations', 'LoadMultiChannelImageFromFiles',
'LoadProposals', 'Resize', 'RandomFlip', 'RandomCrop', 'Normalize',
'SegRescale', 'MinIoURandomCrop', 'Expand', 'PhotoMetricDistortion',
'Albu', 'InstaBoost', 'RandomCenterCropPad', 'AutoAugment', 'CutOut',
'ShearX', 'ShearY', 'Rotate', 'Color', 'Equalize', 'Brightness',
'Contrast', 'TranslateX', 'TranslateY', 'RandomShift', 'Mosaic', 'MixUp',
'RandomAffine', 'YOLOXHSVRandomAug', 'CopyPaste', 'FilterAnnotations',
'Pad', 'GeomTransform', 'ColorTransform', 'RandAugment', 'Sharpness',
'Solarize', 'SolarizeAdd', 'Posterize', 'AutoContrast', 'Invert',
'MultiBranch', 'RandomErasing', 'LoadEmptyAnnotations', 'RandomOrder'
]
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_vilt import *
from .feature_extraction_vilt import *
from .image_processing_vilt import *
from .image_processing_vilt_fast import *
from .modeling_vilt import *
from .processing_vilt import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
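`_LazyModule` swaps itself into `sys.modules` so submodules are only imported on first attribute access. A generic sketch of the underlying idea (not the Transformers implementation):

import importlib
import sys
import types

class LazyModule(types.ModuleType):
    """Import named submodules on first attribute access (illustrative)."""

    def __init__(self, name, submodules):
        super().__init__(name)
        self._submodules = set(submodules)

    def __getattr__(self, item):
        if item in self._submodules:
            module = importlib.import_module(f"{self.__name__}.{item}")
            setattr(self, item, module)  # cache so __getattr__ isn't hit again
            return module
        raise AttributeError(f"module {self.__name__!r} has no attribute {item!r}")

# usage inside a package __init__.py:
# sys.modules[__name__] = LazyModule(__name__, {"modeling_vilt", "processing_vilt"})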
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_vilt import *
from .feature_extraction_vilt import *
from .image_processing_vilt import *
from .modeling_vilt import *
from .processing_vilt import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import pytest
from jina import Document, DocumentArray, Executor
from pdf_segmenter import PDFSegmenter
from PIL import Image
@pytest.fixture()
def executor():
return PDFSegmenter()
@pytest.fixture()
def executor_from_config():
return Executor.load_config('config.yml')
def test_empty_docs(executor):
da = DocumentArray()
executor.craft(da)
assert len(da) == 0
def test_none_input(executor):
executor.craft(None)
def test_io_images_and_text(
executor_from_config, test_dir, doc_generator_img_text, expected_text
):
doc_array = doc_generator_img_text
assert len(doc_array) > 0
for doc in doc_array:
executor_from_config.craft(doc)
chunks = doc[0].chunks
assert len(chunks) == 3
# Check images
for idx, c in enumerate(chunks[:2]):
with Image.open(os.path.join(test_dir, f'data/test_img_{idx}.jpg')) as img:
blob = chunks[idx].blob
assert chunks[idx].mime_type == 'image/*'
                assert (blob.shape[1], blob.shape[0]) == img.size
if idx == 0:
assert blob.shape == (660, 1024, 3)
if idx == 1:
assert blob.shape == (626, 1191, 3)
# Check text
assert chunks[2].text == expected_text
assert chunks[2].mime_type == 'text/plain'
def test_io_text(executor_from_config, doc_generator_text, expected_text):
doc_array = doc_generator_text
assert len(doc_array) > 0
for doc in doc_array:
executor_from_config.craft(doc)
chunks = doc[0].chunks
assert len(chunks) == 1
        # Check text
assert chunks[0].text == expected_text
assert chunks[0].mime_type == 'text/plain'
def test_io_img(executor_from_config, test_dir, doc_generator_img):
doc_array = doc_generator_img
assert len(doc_array) > 0
for doc in doc_array:
executor_from_config.craft(doc)
chunks = doc[0].chunks
assert len(chunks) == 3
# Check images
for idx, c in enumerate(chunks[:2]):
with Image.open(os.path.join(test_dir, f'data/test_img_{idx}.jpg')) as img:
blob = chunks[idx].blob
assert chunks[idx].mime_type == 'image/*'
                assert (blob.shape[1], blob.shape[0]) == img.size
if idx == 0:
assert blob.shape == (660, 1024, 3)
if idx == 1:
assert blob.shape == (626, 1191, 3)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
from pathlib import Path
from jina import Executor
from jina.executors import BaseExecutor
from PIL import Image
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
def test_io_images_and_text(test_dir, doc_generator_img_text, expected_text):
crafter = BaseExecutor.load_config('config.yml')
doc_array = doc_generator_img_text
for doc in doc_array:
crafter.craft(doc)
chunks = doc[0].chunks
assert len(chunks) == 3
# Check images
for idx, c in enumerate(chunks[:2]):
with Image.open(os.path.join(test_dir, f'data/test_img_{idx}.jpg')) as img:
blob = chunks[idx].blob
assert chunks[idx].mime_type == 'image/*'
                assert (blob.shape[1], blob.shape[0]) == img.size
if idx == 0:
assert blob.shape == (660, 1024, 3)
if idx == 1:
assert blob.shape == (626, 1191, 3)
# Check text
assert chunks[2].text == expected_text
assert chunks[2].mime_type == 'text/plain'
def test_io_text(doc_generator_text, expected_text):
crafter = BaseExecutor.load_config('config.yml')
doc_array = doc_generator_text
for doc in doc_array:
crafter.craft(doc)
chunks = doc[0].chunks
assert len(chunks) == 1
        # Check text
assert chunks[0].text == expected_text
assert chunks[0].mime_type == 'text/plain'
def test_io_img(test_dir, doc_generator_img):
crafter = BaseExecutor.load_config('config.yml')
doc_array = doc_generator_img
for doc in doc_array:
crafter.craft(doc)
chunks = doc[0].chunks
assert len(chunks) == 3
# Check images
for idx, c in enumerate(chunks[:2]):
with Image.open(os.path.join(test_dir, f'data/test_img_{idx}.jpg')) as img:
blob = chunks[idx].blob
assert chunks[idx].mime_type == 'image/*'
                assert (blob.shape[1], blob.shape[0]) == img.size
if idx == 0:
assert blob.shape == (660, 1024, 3)
if idx == 1:
assert blob.shape == (626, 1191, 3)
|
_base_ = './faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
|
_base_ = './faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
# Copyright (c) OpenMMLab. All rights reserved.
from .checkloss_hook import CheckInvalidLossHook
from .ema import ExpMomentumEMAHook, LinearMomentumEMAHook
from .memory_profiler_hook import MemoryProfilerHook
from .set_epoch_info_hook import SetEpochInfoHook
from .sync_norm_hook import SyncNormHook
from .sync_random_size_hook import SyncRandomSizeHook
from .yolox_lrupdater_hook import YOLOXLrUpdaterHook
from .yolox_mode_switch_hook import YOLOXModeSwitchHook
__all__ = [
'SyncRandomSizeHook', 'YOLOXModeSwitchHook', 'SyncNormHook',
'ExpMomentumEMAHook', 'LinearMomentumEMAHook', 'YOLOXLrUpdaterHook',
'CheckInvalidLossHook', 'SetEpochInfoHook', 'MemoryProfilerHook'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .checkloss_hook import CheckInvalidLossHook
from .ema import ExpMomentumEMAHook, LinearMomentumEMAHook
from .set_epoch_info_hook import SetEpochInfoHook
from .sync_norm_hook import SyncNormHook
from .sync_random_size_hook import SyncRandomSizeHook
from .yolox_lrupdater_hook import YOLOXLrUpdaterHook
from .yolox_mode_switch_hook import YOLOXModeSwitchHook
__all__ = [
'SyncRandomSizeHook', 'YOLOXModeSwitchHook', 'SyncNormHook',
'ExpMomentumEMAHook', 'LinearMomentumEMAHook', 'YOLOXLrUpdaterHook',
'CheckInvalidLossHook', 'SetEpochInfoHook'
]
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='YOLOF',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(3, ),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet50_caffe')),
neck=dict(
type='DilatedEncoder',
in_channels=2048,
out_channels=512,
block_mid_channels=128,
num_residual_blocks=4,
block_dilations=[2, 4, 6, 8]),
bbox_head=dict(
type='YOLOFHead',
num_classes=80,
in_channels=512,
reg_decoded_bbox=True,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[1, 2, 4, 8, 16],
strides=[32]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1., 1., 1., 1.],
add_ctr_clamp=True,
ctr_clamp=32),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='UniformAssigner', pos_ignore_thr=0.15, neg_ignore_thr=0.7),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
optimizer = dict(
type='SGD',
lr=0.12,
momentum=0.9,
weight_decay=0.0001,
paramwise_cfg=dict(
norm_decay_mult=0., custom_keys={'backbone': dict(lr_mult=1. / 3)}))
lr_config = dict(warmup_iters=1500, warmup_ratio=0.00066667)
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='RandomShift', shift_ratio=0.5, max_shift_px=32),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=8,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='YOLOF',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(3, ),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet50_caffe')),
neck=dict(
type='DilatedEncoder',
in_channels=2048,
out_channels=512,
block_mid_channels=128,
num_residual_blocks=4),
bbox_head=dict(
type='YOLOFHead',
num_classes=80,
in_channels=512,
reg_decoded_bbox=True,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[1, 2, 4, 8, 16],
strides=[32]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1., 1., 1., 1.],
add_ctr_clamp=True,
ctr_clamp=32),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='UniformAssigner', pos_ignore_thr=0.15, neg_ignore_thr=0.7),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
optimizer = dict(
type='SGD',
lr=0.12,
momentum=0.9,
weight_decay=0.0001,
paramwise_cfg=dict(
norm_decay_mult=0., custom_keys={'backbone': dict(lr_mult=1. / 3)}))
lr_config = dict(warmup_iters=1500, warmup_ratio=0.00066667)
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='RandomShift', shift_ratio=0.5, max_shift_px=32),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=8,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
from typing import Union
import PIL.Image
import torch
from torchvision.prototype import datapoints
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
def erase_image_tensor(
image: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
if not inplace:
image = image.clone()
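    # overwrite the h x w patch anchored at (i, j) with the replacement values v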
image[..., i : i + h, j : j + w] = v
return image
@torch.jit.unused
def erase_image_pil(
image: PIL.Image.Image, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> PIL.Image.Image:
t_img = pil_to_tensor(image)
output = erase_image_tensor(t_img, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return to_pil_image(output, mode=image.mode)
def erase_video(
video: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
return erase_image_tensor(video, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
def erase(
inpt: Union[datapoints.ImageTypeJIT, datapoints.VideoTypeJIT],
i: int,
j: int,
h: int,
w: int,
v: torch.Tensor,
inplace: bool = False,
) -> Union[datapoints.ImageTypeJIT, datapoints.VideoTypeJIT]:
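    # dispatch on input type: plain tensors and Image/Video datapoints share the tensor kernel;
    # PIL images are converted to tensors and back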
if isinstance(inpt, torch.Tensor) and (
torch.jit.is_scripting() or not isinstance(inpt, (datapoints.Image, datapoints.Video))
):
return erase_image_tensor(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
elif isinstance(inpt, datapoints.Image):
output = erase_image_tensor(inpt.as_subclass(torch.Tensor), i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return datapoints.Image.wrap_like(inpt, output)
elif isinstance(inpt, datapoints.Video):
output = erase_video(inpt.as_subclass(torch.Tensor), i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return datapoints.Video.wrap_like(inpt, output)
elif isinstance(inpt, PIL.Image.Image):
return erase_image_pil(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
else:
raise TypeError(
f"Input can either be a plain tensor, an `Image` or `Video` datapoint, or a PIL image, "
f"but got {type(inpt)} instead."
)
|
from typing import Union
import PIL.Image
import torch
from torchvision.prototype import features
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
def erase_image_tensor(
image: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
if not inplace:
image = image.clone()
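    # overwrite the h x w patch anchored at (i, j) with the replacement values v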
image[..., i : i + h, j : j + w] = v
return image
@torch.jit.unused
def erase_image_pil(
image: PIL.Image.Image, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> PIL.Image.Image:
t_img = pil_to_tensor(image)
output = erase_image_tensor(t_img, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return to_pil_image(output, mode=image.mode)
def erase_video(
video: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
return erase_image_tensor(video, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
def erase(
inpt: Union[features.ImageTypeJIT, features.VideoTypeJIT],
i: int,
j: int,
h: int,
w: int,
v: torch.Tensor,
inplace: bool = False,
) -> Union[features.ImageTypeJIT, features.VideoTypeJIT]:
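    # dispatch on input type: plain tensors and Image/Video feature subclasses share the tensor kernel;
    # PIL images are converted to tensors and back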
if isinstance(inpt, torch.Tensor) and (
torch.jit.is_scripting() or not isinstance(inpt, (features.Image, features.Video))
):
return erase_image_tensor(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
elif isinstance(inpt, features.Image):
output = erase_image_tensor(inpt.as_subclass(torch.Tensor), i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return features.Image.wrap_like(inpt, output)
elif isinstance(inpt, features.Video):
output = erase_video(inpt.as_subclass(torch.Tensor), i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return features.Video.wrap_like(inpt, output)
elif isinstance(inpt, PIL.Image.Image):
return erase_image_pil(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
else:
raise TypeError(
f"Input can either be a plain tensor, an `Image` or `Video` tensor subclass, or a PIL image, "
f"but got {type(inpt)} instead."
)
|
"""Argparser module for the export API"""
from jina.parsers.base import set_base_parser
from jina.parsers.helper import _chf
def set_export_parser(parser=None):
"""Set the parser for exporting
    :param parser: the parser to configure
:return: the parser
"""
if not parser:
parser = set_base_parser()
spp = parser.add_subparsers(
dest='export',
description='use `%(prog)-8s [sub-command] --help` '
'to get detailed information about each sub-command',
required=True,
)
set_export_flowchart_parser(
spp.add_parser(
'flowchart',
help='Export a Flow YAML file to a flowchart',
formatter_class=_chf,
)
)
set_export_k8s_parser(
spp.add_parser(
'kubernetes',
help='Export a Flow YAML file to a Kubernetes YAML bundle',
formatter_class=_chf,
)
)
set_export_docker_compose_parser(
spp.add_parser(
'docker-compose',
help='Export a Flow YAML file to a Docker Compose YAML file',
formatter_class=_chf,
)
)
set_export_schema_parser(
spp.add_parser(
'schema',
help='Export Jina Executor & Flow API to JSONSchema files',
formatter_class=_chf,
)
)
return parser
def mixin_base_io_parser(parser):
"""Add basic IO parsing args
    :param parser: the parser to configure
"""
parser.add_argument(
'config_path', type=str, metavar='INPUT', help='The input file path of a Flow or Deployment YAML '
)
parser.add_argument(
'outpath',
type=str,
metavar='OUTPUT',
help='The output path',
)
def set_export_docker_compose_parser(parser=None):
"""Set the parser for the flow chart export
:param parser: an optional existing parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
mixin_base_io_parser(parser)
parser.add_argument(
'--network_name',
type=str,
        help='The name of the network that will be used by the deployment.',
)
return parser
def set_export_k8s_parser(parser=None):
"""Set the parser for the flow chart export
:param parser: an optional existing parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
mixin_base_io_parser(parser)
parser.add_argument(
'--k8s-namespace',
type=str,
help='The name of the k8s namespace to set for the configurations. If None, the name of the Flow will be used.',
)
return parser
def set_export_flowchart_parser(parser=None):
"""Set the parser for the flow chart export
:param parser: an optional existing parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
mixin_base_io_parser(parser)
parser.add_argument(
'--vertical-layout',
action='store_true',
default=False,
        help='If set, the flowchart is rendered vertically from top to bottom.',
)
return parser
def set_export_schema_parser(parser=None):
"""Set the parser for the API export
:param parser: an optional existing parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
parser.add_argument(
'--yaml-path',
type=str,
nargs='*',
metavar='PATH',
help='The YAML file path for storing the exported API',
)
parser.add_argument(
'--json-path',
type=str,
nargs='*',
metavar='PATH',
help='The JSON file path for storing the exported API',
)
parser.add_argument(
'--schema-path',
type=str,
nargs='*',
metavar='PATH',
help='The JSONSchema file path for storing the exported API',
)
return parser
|
"""Argparser module for the export API"""
from jina.parsers.base import set_base_parser
from jina.parsers.helper import _chf
def set_export_parser(parser=None):
"""Set the parser for exporting
    :param parser: the parser to configure
:return: the parser
"""
if not parser:
parser = set_base_parser()
spp = parser.add_subparsers(
dest='export',
description='use `%(prog)-8s [sub-command] --help` '
'to get detailed information about each sub-command',
required=True,
)
set_export_flowchart_parser(
spp.add_parser(
'flowchart',
help='Export a Flow YAML file to a flowchart',
formatter_class=_chf,
)
)
set_export_k8s_parser(
spp.add_parser(
'kubernetes',
help='Export a Flow YAML file to a Kubernetes YAML bundle',
formatter_class=_chf,
)
)
set_export_docker_compose_parser(
spp.add_parser(
'docker-compose',
help='Export a Flow YAML file to a Docker Compose YAML file',
formatter_class=_chf,
)
)
set_export_schema_parser(
spp.add_parser(
'schema',
help='Export Jina Executor & Flow API to JSONSchema files',
formatter_class=_chf,
)
)
return parser
def mixin_base_io_parser(parser):
"""Add basic IO parsing args
    :param parser: the parser to configure
"""
parser.add_argument(
'flowpath', type=str, metavar='INPUT', help='The input file path of a Flow YAML'
)
parser.add_argument(
'outpath',
type=str,
metavar='OUTPUT',
help='The output path',
)
def set_export_docker_compose_parser(parser=None):
"""Set the parser for the flow chart export
:param parser: an optional existing parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
mixin_base_io_parser(parser)
parser.add_argument(
'--network_name',
type=str,
        help='The name of the network that will be used by the deployment.',
)
return parser
def set_export_k8s_parser(parser=None):
"""Set the parser for the flow chart export
:param parser: an optional existing parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
mixin_base_io_parser(parser)
parser.add_argument(
'--k8s-namespace',
type=str,
help='The name of the k8s namespace to set for the configurations. If None, the name of the Flow will be used.',
)
return parser
def set_export_flowchart_parser(parser=None):
"""Set the parser for the flow chart export
:param parser: an optional existing parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
mixin_base_io_parser(parser)
parser.add_argument(
'--vertical-layout',
action='store_true',
default=False,
        help='If set, the flowchart is rendered vertically from top to bottom.',
)
return parser
def set_export_schema_parser(parser=None):
"""Set the parser for the API export
:param parser: an optional existing parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
parser.add_argument(
'--yaml-path',
type=str,
nargs='*',
metavar='PATH',
help='The YAML file path for storing the exported API',
)
parser.add_argument(
'--json-path',
type=str,
nargs='*',
metavar='PATH',
help='The JSON file path for storing the exported API',
)
parser.add_argument(
'--schema-path',
type=str,
nargs='*',
metavar='PATH',
help='The JSONSchema file path for storing the exported API',
)
return parser
|
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers import util
from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import SparseCoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseAnglELoss(SparseCoSENTLoss):
def __init__(self, model: SparseEncoder, scale: float = 20.0) -> None:
"""
        This class implements the AnglE (Angle Optimized) loss.
        This is a modification of :class:`SparseCoSENTLoss`, designed to address the following issue:
        the gradient of the cosine function approaches 0 near its peaks and troughs.
        This can hinder optimization, so AnglE proposes to instead optimize the angle difference
        in complex space in order to mitigate this effect.
        It expects that each of the InputExamples consists of a pair of texts and a float-valued label,
        representing the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition. This is the same as CoSENTLoss, with a different
similarity function.
Args:
model: SparseEncoder
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://arxiv.org/abs/2309.12871v1
Requirements:
            - Must be used within SpladeLoss or CSRLoss as a loss function.
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseCoSENTLoss` is AnglELoss with ``pairwise_cos_sim`` as the metric, rather than ``pairwise_angle_sim``.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SpladeLoss(model=model, loss=losses.SparseAnglELoss(model), lambda_corpus=5e-5, all_docs=True)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
return super().__init__(model, scale, similarity_fct=util.pairwise_angle_sim)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError("SparseAngleLoss should not be used alone. Use it with SpladeLoss or CSRLoss.")
|
from __future__ import annotations
from sentence_transformers import util
from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import SparseCoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseAnglELoss(SparseCoSENTLoss):
def __init__(self, model: SparseEncoder, scale: float = 20.0) -> None:
return super().__init__(model, scale, similarity_fct=util.pairwise_angle_sim)
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from diffusers import AutoencoderKLTemporalDecoder
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTemporalDecoderTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
model_class = AutoencoderKLTemporalDecoder
main_input_name = "sample"
base_precision = 1e-2
@property
def dummy_input(self):
batch_size = 3
num_channels = 3
sizes = (32, 32)
image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
num_frames = 3
return {"sample": image, "num_frames": num_frames}
@property
def input_shape(self):
return (3, 32, 32)
@property
def output_shape(self):
return (3, 32, 32)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"latent_channels": 4,
"layers_per_block": 2,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"Encoder", "TemporalDecoder", "UNetMidBlock2D"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
@unittest.skip("Test unsupported.")
def test_forward_with_norm_groups(self):
pass
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from diffusers import AutoencoderKLTemporalDecoder
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTemporalDecoderTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
model_class = AutoencoderKLTemporalDecoder
main_input_name = "sample"
base_precision = 1e-2
@property
def dummy_input(self):
batch_size = 3
num_channels = 3
sizes = (32, 32)
image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
num_frames = 3
return {"sample": image, "num_frames": num_frames}
@property
def input_shape(self):
return (3, 32, 32)
@property
def output_shape(self):
return (3, 32, 32)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"latent_channels": 4,
"layers_per_block": 2,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"Encoder", "TemporalDecoder"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
@unittest.skip("Test unsupported.")
def test_forward_with_norm_groups(self):
pass
|
"""Argparser module for pinging"""
from jina.parsers.base import set_base_parser
def set_ping_parser(parser=None):
"""Set the parser for `ping`
:param parser: an existing parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
parser.add_argument(
'target',
type=str,
choices=['flow', 'executor', 'gateway'],
help='The target type to ping. For `executor` and `gateway`, checks the readiness of the individual service. '
'For `flow` it checks the connectivity of the complete microservice architecture.',
default='executor',
)
parser.add_argument(
'host',
type=str,
        help='The host address with port of a target Executor, Gateway or a Flow, e.g. 0.0.0.0:8000. For a Flow or Gateway, the host can also indicate the protocol; grpc will be used if not provided, e.g. http://0.0.0.0:8000',
)
parser.add_argument(
'--timeout',
type=int,
default=3000,
help='''
Timeout in milliseconds of one check
-1 for waiting forever
''',
)
parser.add_argument(
'--attempts',
type=int,
default=1,
help='The number of readiness checks to perform',
)
parser.add_argument(
'--min-successful-attempts',
type=int,
default=1,
help='The minimum number of successful readiness checks, before exiting successfully with exit(0)',
)
return parser
|
"""Argparser module for pinging"""
from jina.parsers.base import set_base_parser
def set_ping_parser(parser=None):
"""Set the parser for `ping`
:param parser: an existing parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
parser.add_argument(
'target',
type=str,
choices=['flow', 'executor', 'gateway'],
help='The target type to ping. For `executor` and `gateway`, checks the readiness of the individual service. '
'For `flow` it checks the connectivity of the complete microservice architecture.',
default='executor',
)
parser.add_argument(
'host',
type=str,
        help='The host address with port of a target Executor, Gateway or a Flow, e.g. 0.0.0.0:8000. For a Flow or Gateway, the host can also indicate the protocol; grpc will be used if not provided, e.g. http://0.0.0.0:8000',
)
parser.add_argument(
'--timeout',
type=int,
default=3000,
help='''
Timeout in milliseconds of one check
-1 for waiting forever
''',
)
parser.add_argument(
'--attempts',
type=int,
default=1,
help='The number of readiness checks to perform',
)
parser.add_argument(
'--min-successful-attempts',
type=int,
default=1,
help='The minimum number of successful readiness checks, before exiting successfully with exit(0)',
)
return parser
|
from docarray.typing.id import ID
from docarray.typing.tensor import NdArray, Tensor, TorchEmbedding, TorchTensor
from docarray.typing.tensor.embedding import Embedding
from docarray.typing.url import AnyUrl, ImageUrl, TextUrl
__all__ = [
'TorchTensor',
'NdArray',
'Embedding',
'ImageUrl',
'TextUrl',
'AnyUrl',
'ID',
'Tensor',
'TorchEmbedding',
]
|
from docarray.typing.id import ID
from docarray.typing.tensor import NdArray, Tensor, TorchTensor
from docarray.typing.tensor.embedding import Embedding
from docarray.typing.url import AnyUrl, ImageUrl, TextUrl
__all__ = [
'TorchTensor',
'NdArray',
'Embedding',
'ImageUrl',
'TextUrl',
'AnyUrl',
'ID',
'Tensor',
]
|
_base_ = './grid_rcnn_r50_fpn_gn-head_2x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
|
_base_ = './grid_rcnn_r50_fpn_gn-head_2x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=3665,
warmup_ratio=1.0 / 80,
step=[17, 23])
runner = dict(type='EpochBasedRunner', max_epochs=25)
|
_base_ = [
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='SOLO',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'),
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=0,
num_outs=5),
mask_head=dict(
type='SOLOHead',
num_classes=80,
in_channels=256,
stacked_convs=7,
feat_channels=256,
strides=[8, 8, 16, 32, 32],
scale_ranges=((1, 96), (48, 192), (96, 384), (192, 768), (384, 2048)),
pos_scale=0.2,
num_grids=[40, 36, 24, 16, 12],
cls_down_index=0,
loss_mask=dict(type='DiceLoss', use_sigmoid=True, loss_weight=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)),
# model training and testing settings
test_cfg=dict(
nms_pre=500,
score_thr=0.1,
mask_thr=0.5,
filter_thr=0.05,
kernel='gaussian', # gaussian/linear
sigma=2.0,
max_per_img=100))
# optimizer
optim_wrapper = dict(optimizer=dict(lr=0.01))
val_evaluator = dict(metric='segm')
test_evaluator = val_evaluator
|
_base_ = [
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
preprocess_cfg = dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True,
pad_size_divisor=32)
# model settings
model = dict(
type='SOLO',
preprocess_cfg=preprocess_cfg,
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'),
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=0,
num_outs=5),
mask_head=dict(
type='SOLOHead',
num_classes=80,
in_channels=256,
stacked_convs=7,
feat_channels=256,
strides=[8, 8, 16, 32, 32],
scale_ranges=((1, 96), (48, 192), (96, 384), (192, 768), (384, 2048)),
pos_scale=0.2,
num_grids=[40, 36, 24, 16, 12],
cls_down_index=0,
loss_mask=dict(type='DiceLoss', use_sigmoid=True, loss_weight=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)),
# model training and testing settings
test_cfg=dict(
nms_pre=500,
score_thr=0.1,
mask_thr=0.5,
filter_thr=0.05,
kernel='gaussian', # gaussian/linear
sigma=2.0,
max_per_img=100))
# optimizer
optim_wrapper = dict(optimizer=dict(lr=0.01))
val_evaluator = dict(metric='segm')
test_evaluator = val_evaluator
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.models.roi_heads import HybridTaskCascadeRoIHead # noqa
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
class TestHTCRoIHead(TestCase):
@parameterized.expand(['htc/htc_r50_fpn_1x_coco.py'])
def test_init(self, cfg_file):
"""Test init htc RoI head."""
# Normal HTC RoI head
roi_head_cfg = get_roi_head_cfg(cfg_file)
roi_head = MODELS.build(roi_head_cfg)
assert roi_head.with_bbox
assert roi_head.with_mask
assert roi_head.with_semantic
@parameterized.expand(['htc/htc_r50_fpn_1x_coco.py'])
def test_htc_roi_head_loss(self, cfg_file):
"""Tests htc roi head loss when truth is empty and non-empty."""
if not torch.cuda.is_available():
            # RoI pooling is only supported on GPU
return unittest.skip('test requires GPU and torch+cuda')
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
}]
roi_head_cfg = get_roi_head_cfg(cfg_file)
roi_head = MODELS.build(roi_head_cfg)
roi_head = roi_head.cuda()
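        # build fake multi-level features: one tensor per RoI-extractor stride,
        # halving the spatial size at each level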
feats = []
for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):
feats.append(
torch.rand(1, 256, s // (2**(i + 2)),
s // (2**(i + 2))).to(device='cuda'))
feats = tuple(feats)
        # When truth is non-empty, the cls, box, and mask losses
        # should all be nonzero for random inputs
img_shape_list = [(3, s, s) for _ in img_metas]
proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
batch_data_samples = demo_mm_inputs(
batch_size=1,
image_shapes=[(3, s, s)],
num_items=[1],
num_classes=4,
with_mask=True,
with_semantic=True,
device='cuda')['data_samples']
out = roi_head.loss(feats, proposal_list, batch_data_samples)
for name, value in out.items():
if 'loss' in name:
self.assertGreaterEqual(
                    value.sum(), 0, msg='loss should be non-negative')
# When there is no truth, the cls loss should be nonzero but
# there should be no box and mask loss.
proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
batch_data_samples = demo_mm_inputs(
batch_size=1,
image_shapes=[(3, s, s)],
num_items=[0],
num_classes=4,
with_mask=True,
with_semantic=True,
device='cuda')['data_samples']
out = roi_head.loss(feats, proposal_list, batch_data_samples)
for name, value in out.items():
if 'loss_cls' in name:
self.assertGreaterEqual(
                    value.sum(), 0, msg='loss should be non-negative')
elif 'loss_bbox' in name or 'loss_mask' in name:
self.assertEqual(value.sum(), 0)
@parameterized.expand(['htc/htc_r50_fpn_1x_coco.py'])
def test_htc_roi_head_predict(self, cfg_file):
if not torch.cuda.is_available():
            # RoI pooling is only supported on GPU
return unittest.skip('test requires GPU and torch+cuda')
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
}]
roi_head_cfg = get_roi_head_cfg(cfg_file)
roi_head = MODELS.build(roi_head_cfg)
roi_head = roi_head.cuda()
feats = []
for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):
feats.append(
torch.rand(1, 256, s // (2**(i + 2)),
s // (2**(i + 2))).to(device='cuda'))
feats = tuple(feats)
img_shape_list = [(3, s, s) for _ in img_metas]
proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
batch_data_samples = demo_mm_inputs(
batch_size=1,
image_shapes=[(3, s, s)],
num_items=[1],
num_classes=4,
with_mask=True,
device='cuda')['data_samples']
results = roi_head.predict(
feats, proposal_list, batch_data_samples, rescale=True)
self.assertEqual(results[0].masks.shape[-2:], (s, s))
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.models.roi_heads import HybridTaskCascadeRoIHead # noqa
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
class TestHTCRoIHead(TestCase):
@parameterized.expand(['htc/htc_r50_fpn_1x_coco.py'])
def test_init(self, cfg_file):
"""Test init htc RoI head."""
# Normal HTC RoI head
roi_head_cfg = get_roi_head_cfg(cfg_file)
roi_head = MODELS.build(roi_head_cfg)
assert roi_head.with_bbox
assert roi_head.with_mask
assert roi_head.with_semantic
@parameterized.expand(['htc/htc_r50_fpn_1x_coco.py'])
def test_htc_roi_head_loss(self, cfg_file):
"""Tests htc roi head loss when truth is empty and non-empty."""
if not torch.cuda.is_available():
            # RoI pooling is only supported on GPU
return unittest.skip('test requires GPU and torch+cuda')
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
}]
roi_head_cfg = get_roi_head_cfg(cfg_file)
roi_head = MODELS.build(roi_head_cfg)
roi_head = roi_head.cuda()
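        # build fake multi-level features: one tensor per RoI-extractor stride,
        # halving the spatial size at each level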
feats = []
for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):
feats.append(
torch.rand(1, 256, s // (2**(i + 2)),
s // (2**(i + 2))).to(device='cuda'))
feats = tuple(feats)
        # When truth is non-empty, the cls, box, and mask losses
        # should all be nonzero for random inputs
img_shape_list = [(3, s, s) for _ in img_metas]
proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
packed_inputs = demo_mm_inputs(
batch_size=1,
image_shapes=[(3, s, s)],
num_items=[1],
num_classes=4,
with_mask=True,
with_semantic=True)
batch_data_samples = []
for i in range(len(packed_inputs)):
batch_data_samples.append(
packed_inputs[i]['data_sample'].to(device='cuda'))
out = roi_head.loss(feats, proposal_list, batch_data_samples)
for name, value in out.items():
if 'loss' in name:
self.assertGreaterEqual(
                    value.sum(), 0, msg='loss should be non-negative')
# When there is no truth, the cls loss should be nonzero but
# there should be no box and mask loss.
proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
packed_inputs = demo_mm_inputs(
batch_size=1,
image_shapes=[(3, s, s)],
num_items=[0],
num_classes=4,
with_mask=True,
with_semantic=True)
batch_data_samples = []
for i in range(len(packed_inputs)):
batch_data_samples.append(
packed_inputs[i]['data_sample'].to(device='cuda'))
out = roi_head.loss(feats, proposal_list, batch_data_samples)
for name, value in out.items():
if 'loss_cls' in name:
self.assertGreaterEqual(
                    value.sum(), 0, msg='loss should be non-negative')
elif 'loss_bbox' in name or 'loss_mask' in name:
self.assertEqual(value.sum(), 0)
@parameterized.expand(['htc/htc_r50_fpn_1x_coco.py'])
def test_htc_roi_head_predict(self, cfg_file):
if not torch.cuda.is_available():
            # RoI pooling is only supported on GPU
return unittest.skip('test requires GPU and torch+cuda')
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
}]
roi_head_cfg = get_roi_head_cfg(cfg_file)
roi_head = MODELS.build(roi_head_cfg)
roi_head = roi_head.cuda()
feats = []
for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):
feats.append(
torch.rand(1, 256, s // (2**(i + 2)),
s // (2**(i + 2))).to(device='cuda'))
feats = tuple(feats)
img_shape_list = [(3, s, s) for _ in img_metas]
proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
packed_inputs = demo_mm_inputs(
batch_size=1,
image_shapes=[(3, s, s)],
num_items=[1],
num_classes=4,
with_mask=True)
batch_data_samples = []
for i in range(len(packed_inputs)):
batch_data_samples.append(
packed_inputs[i]['data_sample'].to(device='cuda'))
results = roi_head.predict(
feats, proposal_list, batch_data_samples, rescale=True)
self.assertEqual(results[0].masks.shape[-2:], (s, s))
|
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/voc0712.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
bbox_head=dict(
num_classes=20, anchor_generator=dict(basesize_ratio_range=(0.2,
0.9))))
# dataset settings
dataset_type = 'VOCDataset'
data_root = 'data/VOCdevkit/'
input_size = 300
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean={{_base_.model.data_preprocessor.mean}},
to_rgb={{_base_.model.data_preprocessor.bgr_to_rgb}},
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(type='RandomFlip', prob=0.5),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
# avoid bboxes being resized
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=8,
num_workers=3,
dataset=dict( # RepeatDataset
# the dataset is repeated 10 times, and the training schedule is 2x,
        # so the actual epoch = 24 * 10 = 240.
times=10,
dataset=dict( # ConcatDataset
datasets=[
dict(
type=dataset_type,
data_root=data_root,
ann_file='VOC2007/ImageSets/Main/trainval.txt',
data_prefix=dict(sub_data_root='VOC2007/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline),
dict(
type=dataset_type,
data_root=data_root,
ann_file='VOC2012/ImageSets/Main/trainval.txt',
data_prefix=dict(sub_data_root='VOC2012/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)
])))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
custom_hooks = [
dict(type='NumClassCheckHook'),
dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW')
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=1e-3, momentum=0.9, weight_decay=5e-4))
# learning policy
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=24,
by_epoch=True,
milestones=[16, 20],
gamma=0.1)
]
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/voc0712.py',
'../_base_/default_runtime.py'
]
model = dict(
bbox_head=dict(
num_classes=20, anchor_generator=dict(basesize_ratio_range=(0.2,
0.9))))
# dataset settings
dataset_type = 'VOCDataset'
data_root = 'data/VOCdevkit/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(300, 300), keep_ratio=False),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(300, 300),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=3,
train=dict(
type='RepeatDataset', times=10, dataset=dict(pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=1e-3, momentum=0.9, weight_decay=5e-4)
optimizer_config = dict()
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[16, 20])
checkpoint_config = dict(interval=1)
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=24)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
import csv
import os
from pathlib import Path
from typing import Dict, List, Tuple, Union
import torchaudio
from torch import Tensor
from torch.utils.data import Dataset
def load_commonvoice_item(
line: List[str], header: List[str], path: str, folder_audio: str, ext_audio: str
) -> Tuple[Tensor, int, Dict[str, str]]:
    # Each line has the following data:
# client_id, path, sentence, up_votes, down_votes, age, gender, accent
if header[1] != "path":
raise ValueError(f"expect `header[1]` to be 'path', but got {header[1]}")
fileid = line[1]
filename = os.path.join(path, folder_audio, fileid)
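    # append the audio extension when the path from the TSV omits it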
if not filename.endswith(ext_audio):
filename += ext_audio
waveform, sample_rate = torchaudio.load(filename)
dic = dict(zip(header, line))
return waveform, sample_rate, dic
class COMMONVOICE(Dataset):
"""Create a Dataset for *CommonVoice* [:footcite:`ardila2020common`].
Args:
root (str or Path): Path to the directory where the dataset is located.
(Where the ``tsv`` file is present.)
tsv (str, optional):
The name of the tsv file used to construct the metadata, such as
``"train.tsv"``, ``"test.tsv"``, ``"dev.tsv"``, ``"invalidated.tsv"``,
``"validated.tsv"`` and ``"other.tsv"``. (default: ``"train.tsv"``)
"""
_ext_txt = ".txt"
_ext_audio = ".mp3"
_folder_audio = "clips"
def __init__(self, root: Union[str, Path], tsv: str = "train.tsv") -> None:
# Get string representation of 'root' in case Path object is passed
self._path = os.fspath(root)
self._tsv = os.path.join(self._path, tsv)
with open(self._tsv, "r") as tsv_:
walker = csv.reader(tsv_, delimiter="\t")
self._header = next(walker)
self._walker = list(walker)
def __getitem__(self, n: int) -> Tuple[Tensor, int, Dict[str, str]]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, Dict[str, str]): ``(waveform, sample_rate, dictionary)``, where dictionary
is built from the TSV file with the following keys: ``client_id``, ``path``, ``sentence``,
``up_votes``, ``down_votes``, ``age``, ``gender`` and ``accent``.
"""
line = self._walker[n]
return load_commonvoice_item(line, self._header, self._path, self._folder_audio, self._ext_audio)
def __len__(self) -> int:
return len(self._walker)
|
import csv
import os
from pathlib import Path
from typing import Dict, List, Tuple, Union
import torchaudio
from torch import Tensor
from torch.utils.data import Dataset
def load_commonvoice_item(
line: List[str], header: List[str], path: str, folder_audio: str, ext_audio: str
) -> Tuple[Tensor, int, Dict[str, str]]:
    # Each line has the following data:
# client_id, path, sentence, up_votes, down_votes, age, gender, accent
assert header[1] == "path"
fileid = line[1]
filename = os.path.join(path, folder_audio, fileid)
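    # append the audio extension when the path from the TSV omits it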
if not filename.endswith(ext_audio):
filename += ext_audio
waveform, sample_rate = torchaudio.load(filename)
dic = dict(zip(header, line))
return waveform, sample_rate, dic
class COMMONVOICE(Dataset):
"""Create a Dataset for *CommonVoice* [:footcite:`ardila2020common`].
Args:
root (str or Path): Path to the directory where the dataset is located.
(Where the ``tsv`` file is present.)
tsv (str, optional):
The name of the tsv file used to construct the metadata, such as
``"train.tsv"``, ``"test.tsv"``, ``"dev.tsv"``, ``"invalidated.tsv"``,
``"validated.tsv"`` and ``"other.tsv"``. (default: ``"train.tsv"``)
"""
_ext_txt = ".txt"
_ext_audio = ".mp3"
_folder_audio = "clips"
def __init__(self, root: Union[str, Path], tsv: str = "train.tsv") -> None:
# Get string representation of 'root' in case Path object is passed
self._path = os.fspath(root)
self._tsv = os.path.join(self._path, tsv)
with open(self._tsv, "r") as tsv_:
walker = csv.reader(tsv_, delimiter="\t")
self._header = next(walker)
self._walker = list(walker)
def __getitem__(self, n: int) -> Tuple[Tensor, int, Dict[str, str]]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, Dict[str, str]): ``(waveform, sample_rate, dictionary)``, where dictionary
is built from the TSV file with the following keys: ``client_id``, ``path``, ``sentence``,
``up_votes``, ``down_votes``, ``age``, ``gender`` and ``accent``.
"""
line = self._walker[n]
return load_commonvoice_item(line, self._header, self._path, self._folder_audio, self._ext_audio)
def __len__(self) -> int:
return len(self._walker)
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
import torch
from torch.utils.data import DistributedSampler as _DistributedSampler
from mmdet.core.utils import sync_random_seed
class DistributedSampler(_DistributedSampler):
def __init__(self,
dataset,
num_replicas=None,
rank=None,
shuffle=True,
seed=0):
super().__init__(
dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
# In distributed sampling, different ranks should sample
# non-overlapped data in the dataset. Therefore, this function
# is used to make sure that each rank shuffles the data indices
# in the same order based on the same seed. Then different ranks
# could use different indices to select non-overlapped data from the
# same data list.
self.seed = sync_random_seed(seed)
def __iter__(self):
# deterministically shuffle based on epoch
if self.shuffle:
g = torch.Generator()
# When :attr:`shuffle=True`, this ensures all replicas
# use a different random ordering for each epoch.
# Otherwise, the next iteration of this sampler will
# yield the same ordering.
g.manual_seed(self.epoch + self.seed)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
# add extra samples to make it evenly divisible
# in case that indices is shorter than half of total_size
indices = (indices *
math.ceil(self.total_size / len(indices)))[:self.total_size]
assert len(indices) == self.total_size
# subsample
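        # each rank takes every num_replicas-th index starting at its own rank,
        # so the per-rank shards are disjoint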
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
import torch
from torch.utils.data import DistributedSampler as _DistributedSampler
class DistributedSampler(_DistributedSampler):
def __init__(self,
dataset,
num_replicas=None,
rank=None,
shuffle=True,
seed=0):
super().__init__(
dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
# for the compatibility from PyTorch 1.3+
self.seed = seed if seed is not None else 0
def __iter__(self):
# deterministically shuffle based on epoch
if self.shuffle:
g = torch.Generator()
g.manual_seed(self.epoch + self.seed)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
# add extra samples to make it evenly divisible
# in case that indices is shorter than half of total_size
indices = (indices *
math.ceil(self.total_size / len(indices)))[:self.total_size]
assert len(indices) == self.total_size
# subsample
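        # each rank takes every num_replicas-th index starting at its own rank,
        # so the per-rank shards are disjoint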
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
|
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/openimages_detection.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_1x.py'
]
model = dict(
bbox_head=dict(
num_classes=601,
anchor_generator=dict(basesize_ratio_range=(0.2, 0.9))))
# dataset settings
dataset_type = 'OpenImagesDataset'
data_root = 'data/OpenImages/'
input_size = 300
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='Expand',
mean={{_base_.model.data_preprocessor.mean}},
to_rgb={{_base_.model.data_preprocessor.bgr_to_rgb}},
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
# avoid bboxes being resized
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'instances'))
]
train_dataloader = dict(
    batch_size=8,  # using 32 GPUs while training; total batch size is 32 x 8
batch_sampler=None,
dataset=dict(
_delete_=True,
type='RepeatDataset',
times=3, # repeat 3 times, total epochs are 12 x 3
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/oidv6-train-annotations-bbox.csv',
data_prefix=dict(img='OpenImages/train/'),
label_file='annotations/class-descriptions-boxable.csv',
hierarchy_file='annotations/bbox_labels_600_hierarchy.json',
meta_file='annotations/train-image-metas.pkl',
pipeline=train_pipeline)))
val_dataloader = dict(batch_size=8, dataset=dict(pipeline=test_pipeline))
test_dataloader = dict(batch_size=8, dataset=dict(pipeline=test_pipeline))
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.04, momentum=0.9, weight_decay=5e-4))
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=0.001,
by_epoch=False,
begin=0,
end=20000),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=256)
|
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/openimages_detection.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_1x.py'
]
model = dict(
bbox_head=dict(
num_classes=601,
anchor_generator=dict(basesize_ratio_range=(0.2, 0.9))))
# dataset settings
dataset_type = 'OpenImagesDataset'
data_root = 'data/OpenImages/'
input_size = 300
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='Expand',
mean={{_base_.model.data_preprocessor.mean}},
to_rgb={{_base_.model.data_preprocessor.bgr_to_rgb}},
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
# avoid bboxes being resized
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'instances'))
]
train_dataloader = dict(
    batch_size=8,  # trained with 32 GPUs; total batch size is 32 x 8
batch_sampler=None,
dataset=dict(
_delete_=True,
type='RepeatDataset',
times=3, # repeat 3 times, total epochs are 12 x 3
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/oidv6-train-annotations-bbox.csv',
data_prefix=dict(img='OpenImages/train/'),
label_file='annotations/class-descriptions-boxable.csv',
hierarchy_file='annotations/bbox_labels_600_hierarchy.json',
meta_file='annotations/train-image-metas.pkl',
pipeline=train_pipeline)))
val_dataloader = dict(batch_size=8, dataset=dict(pipeline=test_pipeline))
test_dataloader = dict(batch_size=8, dataset=dict(pipeline=test_pipeline))
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.04, momentum=0.9, weight_decay=5e-4))
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=0.001,
by_epoch=False,
begin=0,
end=20000),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=256)
|
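The `{{_base_.backend_args}}` markers in the newer config above are MMEngine's base-variable interpolation: at parse time, values defined in one of the `_base_` files are substituted in place. A hedged illustration of the pattern, with hypothetical file names:

# base.py (hypothetical base config)
backend_args = None
mean = [123.675, 116.28, 103.53]

# child.py -- inherits base.py; the double-brace references resolve at parse time
_base_ = ['./base.py']
train_pipeline = [
    dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
    dict(type='Expand', mean={{_base_.mean}}, ratio_range=(1, 4)),
]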
import os
import urllib.parse
import urllib.request
from contextlib import nullcontext
def _uri_to_blob(uri: str, **kwargs) -> bytes:
"""Convert uri to blob
Internally it reads uri into blob.
:param uri: the uri of Document
:param kwargs: keyword arguments to pass to `urlopen` such as timeout
:return: blob bytes.
"""
timeout = kwargs.get('timeout', None)
if urllib.parse.urlparse(uri).scheme in {'http', 'https', 'data'}:
req = urllib.request.Request(uri, headers={'User-Agent': 'Mozilla/5.0'})
urlopen_kwargs = {'timeout': timeout} if timeout is not None else {}
with urllib.request.urlopen(req, **urlopen_kwargs) as fp:
return fp.read()
elif os.path.exists(uri):
with open(uri, 'rb') as fp:
return fp.read()
else:
raise FileNotFoundError(f'`{uri}` is not a URL or a valid local path')
def _get_file_context(file):
if hasattr(file, 'write'):
file_ctx = nullcontext(file)
else:
file_ctx = open(file, 'wb')
return file_ctx
def _to_datauri(
mimetype, data, charset: str = 'utf-8', base64: bool = False, binary: bool = True
) -> str:
"""
Convert data to data URI.
    :param mimetype: MIME type (e.g. 'text/plain', 'image/png')
    :param data: the data to encode
    :param charset: charset; may be any character set registered with IANA
    :param base64: if True, base64-encode the payload so that arbitrary octet sequences satisfy 7-bit transport rules; efficient for binary data, and sometimes used for text that frequently contains non-US-ASCII characters
    :param binary: True for binary data, False for other data (e.g. text)
:return: URI data
"""
parts = ['data:', mimetype]
if charset is not None:
parts.extend([';charset=', charset])
if base64:
parts.append(';base64')
from base64 import encodebytes as encode64
if binary:
encoded_data = encode64(data).decode(charset).replace('\n', '').strip()
else:
encoded_data = encode64(data).strip()
else:
from urllib.parse import quote_from_bytes, quote
if binary:
encoded_data = quote_from_bytes(data)
else:
encoded_data = quote(data)
parts.extend([',', encoded_data])
return ''.join(parts)
def _is_uri(value: str) -> bool:
scheme = urllib.parse.urlparse(value).scheme
return (
(scheme in {'http', 'https'})
or (scheme in {'data'})
or os.path.exists(value)
or os.access(os.path.dirname(value), os.W_OK)
)
def _is_datauri(value: str) -> bool:
scheme = urllib.parse.urlparse(value).scheme
return scheme in {'data'}
|
import os
import urllib.parse
import urllib.request
from contextlib import nullcontext
def _uri_to_blob(uri: str) -> bytes:
"""Convert uri to blob
Internally it reads uri into blob.
:param uri: the uri of Document
:return: blob bytes.
"""
if urllib.parse.urlparse(uri).scheme in {'http', 'https', 'data'}:
req = urllib.request.Request(uri, headers={'User-Agent': 'Mozilla/5.0'})
with urllib.request.urlopen(req) as fp:
return fp.read()
elif os.path.exists(uri):
with open(uri, 'rb') as fp:
return fp.read()
else:
raise FileNotFoundError(f'`{uri}` is not a URL or a valid local path')
def _get_file_context(file):
if hasattr(file, 'write'):
file_ctx = nullcontext(file)
else:
file_ctx = open(file, 'wb')
return file_ctx
def _to_datauri(
mimetype, data, charset: str = 'utf-8', base64: bool = False, binary: bool = True
) -> str:
"""
Convert data to data URI.
    :param mimetype: MIME type (e.g. 'text/plain', 'image/png')
    :param data: the data to encode
    :param charset: charset; may be any character set registered with IANA
    :param base64: if True, base64-encode the payload so that arbitrary octet sequences satisfy 7-bit transport rules; efficient for binary data, and sometimes used for text that frequently contains non-US-ASCII characters
    :param binary: True for binary data, False for other data (e.g. text)
:return: URI data
"""
parts = ['data:', mimetype]
if charset is not None:
parts.extend([';charset=', charset])
if base64:
parts.append(';base64')
from base64 import encodebytes as encode64
if binary:
encoded_data = encode64(data).decode(charset).replace('\n', '').strip()
else:
encoded_data = encode64(data).strip()
else:
from urllib.parse import quote_from_bytes, quote
if binary:
encoded_data = quote_from_bytes(data)
else:
encoded_data = quote(data)
parts.extend([',', encoded_data])
return ''.join(parts)
def _is_uri(value: str) -> bool:
scheme = urllib.parse.urlparse(value).scheme
return (
(scheme in {'http', 'https'})
or (scheme in {'data'})
or os.path.exists(value)
or os.access(os.path.dirname(value), os.W_OK)
)
def _is_datauri(value: str) -> bool:
scheme = urllib.parse.urlparse(value).scheme
return scheme in {'data'}
|
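Assuming the helpers above are in scope, a short usage sketch; the expected strings follow directly from the base64 and percent-encoding branches of _to_datauri:

blob = b'hello'
uri = _to_datauri('text/plain', blob, base64=True)
assert uri == 'data:text/plain;charset=utf-8;base64,aGVsbG8='
assert _is_datauri(uri) and _is_uri(uri)
# The non-base64 branch percent-encodes binary payloads instead:
assert _to_datauri('text/plain', b'a b', base64=False) == 'data:text/plain;charset=utf-8,a%20b'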
# model settings
preprocess_cfg = dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True,
pad_size_divisor=32)
model = dict(
type='RPN',
preprocess_cfg=preprocess_cfg,
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms_pre=2000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0)))
|
# model settings
model = dict(
type='RPN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms_pre=2000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0)))
|
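With scales=[8] and ratios=[0.5, 1.0, 2.0] as in the RPN head above, the anchor generator places len(scales) * len(ratios) = 3 anchors at every feature-map location. A back-of-the-envelope count (the input resolution is illustrative):

scales, ratios = [8], [0.5, 1.0, 2.0]
strides = [4, 8, 16, 32, 64]
anchors_per_loc = len(scales) * len(ratios)  # 3
# Each FPN level is roughly input_size / stride on a side.
feat_sizes = [(1333 // s, 800 // s) for s in strides]
total = sum(w * h * anchors_per_loc for w, h in feat_sizes)
print(anchors_per_loc, total)  # 3 anchors per location, ~266k anchors for a 1333x800 input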
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyGateway(Gateway):
def __init__(
self, arg1: str = None, arg2: str = None, arg3: str = 'default-arg3', **kwargs
):
super().__init__(**kwargs)
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
async def setup_server(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(path='/runtime_info')
def _get_info():
return {
'ports': self.ports,
'protocols': self.protocols,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for docs in self.streamer.stream_docs(
docs=DocumentArray([Document(text=text)]),
exec_endpoint='/',
):
doc = docs[0]
return {'text': doc.text, 'tags': doc.tags}
self.server = Server(Config(app, host=self.host, port=self.port))
async def run_server(self):
await self.server.serve()
async def shutdown(self):
self.server.should_exit = True
await self.server.shutdown()
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyGateway(Gateway):
def __init__(
self, arg1: str = None, arg2: str = None, arg3: str = 'default-arg3', **kwargs
):
super().__init__(**kwargs)
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
async def setup_server(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for docs in self.streamer.stream_docs(
docs=DocumentArray([Document(text=text)]),
exec_endpoint='/',
):
doc = docs[0]
return {'text': doc.text, 'tags': doc.tags}
self.server = Server(Config(app, host=self.host, port=self.port))
async def run_server(self):
await self.server.serve()
async def shutdown(self):
self.server.should_exit = True
await self.server.shutdown()
|
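The gateway above embeds uvicorn programmatically rather than launching it from the CLI, which is what lets run_server() and shutdown() control its lifecycle. A self-contained sketch of that pattern, with placeholder host and port:

import asyncio
from fastapi import FastAPI
from uvicorn import Config, Server

app = FastAPI()

@app.get('/')
def root():
    return {'status': 'ok'}

async def main():
    server = Server(Config(app, host='127.0.0.1', port=8080))
    # serve() runs until server.should_exit is set, mirroring shutdown() above.
    await server.serve()

if __name__ == '__main__':
    asyncio.run(main())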
"""Argparser module for Flow"""
from jina.parsers.base import set_base_parser
from jina.parsers.helper import KVAppendAction, add_arg_group
from jina.parsers.orchestrate.base import mixin_essential_parser
from jina.parsers.logging import mixin_suppress_root_logging_parser
def mixin_flow_features_parser(parser):
"""Add the arguments for the Flow features to the parser
    :param parser: the parser to configure
"""
from jina.enums import FlowInspectType
gp = add_arg_group(parser, title='Flow Feature')
gp.add_argument(
'--uses',
type=str,
        help='The YAML path that represents a Flow. It can be either a local file path or a URL.',
)
gp.add_argument(
'--reload',
action='store_true',
default=False,
        help='If set, auto-reloading on file changes is enabled: the Flow will restart while blocked if the YAML '
        'configuration source changes. This also applies to underlying Executors, if their source '
        'code or YAML configuration has changed.',
)
gp.add_argument(
'--env',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
        help='The map of environment variables that are available inside the runtime',
)
gp.add_argument(
'--inspect',
type=FlowInspectType.from_string,
choices=list(FlowInspectType),
default=FlowInspectType.COLLECT,
help='''
        The strategy for inspect deployments in the Flow.
        If `REMOVE` is given, all inspect deployments are removed when building the Flow.
''',
)
def set_flow_parser(parser=None):
"""Set the parser for the flow
:param parser: an (optional) initial parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
mixin_essential_parser(parser)
mixin_suppress_root_logging_parser(parser)
mixin_flow_features_parser(parser)
return parser
|
"""Argparser module for Flow"""
from jina.parsers.base import set_base_parser
from jina.parsers.helper import KVAppendAction, add_arg_group
from jina.parsers.orchestrate.base import mixin_essential_parser
def mixin_flow_features_parser(parser):
"""Add the arguments for the Flow features to the parser
    :param parser: the parser to configure
"""
from jina.enums import FlowInspectType
gp = add_arg_group(parser, title='Flow Feature')
gp.add_argument(
'--uses',
type=str,
        help='The YAML path that represents a Flow. It can be either a local file path or a URL.',
)
gp.add_argument(
'--reload',
action='store_true',
default=False,
        help='If set, auto-reloading on file changes is enabled: the Flow will restart while blocked if the YAML '
        'configuration source changes. This also applies to underlying Executors, if their source '
        'code or YAML configuration has changed.',
)
gp.add_argument(
'--env',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
        help='The map of environment variables that are available inside the runtime',
)
gp.add_argument(
'--inspect',
type=FlowInspectType.from_string,
choices=list(FlowInspectType),
default=FlowInspectType.COLLECT,
help='''
        The strategy for inspect deployments in the Flow.
        If `REMOVE` is given, all inspect deployments are removed when building the Flow.
''',
)
def set_flow_parser(parser=None):
"""Set the parser for the flow
:param parser: an (optional) initial parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
mixin_essential_parser(parser)
mixin_flow_features_parser(parser)
return parser
|
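The mixin functions above all follow one shape: take a parser, attach an argument group, return nothing, so parsers are composed by chaining mixins. A plain-argparse sketch of that composition pattern (names are illustrative):

import argparse

def mixin_logging_parser(parser):
    gp = parser.add_argument_group('Logging')
    gp.add_argument('--quiet', action='store_true', help='Suppress log output')

def set_demo_parser(parser=None):
    if not parser:
        parser = argparse.ArgumentParser()
    mixin_logging_parser(parser)
    return parser

args = set_demo_parser().parse_args(['--quiet'])
assert args.quiet is True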
from typing import cast
from pydantic import SecretStr
from langchain_community.embeddings import MiniMaxEmbeddings
def test_initialization_with_alias() -> None:
"""Test minimax embedding model initialization with alias."""
api_key = "your-api-key"
group_id = "your-group-id"
embeddings = MiniMaxEmbeddings(
api_key=api_key, # type: ignore[arg-type]
group_id=group_id,
)
assert cast(SecretStr, embeddings.minimax_api_key).get_secret_value() == api_key
assert embeddings.minimax_group_id == group_id
|
from typing import cast
from pydantic import SecretStr
from langchain_community.embeddings import MiniMaxEmbeddings
def test_initialization_with_alias() -> None:
"""Test minimax embedding model initialization with alias."""
api_key = "your-api-key"
group_id = "your-group-id"
embeddings = MiniMaxEmbeddings( # type: ignore[arg-type, call-arg]
api_key=api_key, # type: ignore[arg-type]
group_id=group_id, # type: ignore[arg-type]
)
assert cast(SecretStr, embeddings.minimax_api_key).get_secret_value() == api_key
assert embeddings.minimax_group_id == group_id
|
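The cast through SecretStr in the test above works because pydantic masks secret fields on display and only reveals them via get_secret_value(); a quick standalone check:

from pydantic import SecretStr

secret = SecretStr('your-api-key')
print(secret)  # ********** -- the repr stays masked
assert secret.get_secret_value() == 'your-api-key'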
# Copyright (c) OpenMMLab. All rights reserved.
from .utils import construct_toy_data, create_full_masks, create_random_bboxes
__all__ = ['create_random_bboxes', 'create_full_masks', 'construct_toy_data']
|
# Copyright (c) OpenMMLab. All rights reserved.
from .utils import create_random_bboxes
__all__ = ['create_random_bboxes']
|
"""Test text splitting functionality using NLTK and Spacy based sentence splitters."""
from typing import Any
import nltk
import pytest
from langchain_core.documents import Document
from langchain_text_splitters.nltk import NLTKTextSplitter
from langchain_text_splitters.spacy import SpacyTextSplitter
def setup_module() -> None:
nltk.download("punkt_tab")
@pytest.fixture
def spacy() -> Any:
try:
import spacy
except ImportError:
pytest.skip("Spacy not installed.")
spacy.cli.download("en_core_web_sm") # type: ignore[attr-defined,operator,unused-ignore]
return spacy
def test_nltk_text_splitting_args() -> None:
"""Test invalid arguments."""
with pytest.raises(ValueError):
NLTKTextSplitter(chunk_size=2, chunk_overlap=4)
def test_spacy_text_splitting_args(spacy: Any) -> None:
"""Test invalid arguments."""
with pytest.raises(ValueError):
SpacyTextSplitter(chunk_size=2, chunk_overlap=4)
def test_nltk_text_splitter() -> None:
"""Test splitting by sentence using NLTK."""
text = "This is sentence one. And this is sentence two."
separator = "|||"
splitter = NLTKTextSplitter(separator=separator)
output = splitter.split_text(text)
expected_output = [f"This is sentence one.{separator}And this is sentence two."]
assert output == expected_output
@pytest.mark.parametrize("pipeline", ["sentencizer", "en_core_web_sm"])
def test_spacy_text_splitter(pipeline: str, spacy: Any) -> None:
"""Test splitting by sentence using Spacy."""
text = "This is sentence one. And this is sentence two."
separator = "|||"
splitter = SpacyTextSplitter(separator=separator, pipeline=pipeline)
output = splitter.split_text(text)
expected_output = [f"This is sentence one.{separator}And this is sentence two."]
assert output == expected_output
@pytest.mark.parametrize("pipeline", ["sentencizer", "en_core_web_sm"])
def test_spacy_text_splitter_strip_whitespace(pipeline: str, spacy: Any) -> None:
"""Test splitting by sentence using Spacy."""
text = "This is sentence one. And this is sentence two."
separator = "|||"
splitter = SpacyTextSplitter(
separator=separator, pipeline=pipeline, strip_whitespace=False
)
output = splitter.split_text(text)
expected_output = [f"This is sentence one. {separator}And this is sentence two."]
assert output == expected_output
def test_nltk_text_splitter_args() -> None:
"""Test invalid arguments for NLTKTextSplitter."""
with pytest.raises(ValueError):
NLTKTextSplitter(
chunk_size=80,
chunk_overlap=0,
separator="\n\n",
use_span_tokenize=True,
)
def test_nltk_text_splitter_with_add_start_index() -> None:
splitter = NLTKTextSplitter(
chunk_size=80,
chunk_overlap=0,
separator="",
use_span_tokenize=True,
add_start_index=True,
)
txt = (
"Innovation drives our success. "
"Collaboration fosters creative solutions. "
"Efficiency enhances data management."
)
docs = [Document(txt)]
chunks = splitter.split_documents(docs)
assert len(chunks) == 2
for chunk in chunks:
s_i = chunk.metadata["start_index"]
assert chunk.page_content == txt[s_i : s_i + len(chunk.page_content)]
|
"""Test text splitting functionality using NLTK and Spacy based sentence splitters."""
from typing import Any
import nltk
import pytest
from langchain_core.documents import Document
from langchain_text_splitters.nltk import NLTKTextSplitter
from langchain_text_splitters.spacy import SpacyTextSplitter
def setup_module() -> None:
nltk.download("punkt_tab")
@pytest.fixture()
def spacy() -> Any:
try:
import spacy
except ImportError:
pytest.skip("Spacy not installed.")
spacy.cli.download("en_core_web_sm") # type: ignore[attr-defined,operator,unused-ignore]
return spacy
def test_nltk_text_splitting_args() -> None:
"""Test invalid arguments."""
with pytest.raises(ValueError):
NLTKTextSplitter(chunk_size=2, chunk_overlap=4)
def test_spacy_text_splitting_args(spacy: Any) -> None:
"""Test invalid arguments."""
with pytest.raises(ValueError):
SpacyTextSplitter(chunk_size=2, chunk_overlap=4)
def test_nltk_text_splitter() -> None:
"""Test splitting by sentence using NLTK."""
text = "This is sentence one. And this is sentence two."
separator = "|||"
splitter = NLTKTextSplitter(separator=separator)
output = splitter.split_text(text)
expected_output = [f"This is sentence one.{separator}And this is sentence two."]
assert output == expected_output
@pytest.mark.parametrize("pipeline", ["sentencizer", "en_core_web_sm"])
def test_spacy_text_splitter(pipeline: str, spacy: Any) -> None:
"""Test splitting by sentence using Spacy."""
text = "This is sentence one. And this is sentence two."
separator = "|||"
splitter = SpacyTextSplitter(separator=separator, pipeline=pipeline)
output = splitter.split_text(text)
expected_output = [f"This is sentence one.{separator}And this is sentence two."]
assert output == expected_output
@pytest.mark.parametrize("pipeline", ["sentencizer", "en_core_web_sm"])
def test_spacy_text_splitter_strip_whitespace(pipeline: str, spacy: Any) -> None:
"""Test splitting by sentence using Spacy."""
text = "This is sentence one. And this is sentence two."
separator = "|||"
splitter = SpacyTextSplitter(
separator=separator, pipeline=pipeline, strip_whitespace=False
)
output = splitter.split_text(text)
expected_output = [f"This is sentence one. {separator}And this is sentence two."]
assert output == expected_output
def test_nltk_text_splitter_args() -> None:
"""Test invalid arguments for NLTKTextSplitter."""
with pytest.raises(ValueError):
NLTKTextSplitter(
chunk_size=80,
chunk_overlap=0,
separator="\n\n",
use_span_tokenize=True,
)
def test_nltk_text_splitter_with_add_start_index() -> None:
splitter = NLTKTextSplitter(
chunk_size=80,
chunk_overlap=0,
separator="",
use_span_tokenize=True,
add_start_index=True,
)
txt = (
"Innovation drives our success. "
"Collaboration fosters creative solutions. "
"Efficiency enhances data management."
)
docs = [Document(txt)]
chunks = splitter.split_documents(docs)
assert len(chunks) == 2
for chunk in chunks:
s_i = chunk.metadata["start_index"]
assert chunk.page_content == txt[s_i : s_i + len(chunk.page_content)]
|
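The final loop in the tests above relies on the invariant that start_index is the chunk's character offset in the source text; the same check can be expressed with plain strings, independent of any splitter:

txt = 'Innovation drives our success. Collaboration fosters creative solutions.'
chunk = 'Collaboration fosters creative solutions.'
start_index = txt.index(chunk)  # what add_start_index records in metadata
assert txt[start_index : start_index + len(chunk)] == chunk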
from typing import Any, Dict, Optional, Union
import PIL.Image
import torch
from torchvision.prototype import datapoints
from torchvision.prototype.transforms import functional as F, Transform
from .utils import is_simple_tensor
class ConvertBoundingBoxFormat(Transform):
_transformed_types = (datapoints.BoundingBox,)
def __init__(self, format: Union[str, datapoints.BoundingBoxFormat]) -> None:
super().__init__()
if isinstance(format, str):
format = datapoints.BoundingBoxFormat[format]
self.format = format
def _transform(self, inpt: datapoints.BoundingBox, params: Dict[str, Any]) -> datapoints.BoundingBox:
# We need to unwrap here to avoid unnecessary `__torch_function__` calls,
# since `convert_format_bounding_box` does not have a dispatcher function that would do that for us
output = F.convert_format_bounding_box(
inpt.as_subclass(torch.Tensor), old_format=inpt.format, new_format=params["format"]
)
return datapoints.BoundingBox.wrap_like(inpt, output, format=params["format"])
class ConvertDtype(Transform):
_transformed_types = (is_simple_tensor, datapoints.Image, datapoints.Video)
def __init__(self, dtype: torch.dtype = torch.float32) -> None:
super().__init__()
self.dtype = dtype
def _transform(
self, inpt: Union[datapoints.TensorImageType, datapoints.TensorVideoType], params: Dict[str, Any]
) -> Union[datapoints.TensorImageType, datapoints.TensorVideoType]:
return F.convert_dtype(inpt, self.dtype)
# We changed the name to align it with the new naming scheme. Still, `ConvertImageDtype` is
# prevalent and well understood. Thus, we just alias it without deprecating the old name.
ConvertImageDtype = ConvertDtype
class ConvertColorSpace(Transform):
_transformed_types = (
is_simple_tensor,
datapoints.Image,
PIL.Image.Image,
datapoints.Video,
)
def __init__(
self,
color_space: Union[str, datapoints.ColorSpace],
old_color_space: Optional[Union[str, datapoints.ColorSpace]] = None,
) -> None:
super().__init__()
if isinstance(color_space, str):
color_space = datapoints.ColorSpace.from_str(color_space)
self.color_space = color_space
if isinstance(old_color_space, str):
old_color_space = datapoints.ColorSpace.from_str(old_color_space)
self.old_color_space = old_color_space
def _transform(
self, inpt: Union[datapoints.ImageType, datapoints.VideoType], params: Dict[str, Any]
) -> Union[datapoints.ImageType, datapoints.VideoType]:
return F.convert_color_space(inpt, color_space=self.color_space, old_color_space=self.old_color_space)
class ClampBoundingBoxes(Transform):
_transformed_types = (datapoints.BoundingBox,)
def _transform(self, inpt: datapoints.BoundingBox, params: Dict[str, Any]) -> datapoints.BoundingBox:
# We need to unwrap here to avoid unnecessary `__torch_function__` calls,
# since `clamp_bounding_box` does not have a dispatcher function that would do that for us
output = F.clamp_bounding_box(
inpt.as_subclass(torch.Tensor), format=inpt.format, spatial_size=inpt.spatial_size
)
return datapoints.BoundingBox.wrap_like(inpt, output)
|
from typing import Any, Dict, Optional, Union
import PIL.Image
import torch
from torchvision.prototype import features
from torchvision.prototype.transforms import functional as F, Transform
class ConvertBoundingBoxFormat(Transform):
_transformed_types = (features.BoundingBox,)
def __init__(self, format: Union[str, features.BoundingBoxFormat]) -> None:
super().__init__()
if isinstance(format, str):
format = features.BoundingBoxFormat[format]
self.format = format
def _transform(self, inpt: features.BoundingBox, params: Dict[str, Any]) -> features.BoundingBox:
# We need to unwrap here to avoid unnecessary `__torch_function__` calls,
# since `convert_format_bounding_box` does not have a dispatcher function that would do that for us
output = F.convert_format_bounding_box(
inpt.as_subclass(torch.Tensor), old_format=inpt.format, new_format=params["format"]
)
return features.BoundingBox.wrap_like(inpt, output, format=params["format"])
class ConvertDtype(Transform):
_transformed_types = (features.is_simple_tensor, features.Image, features.Video)
def __init__(self, dtype: torch.dtype = torch.float32) -> None:
super().__init__()
self.dtype = dtype
def _transform(
self, inpt: Union[features.TensorImageType, features.TensorVideoType], params: Dict[str, Any]
) -> Union[features.TensorImageType, features.TensorVideoType]:
return F.convert_dtype(inpt, self.dtype)
# We changed the name to align it with the new naming scheme. Still, `ConvertImageDtype` is
# prevalent and well understood. Thus, we just alias it without deprecating the old name.
ConvertImageDtype = ConvertDtype
class ConvertColorSpace(Transform):
_transformed_types = (features.is_simple_tensor, features.Image, PIL.Image.Image, features.Video)
def __init__(
self,
color_space: Union[str, features.ColorSpace],
old_color_space: Optional[Union[str, features.ColorSpace]] = None,
) -> None:
super().__init__()
if isinstance(color_space, str):
color_space = features.ColorSpace.from_str(color_space)
self.color_space = color_space
if isinstance(old_color_space, str):
old_color_space = features.ColorSpace.from_str(old_color_space)
self.old_color_space = old_color_space
def _transform(
self, inpt: Union[features.ImageType, features.VideoType], params: Dict[str, Any]
) -> Union[features.ImageType, features.VideoType]:
return F.convert_color_space(inpt, color_space=self.color_space, old_color_space=self.old_color_space)
class ClampBoundingBoxes(Transform):
_transformed_types = (features.BoundingBox,)
def _transform(self, inpt: features.BoundingBox, params: Dict[str, Any]) -> features.BoundingBox:
# We need to unwrap here to avoid unnecessary `__torch_function__` calls,
# since `clamp_bounding_box` does not have a dispatcher function that would do that for us
output = F.clamp_bounding_box(
inpt.as_subclass(torch.Tensor), format=inpt.format, spatial_size=inpt.spatial_size
)
return features.BoundingBox.wrap_like(inpt, output)
|
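A hedged sketch of the kind of coordinate arithmetic convert_format_bounding_box performs, here XYXY to CXCYWH in plain torch (not torchvision's actual implementation):

import torch

def xyxy_to_cxcywh(boxes: torch.Tensor) -> torch.Tensor:
    # boxes: (N, 4) as (x1, y1, x2, y2)
    x1, y1, x2, y2 = boxes.unbind(-1)
    return torch.stack([(x1 + x2) / 2, (y1 + y2) / 2, x2 - x1, y2 - y1], dim=-1)

box = torch.tensor([[10.0, 20.0, 30.0, 60.0]])
assert torch.equal(xyxy_to_cxcywh(box), torch.tensor([[20.0, 40.0, 20.0, 40.0]]))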
"""AgentQL Web Reader."""
import httpx
from typing import Optional, List
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
import logging
logging.getLogger("root").setLevel(logging.INFO)
QUERY_DATA_ENDPOINT = "https://api.agentql.com/v1/query-data"
API_TIMEOUT_SECONDS = 900
REQUEST_ORIGIN = "llamaindex"
class AgentQLWebReader(BasePydanticReader):
"""
    Scrape a URL, with or without an AgentQL query, and return documents in JSON format.
Args:
        api_key (str): The AgentQL API key; get one at https://dev.agentql.com
params (dict): Additional parameters to pass to the AgentQL API. Visit https://docs.agentql.com/rest-api/api-reference for details.
"""
api_key: str
params: Optional[dict]
def __init__(
self,
api_key: str,
params: Optional[dict] = None,
) -> None:
super().__init__(api_key=api_key, params=params)
def load_data(
self, url: str, query: Optional[str] = None, prompt: Optional[str] = None
) -> List[Document]:
"""
        Load data from the given URL.
Args:
url (str): URL to scrape or crawl.
query (Optional[str]): AgentQL query used to specify the scraped data.
prompt (Optional[str]): Natural language description of the data you want to scrape.
Either query or prompt must be provided.
            params (Optional[dict]): Additional parameters to pass to the AgentQL API, supplied via the constructor. Visit https://docs.agentql.com/rest-api/api-reference for details.
Returns:
List[Document]: List of documents.
"""
payload = {"url": url, "query": query, "prompt": prompt, "params": self.params}
headers = {
"X-API-Key": f"{self.api_key}",
"Content-Type": "application/json",
"X-TF-Request-Origin": REQUEST_ORIGIN,
}
try:
response = httpx.post(
QUERY_DATA_ENDPOINT,
headers=headers,
json=payload,
timeout=API_TIMEOUT_SECONDS,
)
response.raise_for_status()
except httpx.HTTPStatusError as e:
response = e.response
if response.status_code in [401, 403]:
raise ValueError(
"Please, provide a valid API Key. You can create one at https://dev.agentql.com."
) from e
else:
try:
error_json = response.json()
msg = (
error_json["error_info"]
if "error_info" in error_json
else error_json["detail"]
)
except (ValueError, TypeError):
msg = f"HTTP {e}."
raise ValueError(msg) from e
else:
json = response.json()
return [Document(text=str(json["data"]), metadata=json["metadata"])]
|
"""AgentQL Web Reader."""
import httpx
from typing import Optional, List
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
import logging
logging.getLogger("root").setLevel(logging.INFO)
QUERY_DATA_ENDPOINT = "https://api.agentql.com/v1/query-data"
API_TIMEOUT_SECONDS = 900
REQUEST_ORIGIN = "llamaindex"
class AgentQLWebReader(BasePydanticReader):
"""
    Scrape a URL, with or without an AgentQL query, and return documents in JSON format.
Args:
        api_key (str): The AgentQL API key; get one at https://dev.agentql.com
params (dict): Additional parameters to pass to the AgentQL API. Visit https://docs.agentql.com/rest-api/api-reference for details.
"""
api_key: str
params: Optional[dict]
def __init__(
self,
api_key: str,
params: Optional[dict] = None,
) -> None:
super().__init__(api_key=api_key, params=params)
def load_data(
self, url: str, query: Optional[str] = None, prompt: Optional[str] = None
) -> List[Document]:
"""
        Load data from the given URL.
Args:
url (str): URL to scrape or crawl.
query (Optional[str]): AgentQL query used to specify the scraped data.
prompt (Optional[str]): Natural language description of the data you want to scrape.
Either query or prompt must be provided.
            params (Optional[dict]): Additional parameters to pass to the AgentQL API, supplied via the constructor. Visit https://docs.agentql.com/rest-api/api-reference for details.
Returns:
List[Document]: List of documents.
"""
payload = {"url": url, "query": query, "prompt": prompt, "params": self.params}
headers = {
"X-API-Key": f"{self.api_key}",
"Content-Type": "application/json",
"X-TF-Request-Origin": REQUEST_ORIGIN,
}
try:
response = httpx.post(
QUERY_DATA_ENDPOINT,
headers=headers,
json=payload,
timeout=API_TIMEOUT_SECONDS,
)
response.raise_for_status()
except httpx.HTTPStatusError as e:
response = e.response
if response.status_code in [401, 403]:
raise ValueError(
"Please, provide a valid API Key. You can create one at https://dev.agentql.com."
) from e
else:
try:
error_json = response.json()
msg = (
error_json["error_info"]
if "error_info" in error_json
else error_json["detail"]
)
except (ValueError, TypeError):
msg = f"HTTP {e}."
raise ValueError(msg) from e
else:
json = response.json()
return [Document(text=str(json["data"]), metadata=json["metadata"])]
|
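The try/except shape in load_data is the standard httpx pattern: raise_for_status() turns 4xx/5xx responses into HTTPStatusError, which still carries the response for inspection. A minimal sketch against a placeholder URL:

import httpx

def fetch(url: str) -> dict:
    try:
        response = httpx.post(url, json={'ping': True}, timeout=10)
        response.raise_for_status()
    except httpx.HTTPStatusError as e:
        # e.response is the failed response; surface a readable error instead.
        raise ValueError(f'request failed with HTTP {e.response.status_code}') from e
    return response.json()

# fetch('https://example.invalid/endpoint')  # placeholder URL, not a real service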
from __future__ import annotations
import asyncio
import threading
from enum import Enum
from typing import TYPE_CHECKING, Any, Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.tools.ainetwork.utils import authenticate
if TYPE_CHECKING:
from ain.ain import Ain
class OperationType(str, Enum):
"""Type of operation as enumerator."""
SET = "SET"
GET = "GET"
class AINBaseTool(BaseTool):
"""Base class for the AINetwork tools."""
interface: Ain = Field(default_factory=authenticate)
"""The interface object for the AINetwork Blockchain."""
def _run(
self,
*args: Any,
run_manager: Optional[CallbackManagerForToolRun] = None,
**kwargs: Any,
) -> str:
try:
loop = asyncio.get_event_loop()
except RuntimeError:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
if loop.is_closed():
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
if loop.is_running():
result_container = []
def thread_target() -> None:
nonlocal result_container
new_loop = asyncio.new_event_loop()
asyncio.set_event_loop(new_loop)
try:
result_container.append(
new_loop.run_until_complete(self._arun(*args, **kwargs))
)
except Exception as e:
result_container.append(e)
finally:
new_loop.close()
thread = threading.Thread(target=thread_target)
thread.start()
thread.join()
result = result_container[0]
if isinstance(result, Exception):
raise result
return result
else:
result = loop.run_until_complete(self._arun(*args, **kwargs))
loop.close()
return result
|
from __future__ import annotations
import asyncio
import threading
from enum import Enum
from typing import TYPE_CHECKING, Any, Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.tools.ainetwork.utils import authenticate
if TYPE_CHECKING:
from ain.ain import Ain
class OperationType(str, Enum):
"""Type of operation as enumerator."""
SET = "SET"
GET = "GET"
class AINBaseTool(BaseTool): # type: ignore[override]
"""Base class for the AINetwork tools."""
interface: Ain = Field(default_factory=authenticate)
"""The interface object for the AINetwork Blockchain."""
def _run(
self,
*args: Any,
run_manager: Optional[CallbackManagerForToolRun] = None,
**kwargs: Any,
) -> str:
try:
loop = asyncio.get_event_loop()
except RuntimeError:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
if loop.is_closed():
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
if loop.is_running():
result_container = []
def thread_target() -> None:
nonlocal result_container
new_loop = asyncio.new_event_loop()
asyncio.set_event_loop(new_loop)
try:
result_container.append(
new_loop.run_until_complete(self._arun(*args, **kwargs))
)
except Exception as e:
result_container.append(e)
finally:
new_loop.close()
thread = threading.Thread(target=thread_target)
thread.start()
thread.join()
result = result_container[0]
if isinstance(result, Exception):
raise result
return result
else:
result = loop.run_until_complete(self._arun(*args, **kwargs))
loop.close()
return result
|
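The thread dance in _run exists because run_until_complete() cannot be called on a loop that is already running; the coroutine must be driven by a fresh loop on a separate thread. A distilled version of that fallback in pure asyncio/threading, with no jina types:

import asyncio
import threading

async def work() -> str:
    await asyncio.sleep(0)
    return 'done'

def run_sync(coro):
    result = []

    def target() -> None:
        loop = asyncio.new_event_loop()
        try:
            result.append(loop.run_until_complete(coro))
        finally:
            loop.close()

    thread = threading.Thread(target=target)
    thread.start()
    thread.join()
    return result[0]

assert run_sync(work()) == 'done'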
import torch
from torch import nn, Tensor
from typing import Iterable, Dict
class MSELoss(nn.Module):
def __init__(self, model):
"""
Computes the MSE loss between the computed sentence embedding and a target sentence embedding. This loss
is used when extending sentence embeddings to new languages as described in our publication
Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation.
For an example, see `the distillation documentation <../../examples/training/distillation/README.html>`_ on extending language models to new languages.
:param model: SentenceTransformerModel
References:
- Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation: https://arxiv.org/abs/2004.09813
- `Training > Model Distillation <../../examples/training/distillation/README.html>`_
- `Training > Multilingual Models <../../examples/training/multilingual/README.html>`_
Requirements:
1. Usually uses a finetuned teacher M in a knowledge distillation setup
Relations:
- :class:`MarginMSELoss` is equivalent to this loss, but with a margin through a negative pair.
Input:
+-----------------------------------------+-----------------------------+
| Texts | Labels |
+=========================================+=============================+
| sentence | model sentence embeddings |
+-----------------------------------------+-----------------------------+
| sentence_1, sentence_2, ..., sentence_N | model sentence embeddings |
+-----------------------------------------+-----------------------------+
Example::
from sentence_transformers import SentenceTransformer, InputExample, losses
from torch.utils.data import DataLoader
model_en = SentenceTransformer('bert-base-cased')
model_fr = SentenceTransformer('flaubert/flaubert_base_cased')
examples_en = ['The first sentence', 'The second sentence', 'The third sentence', 'The fourth sentence']
examples_fr = ['La première phrase', 'La deuxième phrase', 'La troisième phrase', 'La quatrième phrase']
train_batch_size = 2
labels_en_en = model_en.encode(examples_en)
examples_en_fr = [InputExample(texts=[x], label=labels_en_en[i]) for i, x in enumerate(examples_en)]
loader_en_fr = DataLoader(examples_en_fr, batch_size=train_batch_size)
examples_fr_fr = [InputExample(texts=[x], label=labels_en_en[i]) for i, x in enumerate(examples_fr)]
loader_fr_fr = DataLoader(examples_fr_fr, batch_size=train_batch_size)
train_loss = losses.MSELoss(model=model_fr)
model_fr.fit(
[(loader_en_fr, train_loss), (loader_fr_fr, train_loss)],
epochs=10,
)
"""
super(MSELoss, self).__init__()
self.model = model
self.loss_fct = nn.MSELoss()
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
# Concatenate multiple inputs on the batch dimension
embeddings = torch.cat([self.model(inputs)["sentence_embedding"] for inputs in sentence_features], dim=0)
if len(sentence_features) > 1:
# Repeat the labels for each input
labels = labels.repeat(len(sentence_features), 1)
return self.loss_fct(embeddings, labels)
@property
def citation(self) -> str:
return """
@inproceedings{reimers-2020-multilingual-sentence-bert,
title = "Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation",
author = "Reimers, Nils and Gurevych, Iryna",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing",
month = "11",
year = "2020",
publisher = "Association for Computational Linguistics",
url = "https://arxiv.org/abs/2004.09813",
}
"""
|
from torch import nn, Tensor
from typing import Iterable, Dict
class MSELoss(nn.Module):
def __init__(self, model):
"""
Computes the MSE loss between the computed sentence embedding and a target sentence embedding. This loss
is used when extending sentence embeddings to new languages as described in our publication
Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation.
For an example, see `the distillation documentation <../../examples/training/distillation/README.html>`_ on extending language models to new languages.
:param model: SentenceTransformerModel
References:
- Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation: https://arxiv.org/abs/2004.09813
- `Training > Model Distillation <../../examples/training/distillation/README.html>`_
- `Training > Multilingual Models <../../examples/training/multilingual/README.html>`_
Requirements:
1. Usually uses a finetuned teacher M in a knowledge distillation setup
Relations:
- :class:`MarginMSELoss` is equivalent to this loss, but with a margin through a negative pair.
Input:
+-------------------+-----------------------------+
| Texts | Labels |
+===================+=============================+
| single sentences | model sentence embeddings |
+-------------------+-----------------------------+
Example::
from sentence_transformers import SentenceTransformer, InputExample, losses
from torch.utils.data import DataLoader
model_en = SentenceTransformer('bert-base-cased')
model_fr = SentenceTransformer('flaubert/flaubert_base_cased')
examples_en = ['The first sentence', 'The second sentence', 'The third sentence', 'The fourth sentence']
examples_fr = ['La première phrase', 'La deuxième phrase', 'La troisième phrase', 'La quatrième phrase']
train_batch_size = 2
labels_en_en = model_en.encode(examples_en)
examples_en_fr = [InputExample(texts=[x], label=labels_en_en[i]) for i, x in enumerate(examples_en)]
loader_en_fr = DataLoader(examples_en_fr, batch_size=train_batch_size)
examples_fr_fr = [InputExample(texts=[x], label=labels_en_en[i]) for i, x in enumerate(examples_fr)]
loader_fr_fr = DataLoader(examples_fr_fr, batch_size=train_batch_size)
train_loss = losses.MSELoss(model=model_fr)
model_fr.fit(
[(loader_en_fr, train_loss), (loader_fr_fr, train_loss)],
epochs=10,
)
"""
super(MSELoss, self).__init__()
self.model = model
self.loss_fct = nn.MSELoss()
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
rep = self.model(sentence_features[0])["sentence_embedding"]
return self.loss_fct(rep, labels)
@property
def citation(self) -> str:
return """
@inproceedings{reimers-2020-multilingual-sentence-bert,
title = "Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation",
author = "Reimers, Nils and Gurevych, Iryna",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing",
month = "11",
year = "2020",
publisher = "Association for Computational Linguistics",
url = "https://arxiv.org/abs/2004.09813",
}
"""
|
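At its core the loss above is plain nn.MSELoss between student embeddings and precomputed teacher embeddings; a standalone check of that reduction with toy tensors:

import torch
from torch import nn

teacher = torch.tensor([[1.0, 0.0], [0.0, 1.0]])  # target embeddings (the labels)
student = torch.tensor([[0.5, 0.0], [0.0, 0.5]])  # model output embeddings
loss = nn.MSELoss()(student, teacher)
assert torch.isclose(loss, torch.tensor(0.125))  # mean of squared deltas: (0.25 + 0.25) / 4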
# Copyright (c) OpenMMLab. All rights reserved.
from .class_names import (cityscapes_classes, coco_classes, dataset_aliases,
get_classes, imagenet_det_classes,
imagenet_vid_classes, voc_classes)
from .eval_hooks import DistEvalHook, EvalHook
from .mean_ap import average_precision, eval_map, print_map_summary
from .recall import (eval_recalls, plot_iou_recall, plot_num_recall,
print_recall_summary)
__all__ = [
'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes',
'coco_classes', 'cityscapes_classes', 'dataset_aliases', 'get_classes',
'DistEvalHook', 'EvalHook', 'average_precision', 'eval_map',
'print_map_summary', 'eval_recalls', 'print_recall_summary',
'plot_num_recall', 'plot_iou_recall'
]
|
from .class_names import (cityscapes_classes, coco_classes, dataset_aliases,
get_classes, imagenet_det_classes,
imagenet_vid_classes, voc_classes)
from .eval_hooks import DistEvalHook, EvalHook
from .mean_ap import average_precision, eval_map, print_map_summary
from .recall import (eval_recalls, plot_iou_recall, plot_num_recall,
print_recall_summary)
__all__ = [
'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes',
'coco_classes', 'cityscapes_classes', 'dataset_aliases', 'get_classes',
'DistEvalHook', 'EvalHook', 'average_precision', 'eval_map',
'print_map_summary', 'eval_recalls', 'print_recall_summary',
'plot_num_recall', 'plot_iou_recall'
]
|
_base_ = './yolov3_d53_mstrain-608_273e_coco.py'
# dataset settings
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
# `mean` and `to_rgb` should be the same with the `preprocess_cfg`
dict(type='Expand', mean=[0, 0, 0], to_rgb=True, ratio_range=(1, 2)),
dict(
type='MinIoURandomCrop',
min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
min_crop_size=0.3),
dict(type='RandomResize', scale=[(320, 320), (416, 416)], keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(416, 416), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
|
_base_ = './yolov3_d53_mstrain-608_273e_coco.py'
# dataset settings
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
# `mean` and `to_rgb` should be the same with the `preprocess_cfg`
dict(type='Expand', mean=[0, 0, 0], to_rgb=True, ratio_range=(1, 2)),
dict(
type='MinIoURandomCrop',
min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
min_crop_size=0.3),
dict(type='RandomResize', scale=[(320, 320), (416, 416)], keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(416, 416), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
|
from .bifpn import BiFPN
from .efficientdet import EfficientDet
from .efficientdet_head import EfficientDetSepBNHead
from .huber_loss import HuberLoss
from .tensorflow.anchor_generator import YXYXAnchorGenerator
from .tensorflow.coco_90class import Coco90Dataset
from .tensorflow.coco_90metric import Coco90Metric
from .tensorflow.trans_max_iou_assigner import TransMaxIoUAssigner
from .tensorflow.yxyx_bbox_coder import YXYXDeltaXYWHBBoxCoder
from .utils import Conv2dSamePadding
__all__ = [
'EfficientDet', 'BiFPN', 'HuberLoss', 'EfficientDetSepBNHead',
'Conv2dSamePadding', 'Coco90Dataset', 'Coco90Metric',
'YXYXAnchorGenerator', 'TransMaxIoUAssigner', 'YXYXDeltaXYWHBBoxCoder'
]
|
from .anchor_generator import YXYXAnchorGenerator
from .bifpn import BiFPN
from .coco_90class import Coco90Dataset
from .coco_90metric import Coco90Metric
from .efficientdet import EfficientDet
from .efficientdet_head import EfficientDetSepBNHead
from .trans_max_iou_assigner import TransMaxIoUAssigner
from .yxyx_bbox_coder import YXYXDeltaXYWHBBoxCoder
__all__ = [
'EfficientDet', 'BiFPN', 'EfficientDetSepBNHead', 'YXYXAnchorGenerator',
'YXYXDeltaXYWHBBoxCoder', 'Coco90Dataset', 'Coco90Metric',
'TransMaxIoUAssigner'
]
|
_base_ = 'ssd300_voc0712.py'
input_size = 512
model = dict(
neck=dict(
out_channels=(512, 1024, 512, 256, 256, 256, 256),
level_strides=(2, 2, 2, 2, 1),
level_paddings=(1, 1, 1, 1, 1),
last_kernel_size=4),
bbox_head=dict(
in_channels=(512, 1024, 512, 256, 256, 256, 256),
anchor_generator=dict(
input_size=input_size,
strides=[8, 16, 32, 64, 128, 256, 512],
basesize_ratio_range=(0.15, 0.9),
ratios=([2], [2, 3], [2, 3], [2, 3], [2, 3], [2], [2]))))
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(512, 512), keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(512, 512),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(dataset=dict(pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
_base_ = 'ssd300_voc0712.py'
input_size = 512
model = dict(
bbox_head=dict(
in_channels=(512, 1024, 512, 256, 256, 256, 256),
anchor_generator=dict(
input_size=input_size,
strides=[8, 16, 32, 64, 128, 256, 512],
basesize_ratio_range=(0.15, 0.9),
ratios=([2], [2, 3], [2, 3], [2, 3], [2, 3], [2], [2]))))
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(512, 512), keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(512, 512),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(dataset=dict(pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
import logging
import pathlib
from postmarker.core import PostmarkClient
from postmarker.models.emails import EmailManager
from prisma.enums import NotificationType
from pydantic import BaseModel
from backend.data.notifications import (
NotificationEventModel,
NotificationTypeOverride,
T_co,
)
from backend.util.settings import Settings
from backend.util.text import TextFormatter
logger = logging.getLogger(__name__)
settings = Settings()
# The following is a workaround to get the type checker to recognize the EmailManager type
# This is a temporary solution and should be removed once the Postmark library is updated
# to support type annotations.
class TypedPostmarkClient(PostmarkClient):
emails: EmailManager
class Template(BaseModel):
subject_template: str
body_template: str
base_template: str
class EmailSender:
def __init__(self):
if settings.secrets.postmark_server_api_token:
self.postmark = TypedPostmarkClient(
server_token=settings.secrets.postmark_server_api_token
)
else:
logger.warning(
"Postmark server API token not found, email sending disabled"
)
self.postmark = None
self.formatter = TextFormatter()
def send_templated(
self,
notification: NotificationType,
user_email: str,
data: NotificationEventModel[T_co] | list[NotificationEventModel[T_co]],
):
"""Send an email to a user using a template pulled from the notification type"""
if not self.postmark:
logger.warning("Postmark client not initialized, email not sent")
return
template = self._get_template(notification)
try:
subject, full_message = self.formatter.format_email(
base_template=template.base_template,
subject_template=template.subject_template,
content_template=template.body_template,
data=data,
unsubscribe_link="https://autogpt.com/unsubscribe",
)
except Exception as e:
logger.error(f"Error formatting full message: {e}")
raise e
self._send_email(user_email, subject, full_message)
def _get_template(self, notification: NotificationType):
# convert the notification type to a notification type override
notification_type_override = NotificationTypeOverride(notification)
        # locate the template at templates/<name>.jinja2 (the .template property supplies the file name)
template_path = f"templates/{notification_type_override.template}.jinja2"
logger.debug(
f"Template full path: {pathlib.Path(__file__).parent / template_path}"
)
base_template_path = "templates/base.html.jinja2"
with open(pathlib.Path(__file__).parent / base_template_path, "r") as file:
base_template = file.read()
with open(pathlib.Path(__file__).parent / template_path, "r") as file:
template = file.read()
return Template(
subject_template=notification_type_override.subject,
body_template=template,
base_template=base_template,
)
def _send_email(self, user_email: str, subject: str, body: str):
if not self.postmark:
logger.warning("Email tried to send without postmark configured")
return
logger.debug(f"Sending email to {user_email} with subject {subject}")
self.postmark.emails.send(
From=settings.config.postmark_sender_email,
To=user_email,
Subject=subject,
HtmlBody=body,
)
|
import logging
import pathlib
from postmarker.core import PostmarkClient
from postmarker.models.emails import EmailManager
from prisma.enums import NotificationType
from pydantic import BaseModel
from backend.data.notifications import (
NotificationEventModel,
NotificationTypeOverride,
T_co,
)
from backend.util.settings import Settings
from backend.util.text import TextFormatter
logger = logging.getLogger(__name__)
settings = Settings()
# The following is a workaround to get the type checker to recognize the EmailManager type
# This is a temporary solution and should be removed once the Postmark library is updated
# to support type annotations.
class TypedPostmarkClient(PostmarkClient):
emails: EmailManager
class Template(BaseModel):
subject_template: str
body_template: str
base_template: str
class EmailSender:
def __init__(self):
if settings.secrets.postmark_server_api_token:
self.postmark = TypedPostmarkClient(
server_token=settings.secrets.postmark_server_api_token
)
        else:
            logger.warning(
                "Postmark server API token not found, email sending disabled"
            )
            self.postmark = None
self.formatter = TextFormatter()
def send_templated(
self,
notification: NotificationType,
user_email: str,
data: NotificationEventModel[T_co] | list[NotificationEventModel[T_co]],
):
"""Send an email to a user using a template pulled from the notification type"""
if not self.postmark:
logger.warning("Postmark client not initialized, email not sent")
return
template = self._get_template(notification)
try:
subject, full_message = self.formatter.format_email(
base_template=template.base_template,
subject_template=template.subject_template,
content_template=template.body_template,
data=data,
unsubscribe_link="https://autogpt.com/unsubscribe",
)
except Exception as e:
logger.error(f"Error formatting full message: {e}")
raise e
self._send_email(user_email, subject, full_message)
def _get_template(self, notification: NotificationType):
# convert the notification type to a notification type override
notification_type_override = NotificationTypeOverride(notification)
        # locate the template at templates/<name>.jinja2 (.template already includes the ".html" suffix)
template_path = f"templates/{notification_type_override.template}.jinja2"
logger.debug(
f"Template full path: {pathlib.Path(__file__).parent / template_path}"
)
base_template_path = "templates/base.html.jinja2"
with open(pathlib.Path(__file__).parent / base_template_path, "r") as file:
base_template = file.read()
with open(pathlib.Path(__file__).parent / template_path, "r") as file:
template = file.read()
return Template(
subject_template=notification_type_override.subject,
body_template=template,
base_template=base_template,
)
def _send_email(self, user_email: str, subject: str, body: str):
logger.debug(f"Sending email to {user_email} with subject {subject}")
self.postmark.emails.send(
From=settings.config.postmark_sender_email,
To=user_email,
Subject=subject,
HtmlBody=body,
)
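# Illustrative usage sketch (not part of the module; the names below are assumptions):
# requires the Postmark server token to be configured and the templates/ files to exist.
#
#   sender = EmailSender()
#   sender.send_templated(
#       notification=NotificationType.AGENT_RUN,  # hypothetical enum member
#       user_email="user@example.com",
#       data=event,  # a NotificationEventModel matching the template's fields
#   )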
|
"""Init file of LlamaIndex."""
__version__ = "0.12.13"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"VellumPredictor",
"VellumPromptRegistry",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
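# Illustrative quickstart (a sketch, not part of the package): assumes an embedding/LLM
# provider is configured (e.g. via OPENAI_API_KEY) and a local "data" directory exists.
#
#   documents = SimpleDirectoryReader("data").load_data()
#   index = VectorStoreIndex.from_documents(documents)
#   response = index.as_query_engine().query("What is this document about?")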
|
"""Init file of LlamaIndex."""
__version__ = "0.12.12"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"VellumPredictor",
"VellumPromptRegistry",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
from typing import TYPE_CHECKING
import paddle
if TYPE_CHECKING:
from paddle import tensor
import numpy
def cosine(
x_mat: 'tensor', y_mat: 'tensor', eps: float = 1e-7, device: str = 'cpu'
) -> 'numpy.ndarray':
"""Cosine distance between each row in x_mat and each row in y_mat.
    :param x_mat: paddle tensor with ndim=2
    :param y_mat: paddle tensor with ndim=2
    :param eps: a small jitter to avoid divide by zero
:param device: the computational device for `embed_model`, can be either `cpu` or `cuda`.
:return: np.ndarray with ndim=2
"""
paddle.set_device(device)
a_n, b_n = x_mat.norm(axis=1)[:, None], y_mat.norm(axis=1)[:, None]
a_norm = x_mat / paddle.clip(a_n, min=eps)
b_norm = y_mat / paddle.clip(b_n, min=eps)
sim_mt = 1 - paddle.mm(a_norm, b_norm.transpose(perm=[1, 0]))
return sim_mt.numpy()
def sqeuclidean(
x_mat: 'tensor', y_mat: 'tensor', device: str = 'cpu'
) -> 'numpy.ndarray':
"""Squared euclidean distance between each row in x_mat and each row in y_mat.
:param x_mat: paddle array with ndim=2
:param y_mat: paddle array with ndim=2
:param device: the computational device for `embed_model`, can be either `cpu` or `cuda`.
:return: np.ndarray with ndim=2
"""
paddle.set_device(device)
return (
paddle.sum(y_mat**2, axis=1)
+ paddle.sum(x_mat**2, axis=1)[:, None]
- 2 * paddle.mm(x_mat, y_mat.transpose(perm=[1, 0]))
).numpy()
def euclidean(x_mat: 'tensor', y_mat: 'tensor', device: str = 'cpu') -> 'numpy.ndarray':
"""Euclidean distance between each row in x_mat and each row in y_mat.
:param x_mat: paddle array with ndim=2
:param y_mat: paddle array with ndim=2
:param device: the computational device for `embed_model`, can be either `cpu` or `cuda`.
:return: np.ndarray with ndim=2
"""
paddle.set_device(device)
return paddle.sqrt(
paddle.sum(y_mat**2, axis=1)
+ paddle.sum(x_mat**2, axis=1)[:, None]
- 2 * paddle.mm(x_mat, y_mat.transpose(perm=[1, 0]))
).numpy()
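# Minimal usage sketch (illustrative, not part of the module): compare the three
# distance functions on two small matrices. Assumes paddlepaddle is installed.
if __name__ == '__main__':
    x = paddle.to_tensor([[1.0, 0.0], [0.0, 1.0]])
    y = paddle.to_tensor([[1.0, 0.0], [1.0, 1.0]])
    print(cosine(x, y))       # 2x2 matrix of cosine distances
    print(sqeuclidean(x, y))  # 2x2 matrix of squared Euclidean distances
    print(euclidean(x, y))    # 2x2 matrix of Euclidean distances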
|
from typing import TYPE_CHECKING
import paddle
if TYPE_CHECKING:
from paddle import tensor
import numpy
def cosine(
x_mat: 'tensor', y_mat: 'tensor', eps: float = 1e-7, device: str = 'cpu'
) -> 'numpy.ndarray':
"""Cosine distance between each row in x_mat and each row in y_mat.
    :param x_mat: paddle tensor with ndim=2
    :param y_mat: paddle tensor with ndim=2
    :param eps: a small jitter to avoid divide by zero
:param device: the computational device for `embed_model`, can be either `cpu` or `cuda`.
:return: np.ndarray with ndim=2
"""
paddle.set_device(device)
a_n, b_n = x_mat.norm(axis=1)[:, None], y_mat.norm(axis=1)[:, None]
a_norm = x_mat / paddle.clip(a_n, min=eps)
b_norm = y_mat / paddle.clip(b_n, min=eps)
sim_mt = 1 - paddle.mm(a_norm, b_norm.transpose(perm=[1, 0]))
return sim_mt.numpy()
def sqeuclidean(
x_mat: 'tensor', y_mat: 'tensor', device: str = 'cpu'
) -> 'numpy.ndarray':
"""Squared euclidean distance between each row in x_mat and each row in y_mat.
:param x_mat: paddle array with ndim=2
:param y_mat: paddle array with ndim=2
:param device: the computational device for `embed_model`, can be either `cpu` or `cuda`.
:return: np.ndarray with ndim=2
"""
paddle.set_device(device)
return (
paddle.sum(y_mat ** 2, axis=1)
+ paddle.sum(x_mat ** 2, axis=1)[:, None]
- 2 * paddle.mm(x_mat, y_mat.transpose(perm=[1, 0]))
).numpy()
def euclidean(x_mat: 'tensor', y_mat: 'tensor', device: str = 'cpu') -> 'numpy.ndarray':
"""Euclidean distance between each row in x_mat and each row in y_mat.
:param x_mat: paddle array with ndim=2
:param y_mat: paddle array with ndim=2
:param device: the computational device for `embed_model`, can be either `cpu` or `cuda`.
:return: np.ndarray with ndim=2
"""
paddle.set_device(device)
return paddle.sqrt(
paddle.sum(y_mat ** 2, axis=1)
+ paddle.sum(x_mat ** 2, axis=1)[:, None]
- 2 * paddle.mm(x_mat, y_mat.transpose(perm=[1, 0]))
).numpy()
|
"""Test the comparison chains."""
import re
import pytest
from langchain.evaluation.comparison.eval_chain import (
LabeledPairwiseStringEvalChain,
PairwiseStringEvalChain,
PairwiseStringResultOutputParser,
resolve_pairwise_criteria,
)
from langchain.evaluation.criteria.eval_chain import Criteria
from tests.unit_tests.llms.fake_llm import FakeLLM
@pytest.mark.parametrize("criterion", list(Criteria))
def test_resolve_criteria_enum(criterion: Criteria) -> None:
val = resolve_pairwise_criteria(criterion)
assert isinstance(val, dict)
assert next(iter(val)) == criterion.value
def test_resolve_criteria_list_enum() -> None:
val = resolve_pairwise_criteria(list(Criteria))
assert isinstance(val, dict)
assert set(val.keys()) == {c.value for c in list(Criteria)}
def test_PairwiseStringResultOutputParser_parse() -> None:
output_parser = PairwiseStringResultOutputParser()
text = """I like pie better than cake.
[[A]]"""
got = output_parser.parse(text)
want = {
"reasoning": text,
"value": "A",
"score": 1,
}
assert got.get("reasoning") == want["reasoning"]
assert got.get("value") == want["value"]
assert got.get("score") == want["score"]
text = """I like cake better than pie.
[[B]]"""
got = output_parser.parse(text)
want = {
"reasoning": text,
"value": "B",
"score": 0,
}
assert got.get("reasoning") == want["reasoning"]
assert got.get("value") == want["value"]
assert got.get("score") == want["score"]
text = """I like cake and pie.
[[C]]"""
got = output_parser.parse(text)
want = {
"reasoning": text,
"value": None,
"score": 0.5,
}
assert got.get("reasoning") == want["reasoning"]
assert got.get("value") == want["value"]
assert got.get("score") == want["score"]
def test_pairwise_string_comparison_chain() -> None:
llm = FakeLLM(
queries={
"a": "The values are the same.\n[[C]]",
"b": "A is clearly better than b.\n[[A]]",
"c": "B is clearly better than a.\n[[B]]",
},
sequential_responses=True,
)
chain = PairwiseStringEvalChain.from_llm(llm=llm)
res = chain.evaluate_string_pairs(
prediction="I like pie.",
prediction_b="I love pie.",
input="What is your favorite food?",
)
assert res["value"] is None
assert res["score"] == 0.5
assert res["reasoning"] == "The values are the same.\n[[C]]"
res = chain.evaluate_string_pairs(
prediction="I like pie.",
prediction_b="I like pie.",
input="What is your favorite food?",
)
assert res["value"] == "A"
assert res["score"] == 1
with pytest.warns(UserWarning, match=re.escape(chain._skip_reference_warning)):
res = chain.evaluate_string_pairs(
prediction="I like pie.",
prediction_b="I hate pie.",
input="What is your favorite food?",
reference="I enjoy pie.",
)
assert res["value"] == "B"
assert res["score"] == 0
def test_labeled_pairwise_string_comparison_chain_missing_ref() -> None:
llm = FakeLLM(
queries={
"a": "The values are the same.\n[[C]]",
"b": "A is clearly better than b.\n[[A]]",
"c": "B is clearly better than a.\n[[B]]",
},
sequential_responses=True,
)
chain = LabeledPairwiseStringEvalChain.from_llm(llm=llm)
with pytest.raises(ValueError):
chain.evaluate_string_pairs(
prediction="I like pie.",
prediction_b="I love pie.",
input="What is your favorite food?",
)
|
"""Test the comparison chains."""
import re
import pytest
from langchain.evaluation.comparison.eval_chain import (
LabeledPairwiseStringEvalChain,
PairwiseStringEvalChain,
PairwiseStringResultOutputParser,
resolve_pairwise_criteria,
)
from langchain.evaluation.criteria.eval_chain import Criteria
from tests.unit_tests.llms.fake_llm import FakeLLM
@pytest.mark.parametrize("criterion", list(Criteria))
def test_resolve_criteria_enum(criterion: Criteria) -> None:
val = resolve_pairwise_criteria(criterion)
assert isinstance(val, dict)
assert next(iter(val)) == criterion.value
def test_resolve_criteria_list_enum() -> None:
val = resolve_pairwise_criteria(list(Criteria))
assert isinstance(val, dict)
assert set(val.keys()) == set(c.value for c in list(Criteria))
def test_PairwiseStringResultOutputParser_parse() -> None:
output_parser = PairwiseStringResultOutputParser()
text = """I like pie better than cake.
[[A]]"""
got = output_parser.parse(text)
want = {
"reasoning": text,
"value": "A",
"score": 1,
}
assert got.get("reasoning") == want["reasoning"]
assert got.get("value") == want["value"]
assert got.get("score") == want["score"]
text = """I like cake better than pie.
[[B]]"""
got = output_parser.parse(text)
want = {
"reasoning": text,
"value": "B",
"score": 0,
}
assert got.get("reasoning") == want["reasoning"]
assert got.get("value") == want["value"]
assert got.get("score") == want["score"]
text = """I like cake and pie.
[[C]]"""
got = output_parser.parse(text)
want = {
"reasoning": text,
"value": None,
"score": 0.5,
}
assert got.get("reasoning") == want["reasoning"]
assert got.get("value") == want["value"]
assert got.get("score") == want["score"]
def test_pairwise_string_comparison_chain() -> None:
llm = FakeLLM(
queries={
"a": "The values are the same.\n[[C]]",
"b": "A is clearly better than b.\n[[A]]",
"c": "B is clearly better than a.\n[[B]]",
},
sequential_responses=True,
)
chain = PairwiseStringEvalChain.from_llm(llm=llm)
res = chain.evaluate_string_pairs(
prediction="I like pie.",
prediction_b="I love pie.",
input="What is your favorite food?",
)
assert res["value"] is None
assert res["score"] == 0.5
assert res["reasoning"] == "The values are the same.\n[[C]]"
res = chain.evaluate_string_pairs(
prediction="I like pie.",
prediction_b="I like pie.",
input="What is your favorite food?",
)
assert res["value"] == "A"
assert res["score"] == 1
with pytest.warns(UserWarning, match=re.escape(chain._skip_reference_warning)):
res = chain.evaluate_string_pairs(
prediction="I like pie.",
prediction_b="I hate pie.",
input="What is your favorite food?",
reference="I enjoy pie.",
)
assert res["value"] == "B"
assert res["score"] == 0
def test_labeled_pairwise_string_comparison_chain_missing_ref() -> None:
llm = FakeLLM(
queries={
"a": "The values are the same.\n[[C]]",
"b": "A is clearly better than b.\n[[A]]",
"c": "B is clearly better than a.\n[[B]]",
},
sequential_responses=True,
)
chain = LabeledPairwiseStringEvalChain.from_llm(llm=llm)
with pytest.raises(ValueError):
chain.evaluate_string_pairs(
prediction="I like pie.",
prediction_b="I love pie.",
input="What is your favorite food?",
)
|
from .depends import requires_admin_user, requires_user
from .jwt_utils import parse_jwt_token
from .middleware import APIKeyValidator, auth_middleware
from .models import User
__all__ = [
"parse_jwt_token",
"requires_user",
"requires_admin_user",
"APIKeyValidator",
"auth_middleware",
"User",
]
|
from .config import Settings
from .depends import requires_admin_user, requires_user
from .jwt_utils import parse_jwt_token
from .middleware import APIKeyValidator, auth_middleware
from .models import User
__all__ = [
"Settings",
"parse_jwt_token",
"requires_user",
"requires_admin_user",
"APIKeyValidator",
"auth_middleware",
"User",
]
|
# THIS FILE HAS BEEN AUTOGENERATED. To update:
# 1. modify the `_deps` dict in setup.py
# 2. run `make deps_table_update`
deps = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.31.0",
"compel": "compel==0.1.8",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.27.0",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark>=0.2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.4.1",
"jaxlib": "jaxlib>=0.4.1",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"parameterized": "parameterized",
"peft": "peft>=0.6.0",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ruff": "ruff==0.1.5",
"safetensors": "safetensors>=0.3.1",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"GitPython": "GitPython<3.1.19",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"tiktoken": "tiktoken>=0.7.0",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.41.2",
"urllib3": "urllib3<=2.0.0",
"black": "black",
"phonemizer": "phonemizer",
}
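# Illustrative lookup (not part of the generated file): each key maps a package
# name to its pinned requirement string, e.g. deps["torch"] == "torch>=1.4".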
|
# THIS FILE HAS BEEN AUTOGENERATED. To update:
# 1. modify the `_deps` dict in setup.py
# 2. run `make deps_table_update`
deps = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.31.0",
"compel": "compel==0.1.8",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.27.0",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark>=0.2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.4.1",
"jaxlib": "jaxlib>=0.4.1",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"parameterized": "parameterized",
"peft": "peft>=0.6.0",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ruff": "ruff==0.1.5",
"safetensors": "safetensors>=0.3.1",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"GitPython": "GitPython<3.1.19",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.41.2",
"urllib3": "urllib3<=2.0.0",
"black": "black",
"phonemizer": "phonemizer",
}
|
from __future__ import annotations
from collections.abc import Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from .ContrastiveLoss import SiameseDistanceMetric
class OnlineContrastiveLoss(nn.Module):
def __init__(
self, model: SentenceTransformer, distance_metric=SiameseDistanceMetric.COSINE_DISTANCE, margin: float = 0.5
) -> None:
"""
        This Online Contrastive loss is similar to :class:`ContrastiveLoss`, but it selects hard positive pairs (positives
        that are far apart) and hard negative pairs (negatives that are close) and computes the loss only for these pairs.
        This loss often yields better performance than ContrastiveLoss.
Args:
model: SentenceTransformer model
distance_metric: Function that returns a distance between
two embeddings. The class SiameseDistanceMetric contains
pre-defined metrics that can be used
margin: Negative samples (label == 0) should have a distance
of at least the margin value.
References:
- `Training Examples > Quora Duplicate Questions <../../examples/training/quora_duplicate_questions/README.html>`_
Requirements:
1. (anchor, positive/negative) pairs
2. Data should include hard positives and hard negatives
Inputs:
+-----------------------------------------------+------------------------------+
| Texts | Labels |
+===============================================+==============================+
| (anchor, positive/negative) pairs | 1 if positive, 0 if negative |
+-----------------------------------------------+------------------------------+
Relations:
- :class:`ContrastiveLoss` is similar, but does not use hard positive and hard negative pairs.
:class:`OnlineContrastiveLoss` often yields better results.
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"label": [1, 0],
})
loss = losses.OnlineContrastiveLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.margin = margin
self.distance_metric = distance_metric
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor, size_average=False) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
distance_matrix = self.distance_metric(embeddings[0], embeddings[1])
negs = distance_matrix[labels == 0]
poss = distance_matrix[labels == 1]
# select hard positive and hard negative pairs
negative_pairs = negs[negs < (poss.max() if len(poss) > 1 else negs.mean())]
positive_pairs = poss[poss > (negs.min() if len(negs) > 1 else poss.mean())]
positive_loss = positive_pairs.pow(2).sum()
negative_loss = F.relu(self.margin - negative_pairs).pow(2).sum()
loss = positive_loss + negative_loss
return loss
|
from __future__ import annotations
from typing import Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from .ContrastiveLoss import SiameseDistanceMetric
class OnlineContrastiveLoss(nn.Module):
def __init__(
self, model: SentenceTransformer, distance_metric=SiameseDistanceMetric.COSINE_DISTANCE, margin: float = 0.5
) -> None:
"""
        This Online Contrastive loss is similar to :class:`ContrastiveLoss`, but it selects hard positive pairs (positives
        that are far apart) and hard negative pairs (negatives that are close) and computes the loss only for these pairs.
        This loss often yields better performance than ContrastiveLoss.
Args:
model: SentenceTransformer model
distance_metric: Function that returns a distance between
two embeddings. The class SiameseDistanceMetric contains
pre-defined metrics that can be used
margin: Negative samples (label == 0) should have a distance
of at least the margin value.
References:
- `Training Examples > Quora Duplicate Questions <../../examples/training/quora_duplicate_questions/README.html>`_
Requirements:
1. (anchor, positive/negative) pairs
2. Data should include hard positives and hard negatives
Inputs:
+-----------------------------------------------+------------------------------+
| Texts | Labels |
+===============================================+==============================+
| (anchor, positive/negative) pairs | 1 if positive, 0 if negative |
+-----------------------------------------------+------------------------------+
Relations:
- :class:`ContrastiveLoss` is similar, but does not use hard positive and hard negative pairs.
:class:`OnlineContrastiveLoss` often yields better results.
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"label": [1, 0],
})
loss = losses.OnlineContrastiveLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.margin = margin
self.distance_metric = distance_metric
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor, size_average=False) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
distance_matrix = self.distance_metric(embeddings[0], embeddings[1])
negs = distance_matrix[labels == 0]
poss = distance_matrix[labels == 1]
# select hard positive and hard negative pairs
negative_pairs = negs[negs < (poss.max() if len(poss) > 1 else negs.mean())]
positive_pairs = poss[poss > (negs.min() if len(negs) > 1 else poss.mean())]
positive_loss = positive_pairs.pow(2).sum()
negative_loss = F.relu(self.margin - negative_pairs).pow(2).sum()
loss = positive_loss + negative_loss
return loss
|
import os
from abc import abstractmethod
from typing import Union
from unittest import mock
import pytest
from langchain_core.tools import BaseTool
from pydantic import SecretStr
from langchain_tests.base import BaseStandardTests
class ToolsTests(BaseStandardTests):
""":private:
Base class for testing tools. This won't show in the documentation, but
the docstrings will be inherited by subclasses.
"""
@property
@abstractmethod
def tool_constructor(self) -> Union[type[BaseTool], BaseTool]:
"""Returns a class or instance of a tool to be tested."""
...
@property
def tool_constructor_params(self) -> dict:
"""Returns a dictionary of parameters to pass to the tool constructor."""
return {}
@property
def tool_invoke_params_example(self) -> dict:
"""Returns a dictionary representing the "args" of an example tool call.
This should NOT be a ToolCall dict - it should not
have {"name", "id", "args"} keys.
"""
return {}
@pytest.fixture
def tool(self) -> BaseTool:
""":private:"""
if isinstance(self.tool_constructor, BaseTool):
if self.tool_constructor_params != {}:
msg = (
"If tool_constructor is an instance of BaseTool, "
"tool_constructor_params must be empty"
)
raise ValueError(msg)
return self.tool_constructor
return self.tool_constructor(**self.tool_constructor_params)
class ToolsUnitTests(ToolsTests):
"""Base class for tools unit tests."""
@property
def init_from_env_params(self) -> tuple[dict, dict, dict]:
"""Return env vars, init args, and expected instance attrs for initializing
from env vars.
"""
return {}, {}, {}
def test_init(self) -> None:
"""Test that the tool can be initialized with :attr:`tool_constructor` and
:attr:`tool_constructor_params`. If this fails, check that the
keyword args defined in :attr:`tool_constructor_params` are valid.
"""
if isinstance(self.tool_constructor, BaseTool):
tool = self.tool_constructor
else:
tool = self.tool_constructor(**self.tool_constructor_params)
assert tool is not None
def test_init_from_env(self) -> None:
env_params, tools_params, expected_attrs = self.init_from_env_params
if env_params:
with mock.patch.dict(os.environ, env_params):
tool = self.tool_constructor(**tools_params)
assert tool is not None
for k, expected in expected_attrs.items():
actual = getattr(tool, k)
if isinstance(actual, SecretStr):
actual = actual.get_secret_value()
assert actual == expected
def test_has_name(self, tool: BaseTool) -> None:
"""Tests that the tool has a name attribute to pass to chat models.
If this fails, add a `name` parameter to your tool.
"""
assert tool.name
def test_has_input_schema(self, tool: BaseTool) -> None:
"""Tests that the tool has an input schema.
If this fails, add an `args_schema` to your tool.
See
`this guide <https://python.langchain.com/docs/how_to/custom_tools/#subclass-basetool>`_
and see how `CalculatorInput` is configured in the
`CustomCalculatorTool.args_schema` attribute
"""
assert tool.get_input_schema()
def test_input_schema_matches_invoke_params(self, tool: BaseTool) -> None:
"""Tests that the provided example params match the declared input schema.
If this fails, update the `tool_invoke_params_example` attribute to match
the input schema (`args_schema`) of the tool.
"""
# this will be a pydantic object
input_schema = tool.get_input_schema()
assert input_schema(**self.tool_invoke_params_example)
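# Illustrative subclass sketch (hypothetical names, for demonstration only):
#
#   class TestMyTool(ToolsUnitTests):
#       @property
#       def tool_constructor(self) -> type[BaseTool]:
#           return MyTool  # your BaseTool subclass
#
#       @property
#       def tool_invoke_params_example(self) -> dict:
#           return {"query": "hello"}  # must validate against MyTool.args_schema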
|
import os
from abc import abstractmethod
from typing import Union
from unittest import mock
import pytest
from langchain_core.tools import BaseTool
from pydantic import SecretStr
from langchain_tests.base import BaseStandardTests
class ToolsTests(BaseStandardTests):
"""
:private:
Base class for testing tools. This won't show in the documentation, but
the docstrings will be inherited by subclasses.
"""
@property
@abstractmethod
def tool_constructor(self) -> Union[type[BaseTool], BaseTool]:
"""
Returns a class or instance of a tool to be tested.
"""
...
@property
def tool_constructor_params(self) -> dict:
"""
Returns a dictionary of parameters to pass to the tool constructor.
"""
return {}
@property
def tool_invoke_params_example(self) -> dict:
"""
Returns a dictionary representing the "args" of an example tool call.
This should NOT be a ToolCall dict - it should not
have {"name", "id", "args"} keys.
"""
return {}
@pytest.fixture
def tool(self) -> BaseTool:
"""
:private:
"""
if isinstance(self.tool_constructor, BaseTool):
if self.tool_constructor_params != {}:
msg = (
"If tool_constructor is an instance of BaseTool, "
"tool_constructor_params must be empty"
)
raise ValueError(msg)
return self.tool_constructor
return self.tool_constructor(**self.tool_constructor_params)
class ToolsUnitTests(ToolsTests):
"""
Base class for tools unit tests.
"""
@property
def init_from_env_params(self) -> tuple[dict, dict, dict]:
"""Return env vars, init args, and expected instance attrs for initializing
from env vars."""
return {}, {}, {}
def test_init(self) -> None:
"""
Test that the tool can be initialized with :attr:`tool_constructor` and
:attr:`tool_constructor_params`. If this fails, check that the
keyword args defined in :attr:`tool_constructor_params` are valid.
"""
if isinstance(self.tool_constructor, BaseTool):
tool = self.tool_constructor
else:
tool = self.tool_constructor(**self.tool_constructor_params)
assert tool is not None
def test_init_from_env(self) -> None:
env_params, tools_params, expected_attrs = self.init_from_env_params
if env_params:
with mock.patch.dict(os.environ, env_params):
tool = self.tool_constructor(**tools_params)
assert tool is not None
for k, expected in expected_attrs.items():
actual = getattr(tool, k)
if isinstance(actual, SecretStr):
actual = actual.get_secret_value()
assert actual == expected
def test_has_name(self, tool: BaseTool) -> None:
"""
Tests that the tool has a name attribute to pass to chat models.
If this fails, add a `name` parameter to your tool.
"""
assert tool.name
def test_has_input_schema(self, tool: BaseTool) -> None:
"""
Tests that the tool has an input schema.
If this fails, add an `args_schema` to your tool.
See
`this guide <https://python.langchain.com/docs/how_to/custom_tools/#subclass-basetool>`_
and see how `CalculatorInput` is configured in the
`CustomCalculatorTool.args_schema` attribute
"""
assert tool.get_input_schema()
def test_input_schema_matches_invoke_params(self, tool: BaseTool) -> None:
"""
Tests that the provided example params match the declared input schema.
If this fails, update the `tool_invoke_params_example` attribute to match
the input schema (`args_schema`) of the tool.
"""
# this will be a pydantic object
input_schema = tool.get_input_schema()
assert input_schema(**self.tool_invoke_params_example)
|
# Copyright (c) OpenMMLab. All rights reserved.
import logging
import random
from typing import List, Optional, Tuple
import numpy as np
import torch
from mmengine.dist import get_rank, sync_random_seed
from mmengine.logging import print_log
from mmengine.utils import digit_version, is_list_of
from mmengine.utils.dl_utils import TORCH_VERSION
def calc_dynamic_intervals(
start_interval: int,
dynamic_interval_list: Optional[List[Tuple[int, int]]] = None
) -> Tuple[List[int], List[int]]:
"""Calculate dynamic intervals.
Args:
start_interval (int): The interval used in the beginning.
        dynamic_interval_list (List[Tuple[int, int]], optional): The
            first element in each tuple is a milestone and the second
            element is an interval. The interval is used after the
            corresponding milestone. Defaults to None.
    Returns:
        Tuple[List[int], List[int]]: a list of milestones and their
            corresponding intervals.
"""
if dynamic_interval_list is None:
return [0], [start_interval]
assert is_list_of(dynamic_interval_list, tuple)
dynamic_milestones = [0]
dynamic_milestones.extend(
[dynamic_interval[0] for dynamic_interval in dynamic_interval_list])
dynamic_intervals = [start_interval]
dynamic_intervals.extend(
[dynamic_interval[1] for dynamic_interval in dynamic_interval_list])
return dynamic_milestones, dynamic_intervals
def set_random_seed(seed: Optional[int] = None,
deterministic: bool = False,
diff_rank_seed: bool = False) -> int:
"""Set random seed.
Args:
seed (int, optional): Seed to be used.
deterministic (bool): Whether to set the deterministic option for
CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
to True and `torch.backends.cudnn.benchmark` to False.
Defaults to False.
        diff_rank_seed (bool): Whether to add the rank number to the random seed so
            that different processes use different random seeds. Defaults to False.
    Returns:
        int: The seed that was actually used.
"""
if seed is None:
seed = sync_random_seed()
if diff_rank_seed:
rank = get_rank()
seed += rank
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
# torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# os.environ['PYTHONHASHSEED'] = str(seed)
if deterministic:
if torch.backends.cudnn.benchmark:
print_log(
'torch.backends.cudnn.benchmark is going to be set as '
'`False` to cause cuDNN to deterministically select an '
'algorithm',
logger='current',
level=logging.WARNING)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if digit_version(TORCH_VERSION) >= digit_version('1.10.0'):
torch.use_deterministic_algorithms(True)
return seed
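# Minimal usage sketch (illustrative, not part of the module): fix the seed for a
# reproducible run and compute a dynamic-interval schedule.
if __name__ == '__main__':
    used_seed = set_random_seed(seed=2023, deterministic=True)
    print(f'using random seed: {used_seed}')
    milestones, intervals = calc_dynamic_intervals(1, [(10, 5), (20, 10)])
    print(milestones, intervals)  # [0, 10, 20] [1, 5, 10]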
|
# Copyright (c) OpenMMLab. All rights reserved.
import logging
import random
from typing import List, Optional, Tuple
import numpy as np
import torch
from mmengine.dist import get_rank, sync_random_seed
from mmengine.logging import print_log
from mmengine.utils import digit_version, is_list_of
from mmengine.utils.dl_utils import TORCH_VERSION
def calc_dynamic_intervals(
start_interval: int,
dynamic_interval_list: Optional[List[Tuple[int, int]]] = None
) -> Tuple[List[int], List[int]]:
"""Calculate dynamic intervals.
Args:
start_interval (int): The interval used in the beginning.
        dynamic_interval_list (List[Tuple[int, int]], optional): The
            first element in each tuple is a milestone and the second
            element is an interval. The interval is used after the
            corresponding milestone. Defaults to None.
    Returns:
        Tuple[List[int], List[int]]: a list of milestones and their
            corresponding intervals.
"""
if dynamic_interval_list is None:
return [0], [start_interval]
assert is_list_of(dynamic_interval_list, tuple)
dynamic_milestones = [0]
dynamic_milestones.extend(
[dynamic_interval[0] for dynamic_interval in dynamic_interval_list])
dynamic_intervals = [start_interval]
dynamic_intervals.extend(
[dynamic_interval[1] for dynamic_interval in dynamic_interval_list])
return dynamic_milestones, dynamic_intervals
def set_random_seed(seed: Optional[int] = None,
deterministic: bool = False,
diff_rank_seed: bool = False) -> int:
"""Set random seed.
Args:
seed (int, optional): Seed to be used.
deterministic (bool): Whether to set the deterministic option for
CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
to True and `torch.backends.cudnn.benchmark` to False.
Default: False.
        diff_rank_seed (bool): Whether to add the rank number to the random seed so
            that different processes use different random seeds. Default: False.
    Returns:
        int: The seed that was actually used.
"""
if seed is None:
seed = sync_random_seed()
if diff_rank_seed:
rank = get_rank()
seed += rank
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
# torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# os.environ['PYTHONHASHSEED'] = str(seed)
if deterministic:
if torch.backends.cudnn.benchmark:
print_log(
'torch.backends.cudnn.benchmark is going to be set as '
'`False` to cause cuDNN to deterministically select an '
'algorithm',
logger='current',
level=logging.WARNING)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if digit_version(TORCH_VERSION) >= digit_version('1.10.0'):
torch.use_deterministic_algorithms(True)
return seed
|
from typing import TYPE_CHECKING, Union, BinaryIO
from docarray.document.mixins.helper import _uri_to_blob, _to_datauri, _get_file_context
if TYPE_CHECKING:
from docarray.typing import T
class BlobDataMixin:
"""Provide helper functions for :class:`Document` to handle binary data."""
def load_uri_to_blob(self: 'T', **kwargs) -> 'T':
"""Convert :attr:`.uri` to :attr:`.blob` inplace.
        Internally it downloads from the URI and sets :attr:`blob`.
        :param kwargs: keyword arguments to pass to :meth:`_uri_to_blob`, such as ``timeout``
:return: itself after processed
"""
self.blob = _uri_to_blob(self.uri, **kwargs)
return self
def convert_blob_to_datauri(
self: 'T', charset: str = 'utf-8', base64: bool = False
) -> 'T':
"""Convert :attr:`.blob` to data :attr:`.uri` in place.
Internally it first reads into blob and then converts it to data URI.
:param charset: charset may be any character set registered with IANA
:param base64: used to encode arbitrary octet sequences into a form that satisfies the rules of 7bit.
Designed to be efficient for non-text 8 bit and binary data. Sometimes used for text data that
frequently uses non-US-ASCII characters.
:return: itself after processed
"""
if not self.mime_type:
            raise ValueError(
                'mime_type is unset, cannot convert it to a data URI'
            )
self.uri = _to_datauri(self.mime_type, self.blob, charset, base64, binary=True)
return self
def save_blob_to_file(self: 'T', file: Union[str, BinaryIO]) -> 'T':
"""Save :attr:`.blob` into a file
:param file: File or filename to which the data is saved.
:return: itself after processed
"""
fp = _get_file_context(file)
with fp:
fp.write(self.blob)
return self
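# Illustrative usage (a sketch; assumes docarray's Document mixes in this class):
#
#   from docarray import Document
#   d = Document(uri='https://example.com/image.png')
#   d.load_uri_to_blob(timeout=10)  # kwargs are forwarded to _uri_to_blob
#   d.save_blob_to_file('image.png')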
|
from typing import TYPE_CHECKING, Union, BinaryIO
from docarray.document.mixins.helper import _uri_to_blob, _to_datauri, _get_file_context
if TYPE_CHECKING:
from docarray.typing import T
class BlobDataMixin:
"""Provide helper functions for :class:`Document` to handle binary data."""
def load_uri_to_blob(self: 'T') -> 'T':
"""Convert :attr:`.uri` to :attr:`.blob` inplace.
        Internally it downloads from the URI and sets :attr:`blob`.
:return: itself after processed
"""
self.blob = _uri_to_blob(self.uri)
return self
def convert_blob_to_datauri(
self: 'T', charset: str = 'utf-8', base64: bool = False
) -> 'T':
"""Convert :attr:`.blob` to data :attr:`.uri` in place.
Internally it first reads into blob and then converts it to data URI.
:param charset: charset may be any character set registered with IANA
:param base64: used to encode arbitrary octet sequences into a form that satisfies the rules of 7bit.
Designed to be efficient for non-text 8 bit and binary data. Sometimes used for text data that
frequently uses non-US-ASCII characters.
:return: itself after processed
"""
if not self.mime_type:
            raise ValueError(
                'mime_type is unset, cannot convert it to a data URI'
            )
self.uri = _to_datauri(self.mime_type, self.blob, charset, base64, binary=True)
return self
def save_blob_to_file(self: 'T', file: Union[str, BinaryIO]) -> 'T':
"""Save :attr:`.blob` into a file
:param file: File or filename to which the data is saved.
:return: itself after processed
"""
fp = _get_file_context(file)
with fp:
fp.write(self.blob)
return self
|
import pytest
from langchain_core.agents import (
AgentActionMessageLog,
AgentFinish,
)
from langchain_core.exceptions import OutputParserException
from langchain_core.messages import AIMessage, SystemMessage
from langchain.agents.output_parsers.openai_functions import (
OpenAIFunctionsAgentOutputParser,
)
def test_not_an_ai() -> None:
parser = OpenAIFunctionsAgentOutputParser()
err = f"Expected an AI message got {SystemMessage!s}"
with pytest.raises(TypeError, match=err):
parser.invoke(SystemMessage(content="x"))
# Test: Model response (not a function call).
def test_model_response() -> None:
parser = OpenAIFunctionsAgentOutputParser()
msg = AIMessage(content="Model response.")
result = parser.invoke(msg)
assert isinstance(result, AgentFinish)
assert result.return_values == {"output": "Model response."}
assert result.log == "Model response."
# Test: Model response with a function call.
def test_func_call() -> None:
parser = OpenAIFunctionsAgentOutputParser()
msg = AIMessage(
content="LLM thoughts.",
additional_kwargs={
"function_call": {"name": "foo", "arguments": '{"param": 42}'}
},
)
result = parser.invoke(msg)
assert isinstance(result, AgentActionMessageLog)
assert result.tool == "foo"
assert result.tool_input == {"param": 42}
assert result.log == (
"\nInvoking: `foo` with `{'param': 42}`\nresponded: LLM thoughts.\n\n"
)
assert result.message_log == [msg]
# Test: Model response with a function call for a function taking no arguments
def test_func_call_no_args() -> None:
parser = OpenAIFunctionsAgentOutputParser()
msg = AIMessage(
content="LLM thoughts.",
additional_kwargs={"function_call": {"name": "foo", "arguments": ""}},
)
result = parser.invoke(msg)
assert isinstance(result, AgentActionMessageLog)
assert result.tool == "foo"
assert result.tool_input == {}
assert result.log == ("\nInvoking: `foo` with `{}`\nresponded: LLM thoughts.\n\n")
assert result.message_log == [msg]
# Test: Model response with a function call (old style tools).
def test_func_call_oldstyle() -> None:
parser = OpenAIFunctionsAgentOutputParser()
msg = AIMessage(
content="LLM thoughts.",
additional_kwargs={
"function_call": {"name": "foo", "arguments": '{"__arg1": "42"}'}
},
)
result = parser.invoke(msg)
assert isinstance(result, AgentActionMessageLog)
assert result.tool == "foo"
assert result.tool_input == "42"
assert result.log == "\nInvoking: `foo` with `42`\nresponded: LLM thoughts.\n\n"
assert result.message_log == [msg]
# Test: Invalid function call args.
def test_func_call_invalid() -> None:
parser = OpenAIFunctionsAgentOutputParser()
msg = AIMessage(
content="LLM thoughts.",
additional_kwargs={"function_call": {"name": "foo", "arguments": "{42]"}},
)
err = (
"Could not parse tool input: {'name': 'foo', 'arguments': '{42]'} "
"because the `arguments` is not valid JSON."
)
with pytest.raises(OutputParserException, match=err):
parser.invoke(msg)
|
import pytest
from langchain_core.agents import (
AgentActionMessageLog,
AgentFinish,
)
from langchain_core.exceptions import OutputParserException
from langchain_core.messages import AIMessage, SystemMessage
from langchain.agents.output_parsers.openai_functions import (
OpenAIFunctionsAgentOutputParser,
)
def test_not_an_ai() -> None:
parser = OpenAIFunctionsAgentOutputParser()
err = f"Expected an AI message got {str(SystemMessage)}"
with pytest.raises(TypeError, match=err):
parser.invoke(SystemMessage(content="x"))
# Test: Model response (not a function call).
def test_model_response() -> None:
parser = OpenAIFunctionsAgentOutputParser()
msg = AIMessage(content="Model response.")
result = parser.invoke(msg)
assert isinstance(result, AgentFinish)
assert result.return_values == {"output": "Model response."}
assert result.log == "Model response."
# Test: Model response with a function call.
def test_func_call() -> None:
parser = OpenAIFunctionsAgentOutputParser()
msg = AIMessage(
content="LLM thoughts.",
additional_kwargs={
"function_call": {"name": "foo", "arguments": '{"param": 42}'}
},
)
result = parser.invoke(msg)
assert isinstance(result, AgentActionMessageLog)
assert result.tool == "foo"
assert result.tool_input == {"param": 42}
assert result.log == (
"\nInvoking: `foo` with `{'param': 42}`\nresponded: LLM thoughts.\n\n"
)
assert result.message_log == [msg]
# Test: Model response with a function call for a function taking no arguments
def test_func_call_no_args() -> None:
parser = OpenAIFunctionsAgentOutputParser()
msg = AIMessage(
content="LLM thoughts.",
additional_kwargs={"function_call": {"name": "foo", "arguments": ""}},
)
result = parser.invoke(msg)
assert isinstance(result, AgentActionMessageLog)
assert result.tool == "foo"
assert result.tool_input == {}
assert result.log == ("\nInvoking: `foo` with `{}`\nresponded: LLM thoughts.\n\n")
assert result.message_log == [msg]
# Test: Model response with a function call (old style tools).
def test_func_call_oldstyle() -> None:
parser = OpenAIFunctionsAgentOutputParser()
msg = AIMessage(
content="LLM thoughts.",
additional_kwargs={
"function_call": {"name": "foo", "arguments": '{"__arg1": "42"}'}
},
)
result = parser.invoke(msg)
assert isinstance(result, AgentActionMessageLog)
assert result.tool == "foo"
assert result.tool_input == "42"
assert result.log == "\nInvoking: `foo` with `42`\nresponded: LLM thoughts.\n\n"
assert result.message_log == [msg]
# Test: Invalid function call args.
def test_func_call_invalid() -> None:
parser = OpenAIFunctionsAgentOutputParser()
msg = AIMessage(
content="LLM thoughts.",
additional_kwargs={"function_call": {"name": "foo", "arguments": "{42]"}},
)
err = (
"Could not parse tool input: {'name': 'foo', 'arguments': '{42]'} "
"because the `arguments` is not valid JSON."
)
with pytest.raises(OutputParserException, match=err):
parser.invoke(msg)
|
from __future__ import annotations
import logging
from dataclasses import dataclass, field
from typing import Any, Callable
import torch
logger = logging.getLogger(__name__)
@dataclass
class SentenceTransformerDataCollator:
"""Collator for a SentenceTransformers model.
This encodes the text columns to {column}_input_ids and {column}_attention_mask columns.
    This works with the two-text dataset that is used as the example in the training overview:
https://www.sbert.net/docs/sentence_transformer/training_overview.html
It is important that the columns are in the expected order. For example, if your dataset has columns
"answer", "question" in that order, then the MultipleNegativesRankingLoss will consider
"answer" as the anchor and "question" as the positive, and it will (unexpectedly) optimize for
"given the answer, what is the question?".
"""
tokenize_fn: Callable
valid_label_columns: list[str] = field(default_factory=lambda: ["label", "score"])
_warned_columns: set[tuple[str]] = field(default_factory=set, init=False, repr=False)
def __call__(self, features: list[dict[str, Any]]) -> dict[str, torch.Tensor]:
column_names = list(features[0].keys())
# We should always be able to return a loss, label or not:
batch = {}
if "dataset_name" in column_names:
column_names.remove("dataset_name")
batch["dataset_name"] = features[0]["dataset_name"]
if tuple(column_names) not in self._warned_columns:
self.maybe_warn_about_column_order(column_names)
# Extract the label column if it exists
for label_column in self.valid_label_columns:
if label_column in column_names:
batch["label"] = torch.tensor([row[label_column] for row in features])
column_names.remove(label_column)
break
# Extract the feature columns
for column_name in column_names:
tokenized = self.tokenize_fn([row[column_name] for row in features])
for key, value in tokenized.items():
batch[f"{column_name}_{key}"] = value
return batch
def maybe_warn_about_column_order(self, column_names: list[str]) -> None:
"""Warn the user if the columns are likely not in the expected order."""
# A mapping from common column names to the expected index in the dataset
column_name_to_expected_idx = {
"anchor": 0,
"positive": 1,
"negative": 2,
"question": 0,
"answer": 1,
"query": 0,
"response": 1,
"hypothesis": 0,
"entailment": 1,
"contradiction": 2,
}
for column_name, expected_idx in column_name_to_expected_idx.items():
if column_name in column_names and column_names.index(column_name) != expected_idx:
if column_name in ("anchor", "positive", "negative"):
proposed_fix_columns = ["anchor", "positive", "negative"]
elif column_name in ("question", "answer"):
proposed_fix_columns = ["question", "answer"]
elif column_name in ("query", "response"):
proposed_fix_columns = ["query", "response"]
elif column_name in ("hypothesis", "entailment", "contradiction"):
proposed_fix_columns = ["hypothesis", "entailment", "contradiction"]
logger.warning(
f"Column {column_name!r} is at index {column_names.index(column_name)}, whereas "
f"a column with this name is usually expected at index {expected_idx}. Note that the column "
"order can be important for some losses, e.g. MultipleNegativesRankingLoss will always "
"consider the first column as the anchor and the second as the positive, regardless of "
"the dataset column names. Consider renaming the columns to match the expected order, e.g.:\n"
f"dataset = dataset.select_columns({proposed_fix_columns})"
)
# We only need to warn once per list of column names to prevent spamming the user
break
self._warned_columns.add(tuple(column_names))
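# Minimal usage sketch (illustrative, not part of the module): collate a tiny batch
# with a dummy tokenize_fn; a real collator would use the model's tokenizer here.
if __name__ == "__main__":
    def dummy_tokenize(texts: list[str]) -> dict[str, torch.Tensor]:
        # stand-in for the model's tokenize(); returns fixed-length dummy token ids
        return {"input_ids": torch.tensor([[1, 2, 3] for _ in texts])}

    collator = SentenceTransformerDataCollator(tokenize_fn=dummy_tokenize)
    batch = collator([
        {"anchor": "a", "positive": "b", "label": 1},
        {"anchor": "c", "positive": "d", "label": 0},
    ])
    print(sorted(batch.keys()))  # ['anchor_input_ids', 'label', 'positive_input_ids']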
|
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Any, Callable
import torch
@dataclass
class SentenceTransformerDataCollator:
"""Collator for a SentenceTransformers model.
This encodes the text columns to {column}_input_ids and {column}_attention_mask columns.
    This works with the two-text dataset that is used as the example in the training overview:
https://www.sbert.net/docs/sentence_transformer/training_overview.html
"""
tokenize_fn: Callable
valid_label_columns: list[str] = field(default_factory=lambda: ["label", "score"])
def __call__(self, features: list[dict[str, Any]]) -> dict[str, torch.Tensor]:
columns = list(features[0].keys())
# We should always be able to return a loss, label or not:
batch = {}
if "dataset_name" in columns:
columns.remove("dataset_name")
batch["dataset_name"] = features[0]["dataset_name"]
# Extract the label column if it exists
for label_column in self.valid_label_columns:
if label_column in columns:
batch["label"] = torch.tensor([row[label_column] for row in features])
columns.remove(label_column)
break
# Extract the feature columns
for column in columns:
tokenized = self.tokenize_fn([row[column] for row in features])
for key, value in tokenized.items():
batch[f"{column}_{key}"] = value
return batch
|
# dataset settings
dataset_type = 'DeepFashionDataset'
data_root = 'data/DeepFashion/In-shop/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', scale=(750, 1101), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(750, 1101), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=2,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='Anno/segmentation/DeepFashion_segmentation_train.json',
data_prefix=dict(img='Img/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='Anno/segmentation/DeepFashion_segmentation_query.json',
data_prefix=dict(img='Img/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='Anno/segmentation/DeepFashion_segmentation_gallery.json',
data_prefix=dict(img='Img/'),
test_mode=True,
pipeline=test_pipeline))
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root +
'Anno/segmentation/DeepFashion_segmentation_query.json',
metric=['bbox', 'segm'],
format_only=False)
test_evaluator = dict(
type='CocoMetric',
ann_file=data_root +
'Anno/segmentation/DeepFashion_segmentation_gallery.json',
metric=['bbox', 'segm'],
format_only=False)
|
# dataset settings
dataset_type = 'DeepFashionDataset'
data_root = 'data/DeepFashion/In-shop/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', scale=(750, 1101), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(750, 1101), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=2,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='Anno/segmentation/DeepFashion_segmentation_train.json',
data_prefix=dict(img='Img/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='Anno/segmentation/DeepFashion_segmentation_query.json',
data_prefix=dict(img='Img/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='Anno/segmentation/DeepFashion_segmentation_gallery.json',
data_prefix=dict(img='Img/'),
test_mode=True,
pipeline=test_pipeline))
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root +
'Anno/segmentation/DeepFashion_segmentation_query.json',
metric=['bbox', 'segm'],
format_only=False)
test_evaluator = dict(
type='CocoMetric',
ann_file=data_root +
'Anno/segmentation/DeepFashion_segmentation_gallery.json',
metric=['bbox', 'segm'],
format_only=False)
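# These settings are consumed through the mmengine config system; since any
# top-level variable in this file becomes part of the config, the usage sketch
# below is kept in comments (the file path is an illustrative assumption):
#
#     from mmengine.config import Config
#
#     cfg = Config.fromfile('configs/deepfashion/deepfashion_dataset.py')
#     print(cfg.train_dataloader.dataset.type)              # 'RepeatDataset'
#     print(cfg.train_dataloader.dataset.dataset.ann_file)  # the train json above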
|
from langchain_core.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from langchain_core.prompts.prompt import PromptTemplate
from langchain.chains.prompt_selector import ConditionalPromptSelector, is_chat_model
templ1 = """You are a smart assistant designed to help high school teachers come up with reading comprehension questions.
Given a piece of text, you must come up with a question and answer pair that can be used to test a student's reading comprehension abilities.
When coming up with this question/answer pair, you must respond in the following format:
```
{{
"question": "$YOUR_QUESTION_HERE",
"answer": "$THE_ANSWER_HERE"
}}
```
Everything between the ``` must be valid json.
""" # noqa: E501
templ2 = """Please come up with a question/answer pair, in the specified JSON format, for the following text:
----------------
{text}""" # noqa: E501
CHAT_PROMPT = ChatPromptTemplate.from_messages(
[
SystemMessagePromptTemplate.from_template(templ1),
HumanMessagePromptTemplate.from_template(templ2),
]
)
templ = """You are a smart assistant designed to help high school teachers come up with reading comprehension questions.
Given a piece of text, you must come up with a question and answer pair that can be used to test a student's reading comprehension abilities.
When coming up with this question/answer pair, you must respond in the following format:
```
{{
"question": "$YOUR_QUESTION_HERE",
"answer": "$THE_ANSWER_HERE"
}}
```
Everything between the ``` must be valid json.
Please come up with a question/answer pair, in the specified JSON format, for the following text:
----------------
{text}""" # noqa: E501
PROMPT = PromptTemplate.from_template(templ)
PROMPT_SELECTOR = ConditionalPromptSelector(
default_prompt=PROMPT, conditionals=[(is_chat_model, CHAT_PROMPT)]
)
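# A brief usage sketch for the selector above: `get_prompt` returns CHAT_PROMPT
# for chat models and PROMPT otherwise. The ChatOpenAI import is an assumption
# for illustration (it needs an API key at runtime), hence comments only:
#
#     from langchain_openai import ChatOpenAI
#
#     llm = ChatOpenAI(model="gpt-4o-mini")
#     prompt = PROMPT_SELECTOR.get_prompt(llm)  # -> CHAT_PROMPT, llm is a chat model
#     messages = prompt.format_prompt(text="Some passage to quiz on.")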
|
# flake8: noqa
from langchain.chains.prompt_selector import ConditionalPromptSelector, is_chat_model
from langchain_core.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from langchain_core.prompts.prompt import PromptTemplate
templ1 = """You are a smart assistant designed to help high school teachers come up with reading comprehension questions.
Given a piece of text, you must come up with a question and answer pair that can be used to test a student's reading comprehension abilities.
When coming up with this question/answer pair, you must respond in the following format:
```
{{
"question": "$YOUR_QUESTION_HERE",
"answer": "$THE_ANSWER_HERE"
}}
```
Everything between the ``` must be valid json.
"""
templ2 = """Please come up with a question/answer pair, in the specified JSON format, for the following text:
----------------
{text}"""
CHAT_PROMPT = ChatPromptTemplate.from_messages(
[
SystemMessagePromptTemplate.from_template(templ1),
HumanMessagePromptTemplate.from_template(templ2),
]
)
templ = """You are a smart assistant designed to help high school teachers come up with reading comprehension questions.
Given a piece of text, you must come up with a question and answer pair that can be used to test a student's reading comprehension abilities.
When coming up with this question/answer pair, you must respond in the following format:
```
{{
"question": "$YOUR_QUESTION_HERE",
"answer": "$THE_ANSWER_HERE"
}}
```
Everything between the ``` must be valid json.
Please come up with a question/answer pair, in the specified JSON format, for the following text:
----------------
{text}"""
PROMPT = PromptTemplate.from_template(templ)
PROMPT_SELECTOR = ConditionalPromptSelector(
default_prompt=PROMPT, conditionals=[(is_chat_model, CHAT_PROMPT)]
)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .builder import build_linear_layer, build_transformer
from .ckpt_convert import pvt_convert
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .gaussian_target import gaussian_radius, gen_gaussian_target
from .inverted_residual import InvertedResidual
from .make_divisible import make_divisible
from .misc import interpolate_as, sigmoid_geometric_mean
from .normed_predictor import NormedConv2d, NormedLinear
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import DyReLU, SELayer
from .transformer import (DetrTransformerDecoder, DetrTransformerDecoderLayer,
DynamicConv, PatchEmbed, Transformer, nchw_to_nlc,
nlc_to_nchw)
__all__ = [
'ResLayer', 'gaussian_radius', 'gen_gaussian_target',
'DetrTransformerDecoderLayer', 'DetrTransformerDecoder', 'Transformer',
'build_transformer', 'build_linear_layer', 'SinePositionalEncoding',
'LearnedPositionalEncoding', 'DynamicConv', 'SimplifiedBasicBlock',
'NormedLinear', 'NormedConv2d', 'make_divisible', 'InvertedResidual',
'SELayer', 'interpolate_as', 'ConvUpsample', 'CSPLayer',
'adaptive_avg_pool2d', 'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc',
'nlc_to_nchw', 'pvt_convert', 'sigmoid_geometric_mean', 'DyReLU'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .builder import build_linear_layer, build_transformer
from .ckpt_convert import pvt_convert
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .gaussian_target import gaussian_radius, gen_gaussian_target
from .inverted_residual import InvertedResidual
from .make_divisible import make_divisible
from .misc import interpolate_as, sigmoid_geometric_mean
from .normed_predictor import NormedConv2d, NormedLinear
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import SELayer
from .transformer import (DetrTransformerDecoder, DetrTransformerDecoderLayer,
DynamicConv, PatchEmbed, Transformer, nchw_to_nlc,
nlc_to_nchw)
__all__ = [
'ResLayer', 'gaussian_radius', 'gen_gaussian_target',
'DetrTransformerDecoderLayer', 'DetrTransformerDecoder', 'Transformer',
'build_transformer', 'build_linear_layer', 'SinePositionalEncoding',
'LearnedPositionalEncoding', 'DynamicConv', 'SimplifiedBasicBlock',
'NormedLinear', 'NormedConv2d', 'make_divisible', 'InvertedResidual',
'SELayer', 'interpolate_as', 'ConvUpsample', 'CSPLayer',
'adaptive_avg_pool2d', 'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc',
'nlc_to_nchw', 'pvt_convert', 'sigmoid_geometric_mean'
]
|
from pathlib import Path
import librosa
import pytest
from jina import Document, DocumentArray, Executor
from tensorflow.python.framework import ops
from ...vggish import vggish_input
from ...vggish_audio_encoder import VggishAudioEncoder
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert str(ex.vgg_model_path).endswith('vggish_model.ckpt')
assert str(ex.pca_model_path).endswith('vggish_pca_params.ckpt')
def test_embedding_dimension():
x_audio, sample_rate = librosa.load(
Path(__file__).parents[1] / 'test_data/sample.wav'
)
log_mel_examples = vggish_input.waveform_to_examples(x_audio, sample_rate)
doc = DocumentArray([Document(blob=log_mel_examples)])
ops.reset_default_graph()
model = VggishAudioEncoder()
model.encode(doc, parameters={})
assert doc[0].embedding.shape == (128,)
@pytest.mark.gpu
def test_embedding_dimension_gpu():
x_audio, sample_rate = librosa.load(
Path(__file__).parents[1] / 'test_data/sample.wav'
)
log_mel_examples = vggish_input.waveform_to_examples(x_audio, sample_rate)
doc = DocumentArray([Document(blob=log_mel_examples)])
ops.reset_default_graph()
model = VggishAudioEncoder(device='/GPU:0')
model.encode(doc, parameters={})
assert doc[0].embedding.shape == (128,)
|
from pathlib import Path
import librosa
from jina import Document, DocumentArray, Executor
from tensorflow.python.framework import ops
from ...vggish import vggish_input
from ...vggish_audio_encoder import VggishAudioEncoder
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert str(ex.vgg_model_path).endswith('vggish_model.ckpt')
assert str(ex.pca_model_path).endswith('vggish_pca_params.ckpt')
def test_embedding_dimension():
x_audio, sample_rate = librosa.load(
Path(__file__).parents[1] / 'test_data/sample.wav'
)
log_mel_examples = vggish_input.waveform_to_examples(x_audio, sample_rate)
doc = DocumentArray([Document(blob=log_mel_examples)])
ops.reset_default_graph()
model = VggishAudioEncoder()
model.encode(doc, parameters={})
assert doc[0].embedding.shape == (128,)
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.task_modules.coders import (DeltaXYWHBBoxCoder,
DeltaXYWHBBoxCoderForGLIP)
def test_delta_bbox_coder():
coder = DeltaXYWHBBoxCoder()
rois = torch.Tensor([[0., 0., 1., 1.], [0., 0., 1., 1.], [0., 0., 1., 1.],
[5., 5., 5., 5.]])
deltas = torch.Tensor([[0., 0., 0., 0.], [1., 1., 1., 1.],
[0., 0., 2., -1.], [0.7, -1.9, -0.5, 0.3]])
expected_decode_bboxes = torch.Tensor([[0.0000, 0.0000, 1.0000, 1.0000],
[0.1409, 0.1409, 2.8591, 2.8591],
[0.0000, 0.3161, 4.1945, 0.6839],
[5.0000, 5.0000, 5.0000, 5.0000]])
out = coder.decode(rois, deltas, max_shape=(32, 32))
assert expected_decode_bboxes.allclose(out, atol=1e-04)
out = coder.decode(rois, deltas, max_shape=torch.Tensor((32, 32)))
assert expected_decode_bboxes.allclose(out, atol=1e-04)
batch_rois = rois.unsqueeze(0).repeat(2, 1, 1)
batch_deltas = deltas.unsqueeze(0).repeat(2, 1, 1)
batch_out = coder.decode(batch_rois, batch_deltas, max_shape=(32, 32))[0]
assert out.allclose(batch_out)
batch_out = coder.decode(
batch_rois, batch_deltas, max_shape=[(32, 32), (32, 32)])[0]
assert out.allclose(batch_out)
# test max_shape is not equal to batch
with pytest.raises(AssertionError):
coder.decode(
batch_rois, batch_deltas, max_shape=[(32, 32), (32, 32), (32, 32)])
rois = torch.zeros((0, 4))
deltas = torch.zeros((0, 4))
out = coder.decode(rois, deltas, max_shape=(32, 32))
assert rois.shape == out.shape
# test add_ctr_clamp
coder = DeltaXYWHBBoxCoder(add_ctr_clamp=True, ctr_clamp=2)
rois = torch.Tensor([[0., 0., 6., 6.], [0., 0., 1., 1.], [0., 0., 1., 1.],
[5., 5., 5., 5.]])
deltas = torch.Tensor([[1., 1., 2., 2.], [1., 1., 1., 1.],
[0., 0., 2., -1.], [0.7, -1.9, -0.5, 0.3]])
expected_decode_bboxes = torch.Tensor([[0.0000, 0.0000, 27.1672, 27.1672],
[0.1409, 0.1409, 2.8591, 2.8591],
[0.0000, 0.3161, 4.1945, 0.6839],
[5.0000, 5.0000, 5.0000, 5.0000]])
out = coder.decode(rois, deltas, max_shape=(32, 32))
assert expected_decode_bboxes.allclose(out, atol=1e-04)
coder = DeltaXYWHBBoxCoderForGLIP()
rois = torch.Tensor([[0., 0., 1., 1.], [0., 0., 1., 1.], [0., 0., 1., 1.],
[5., 5., 5., 5.]])
deltas = torch.Tensor([[0., 0., 0., 0.], [1., 1., 1., 1.],
[0., 0., 2., -1.], [0.7, -1.9, -0.5, 0.3]])
expected_decode_bboxes = torch.Tensor([[0.0000, 0.0000, 0.0000, 0.0000],
[0.1409, 0.1409, 1.8591, 1.8591],
[0.0000, 0.3161, 3.1945, 0.0000],
[5.0000, 5.0000, 4.0000, 4.0000]])
out = coder.decode(rois, deltas, max_shape=(32, 32))
assert expected_decode_bboxes.allclose(out, atol=1e-04)
out = coder.decode(rois, deltas, max_shape=torch.Tensor((32, 32)))
assert expected_decode_bboxes.allclose(out, atol=1e-04)
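# For context, `encode` is the inverse of `decode`; a small round-trip sketch
# under the coder's default means/stds (an added sketch, not part of the
# original suite):
def test_delta_bbox_coder_roundtrip():
    coder = DeltaXYWHBBoxCoder()
    rois = torch.Tensor([[0., 0., 10., 10.]])
    gt = torch.Tensor([[1., 1., 9., 9.]])
    deltas = coder.encode(rois, gt)        # regression targets for `gt`
    restored = coder.decode(rois, deltas)  # decoding the targets recovers `gt`
    assert restored.allclose(gt, atol=1e-4)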
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.task_modules.coders import DeltaXYWHBBoxCoder
def test_delta_bbox_coder():
coder = DeltaXYWHBBoxCoder()
rois = torch.Tensor([[0., 0., 1., 1.], [0., 0., 1., 1.], [0., 0., 1., 1.],
[5., 5., 5., 5.]])
deltas = torch.Tensor([[0., 0., 0., 0.], [1., 1., 1., 1.],
[0., 0., 2., -1.], [0.7, -1.9, -0.5, 0.3]])
expected_decode_bboxes = torch.Tensor([[0.0000, 0.0000, 1.0000, 1.0000],
[0.1409, 0.1409, 2.8591, 2.8591],
[0.0000, 0.3161, 4.1945, 0.6839],
[5.0000, 5.0000, 5.0000, 5.0000]])
out = coder.decode(rois, deltas, max_shape=(32, 32))
assert expected_decode_bboxes.allclose(out, atol=1e-04)
out = coder.decode(rois, deltas, max_shape=torch.Tensor((32, 32)))
assert expected_decode_bboxes.allclose(out, atol=1e-04)
batch_rois = rois.unsqueeze(0).repeat(2, 1, 1)
batch_deltas = deltas.unsqueeze(0).repeat(2, 1, 1)
batch_out = coder.decode(batch_rois, batch_deltas, max_shape=(32, 32))[0]
assert out.allclose(batch_out)
batch_out = coder.decode(
batch_rois, batch_deltas, max_shape=[(32, 32), (32, 32)])[0]
assert out.allclose(batch_out)
# test max_shape is not equal to batch
with pytest.raises(AssertionError):
coder.decode(
batch_rois, batch_deltas, max_shape=[(32, 32), (32, 32), (32, 32)])
rois = torch.zeros((0, 4))
deltas = torch.zeros((0, 4))
out = coder.decode(rois, deltas, max_shape=(32, 32))
assert rois.shape == out.shape
# test add_ctr_clamp
coder = DeltaXYWHBBoxCoder(add_ctr_clamp=True, ctr_clamp=2)
rois = torch.Tensor([[0., 0., 6., 6.], [0., 0., 1., 1.], [0., 0., 1., 1.],
[5., 5., 5., 5.]])
deltas = torch.Tensor([[1., 1., 2., 2.], [1., 1., 1., 1.],
[0., 0., 2., -1.], [0.7, -1.9, -0.5, 0.3]])
expected_decode_bboxes = torch.Tensor([[0.0000, 0.0000, 27.1672, 27.1672],
[0.1409, 0.1409, 2.8591, 2.8591],
[0.0000, 0.3161, 4.1945, 0.6839],
[5.0000, 5.0000, 5.0000, 5.0000]])
out = coder.decode(rois, deltas, max_shape=(32, 32))
assert expected_decode_bboxes.allclose(out, atol=1e-04)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.cache import (
AstraDBCache,
AstraDBSemanticCache,
AzureCosmosDBSemanticCache,
CassandraCache,
CassandraSemanticCache,
FullLLMCache,
FullMd5LLMCache,
GPTCache,
InMemoryCache,
MomentoCache,
RedisCache,
RedisSemanticCache,
SQLAlchemyCache,
SQLAlchemyMd5Cache,
SQLiteCache,
UpstashRedisCache,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"FullLLMCache": "langchain_community.cache",
"SQLAlchemyCache": "langchain_community.cache",
"SQLiteCache": "langchain_community.cache",
"UpstashRedisCache": "langchain_community.cache",
"RedisCache": "langchain_community.cache",
"RedisSemanticCache": "langchain_community.cache",
"GPTCache": "langchain_community.cache",
"MomentoCache": "langchain_community.cache",
"InMemoryCache": "langchain_community.cache",
"CassandraCache": "langchain_community.cache",
"CassandraSemanticCache": "langchain_community.cache",
"FullMd5LLMCache": "langchain_community.cache",
"SQLAlchemyMd5Cache": "langchain_community.cache",
"AstraDBCache": "langchain_community.cache",
"AstraDBSemanticCache": "langchain_community.cache",
"AzureCosmosDBSemanticCache": "langchain_community.cache",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"AstraDBCache",
"AstraDBSemanticCache",
"AzureCosmosDBSemanticCache",
"CassandraCache",
"CassandraSemanticCache",
"FullLLMCache",
"FullMd5LLMCache",
"GPTCache",
"InMemoryCache",
"MomentoCache",
"RedisCache",
"RedisSemanticCache",
"SQLAlchemyCache",
"SQLAlchemyMd5Cache",
"SQLiteCache",
"UpstashRedisCache",
]
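# In practice these names resolve lazily: importing one from this module
# forwards to langchain_community.cache and emits a deprecation warning.
# A usage sketch (assumes langchain_community is installed; kept as comments
# since this file is a pure re-export shim):
#
#     from langchain.cache import InMemoryCache  # DeprecationWarning, still works
#     from langchain_core.globals import set_llm_cache
#
#     set_llm_cache(InMemoryCache())  # enable in-memory LLM response caching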
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.cache import (
AstraDBCache,
AstraDBSemanticCache,
AzureCosmosDBSemanticCache,
CassandraCache,
CassandraSemanticCache,
FullLLMCache,
FullMd5LLMCache,
GPTCache,
InMemoryCache,
MomentoCache,
RedisCache,
RedisSemanticCache,
SQLAlchemyCache,
SQLAlchemyMd5Cache,
SQLiteCache,
UpstashRedisCache,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"FullLLMCache": "langchain_community.cache",
"SQLAlchemyCache": "langchain_community.cache",
"SQLiteCache": "langchain_community.cache",
"UpstashRedisCache": "langchain_community.cache",
"RedisCache": "langchain_community.cache",
"RedisSemanticCache": "langchain_community.cache",
"GPTCache": "langchain_community.cache",
"MomentoCache": "langchain_community.cache",
"InMemoryCache": "langchain_community.cache",
"CassandraCache": "langchain_community.cache",
"CassandraSemanticCache": "langchain_community.cache",
"FullMd5LLMCache": "langchain_community.cache",
"SQLAlchemyMd5Cache": "langchain_community.cache",
"AstraDBCache": "langchain_community.cache",
"AstraDBSemanticCache": "langchain_community.cache",
"AzureCosmosDBSemanticCache": "langchain_community.cache",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"FullLLMCache",
"SQLAlchemyCache",
"SQLiteCache",
"UpstashRedisCache",
"RedisCache",
"RedisSemanticCache",
"GPTCache",
"MomentoCache",
"InMemoryCache",
"CassandraCache",
"CassandraSemanticCache",
"FullMd5LLMCache",
"SQLAlchemyMd5Cache",
"AstraDBCache",
"AstraDBSemanticCache",
"AzureCosmosDBSemanticCache",
]
|
from datetime import datetime, timezone
import pytest
from prisma.enums import CreditTransactionType
from prisma.models import CreditTransaction
from backend.blocks.llm import AITextGeneratorBlock
from backend.data.credit import BetaUserCredit
from backend.data.execution import NodeExecutionEntry
from backend.data.user import DEFAULT_USER_ID
from backend.integrations.credentials_store import openai_credentials
from backend.util.test import SpinTestServer
REFILL_VALUE = 1000
user_credit = BetaUserCredit(REFILL_VALUE)
async def disable_test_user_transactions():
await CreditTransaction.prisma().delete_many(where={"userId": DEFAULT_USER_ID})
async def top_up(amount: int):
await user_credit._add_transaction(
DEFAULT_USER_ID,
amount,
CreditTransactionType.TOP_UP,
)
@pytest.mark.asyncio(scope="session")
async def test_block_credit_usage(server: SpinTestServer):
await disable_test_user_transactions()
await top_up(100)
current_credit = await user_credit.get_credits(DEFAULT_USER_ID)
spending_amount_1 = await user_credit.spend_credits(
NodeExecutionEntry(
user_id=DEFAULT_USER_ID,
graph_id="test_graph",
node_id="test_node",
graph_exec_id="test_graph_exec",
node_exec_id="test_node_exec",
block_id=AITextGeneratorBlock().id,
data={
"model": "gpt-4-turbo",
"credentials": {
"id": openai_credentials.id,
"provider": openai_credentials.provider,
"type": openai_credentials.type,
},
},
),
0.0,
0.0,
)
assert spending_amount_1 > 0
spending_amount_2 = await user_credit.spend_credits(
NodeExecutionEntry(
user_id=DEFAULT_USER_ID,
graph_id="test_graph",
node_id="test_node",
graph_exec_id="test_graph_exec",
node_exec_id="test_node_exec",
block_id=AITextGeneratorBlock().id,
data={"model": "gpt-4-turbo", "api_key": "owned_api_key"},
),
0.0,
0.0,
)
assert spending_amount_2 == 0
new_credit = await user_credit.get_credits(DEFAULT_USER_ID)
assert new_credit == current_credit - spending_amount_1 - spending_amount_2
@pytest.mark.asyncio(scope="session")
async def test_block_credit_top_up(server: SpinTestServer):
await disable_test_user_transactions()
current_credit = await user_credit.get_credits(DEFAULT_USER_ID)
await top_up(100)
new_credit = await user_credit.get_credits(DEFAULT_USER_ID)
assert new_credit == current_credit + 100
@pytest.mark.asyncio(scope="session")
async def test_block_credit_reset(server: SpinTestServer):
await disable_test_user_transactions()
month1 = 1
month2 = 2
    # pin the calendar month to month 2 while keeping the rest of the current time
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(month=month2)
month2credit = await user_credit.get_credits(DEFAULT_USER_ID)
# Month 1 result should only affect month 1
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(month=month1)
month1credit = await user_credit.get_credits(DEFAULT_USER_ID)
await top_up(100)
assert await user_credit.get_credits(DEFAULT_USER_ID) == month1credit + 100
# Month 2 balance is unaffected
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(month=month2)
assert await user_credit.get_credits(DEFAULT_USER_ID) == month2credit
@pytest.mark.asyncio(scope="session")
async def test_credit_refill(server: SpinTestServer):
await disable_test_user_transactions()
balance = await user_credit.get_credits(DEFAULT_USER_ID)
assert balance == REFILL_VALUE
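# The `time_now` override used above is the seam that scopes balances to a
# calendar month; the same pattern in isolation (an illustrative sketch,
# independent of the credit model):
def test_time_now_seam_sketch():
    class Clocked:
        # Production code calls self.time_now() rather than datetime.now()
        # directly, so tests can pin the calendar without monkeypatching.
        time_now = staticmethod(lambda: datetime.now(timezone.utc))

    clock = Clocked()
    clock.time_now = lambda: datetime.now(timezone.utc).replace(month=1)
    assert clock.time_now().month == 1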
|
from datetime import datetime, timezone
import pytest
from prisma.models import CreditTransaction
from backend.blocks.llm import AITextGeneratorBlock
from backend.data.credit import BetaUserCredit
from backend.data.execution import NodeExecutionEntry
from backend.data.user import DEFAULT_USER_ID
from backend.integrations.credentials_store import openai_credentials
from backend.util.test import SpinTestServer
REFILL_VALUE = 1000
user_credit = BetaUserCredit(REFILL_VALUE)
async def disable_test_user_transactions():
await CreditTransaction.prisma().delete_many(where={"userId": DEFAULT_USER_ID})
@pytest.mark.asyncio(scope="session")
async def test_block_credit_usage(server: SpinTestServer):
await disable_test_user_transactions()
await user_credit.top_up_credits(DEFAULT_USER_ID, 100)
current_credit = await user_credit.get_credits(DEFAULT_USER_ID)
spending_amount_1 = await user_credit.spend_credits(
NodeExecutionEntry(
user_id=DEFAULT_USER_ID,
graph_id="test_graph",
node_id="test_node",
graph_exec_id="test_graph_exec",
node_exec_id="test_node_exec",
block_id=AITextGeneratorBlock().id,
data={
"model": "gpt-4-turbo",
"credentials": {
"id": openai_credentials.id,
"provider": openai_credentials.provider,
"type": openai_credentials.type,
},
},
),
0.0,
0.0,
)
assert spending_amount_1 > 0
spending_amount_2 = await user_credit.spend_credits(
NodeExecutionEntry(
user_id=DEFAULT_USER_ID,
graph_id="test_graph",
node_id="test_node",
graph_exec_id="test_graph_exec",
node_exec_id="test_node_exec",
block_id=AITextGeneratorBlock().id,
data={"model": "gpt-4-turbo", "api_key": "owned_api_key"},
),
0.0,
0.0,
)
assert spending_amount_2 == 0
new_credit = await user_credit.get_credits(DEFAULT_USER_ID)
assert new_credit == current_credit - spending_amount_1 - spending_amount_2
@pytest.mark.asyncio(scope="session")
async def test_block_credit_top_up(server: SpinTestServer):
await disable_test_user_transactions()
current_credit = await user_credit.get_credits(DEFAULT_USER_ID)
await user_credit.top_up_credits(DEFAULT_USER_ID, 100)
new_credit = await user_credit.get_credits(DEFAULT_USER_ID)
assert new_credit == current_credit + 100
@pytest.mark.asyncio(scope="session")
async def test_block_credit_reset(server: SpinTestServer):
await disable_test_user_transactions()
month1 = 1
month2 = 2
    # pin the calendar month to month 2 while keeping the rest of the current time
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(month=month2)
month2credit = await user_credit.get_credits(DEFAULT_USER_ID)
# Month 1 result should only affect month 1
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(month=month1)
month1credit = await user_credit.get_credits(DEFAULT_USER_ID)
await user_credit.top_up_credits(DEFAULT_USER_ID, 100)
assert await user_credit.get_credits(DEFAULT_USER_ID) == month1credit + 100
# Month 2 balance is unaffected
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(month=month2)
assert await user_credit.get_credits(DEFAULT_USER_ID) == month2credit
@pytest.mark.asyncio(scope="session")
async def test_credit_refill(server: SpinTestServer):
await disable_test_user_transactions()
balance = await user_credit.get_credits(DEFAULT_USER_ID)
assert balance == REFILL_VALUE
|
import numpy as np
from docarray import BaseDoc
from docarray.array import DocVec
from docarray.array.doc_vec.column_storage import ColumnStorageView
from docarray.typing import AnyTensor
def test_column_storage_init():
class InnerDoc(BaseDoc):
price: int
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
doc: InnerDoc
docs = [
MyDoc(tensor=np.zeros(10), name='hello', doc=InnerDoc(price=i))
for i in range(4)
]
storage = DocVec[MyDoc](docs)._storage
assert (storage.tensor_columns['tensor'] == np.zeros((4, 10))).all()
for name in storage.any_columns['name']:
assert name == 'hello'
inner_docs = storage.doc_columns['doc']
assert isinstance(inner_docs, DocVec[InnerDoc])
for i, doc in enumerate(inner_docs):
assert doc.price == i
def test_column_storage_view():
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
docs = [MyDoc(tensor=np.zeros((10, 10)), name='hello', id=i) for i in range(4)]
storage = DocVec[MyDoc](docs)._storage
view = ColumnStorageView(0, storage)
assert view['id'] == '0'
assert (view['tensor'] == np.zeros(10)).all()
assert view['name'] == 'hello'
view['id'] = 1
view['tensor'] = np.ones(10)
view['name'] = 'byebye'
assert storage.any_columns['id'][0] == 1
assert (storage.tensor_columns['tensor'][0] == np.ones(10)).all()
assert storage.any_columns['name'][0] == 'byebye'
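# For orientation, ColumnStorageView is a live row view: writes go straight to
# the shared columns. A tiny extra check in the style of the tests above
# (an added sketch, not part of the original suite):
def test_column_storage_view_writes_through():
    class MyDoc(BaseDoc):
        name: str

    storage = DocVec[MyDoc]([MyDoc(name='a'), MyDoc(name='b')])._storage
    view = ColumnStorageView(1, storage)
    view['name'] = 'c'
    assert list(storage.any_columns['name']) == ['a', 'c']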
|
import numpy as np
from docarray import BaseDoc
from docarray.array import DocArrayStacked
from docarray.array.stacked.column_storage import ColumnStorageView
from docarray.typing import AnyTensor
def test_column_storage_init():
class InnerDoc(BaseDoc):
price: int
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
doc: InnerDoc
docs = [
MyDoc(tensor=np.zeros(10), name='hello', doc=InnerDoc(price=i))
for i in range(4)
]
storage = DocArrayStacked[MyDoc](docs)._storage
assert (storage.tensor_columns['tensor'] == np.zeros((4, 10))).all()
for name in storage.any_columns['name']:
assert name == 'hello'
inner_docs = storage.doc_columns['doc']
assert isinstance(inner_docs, DocArrayStacked[InnerDoc])
for i, doc in enumerate(inner_docs):
assert doc.price == i
def test_column_storage_view():
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
docs = [MyDoc(tensor=np.zeros((10, 10)), name='hello', id=i) for i in range(4)]
storage = DocArrayStacked[MyDoc](docs)._storage
view = ColumnStorageView(0, storage)
assert view['id'] == '0'
assert (view['tensor'] == np.zeros(10)).all()
assert view['name'] == 'hello'
view['id'] = 1
view['tensor'] = np.ones(10)
view['name'] = 'byebye'
assert storage.any_columns['id'][0] == 1
assert (storage.tensor_columns['tensor'][0] == np.ones(10)).all()
assert storage.any_columns['name'][0] == 'byebye'
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..utils import is_optimum_quanto_available, is_torch_available, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
def replace_with_quanto_layers(
model,
quantization_config=None,
modules_to_not_convert=None,
current_key_name=None,
has_been_replaced=False,
):
"""
    Public method that recursively replaces the Linear (and, when activation quantization is enabled, LayerNorm) layers of the given model with Quanto quantized layers.
Returns the converted model and a boolean that indicates if the conversion has been successful or not.
Args:
model (`torch.nn.Module`):
The model to convert, can be any `torch.nn.Module` instance.
        quantization_config (`QuantoConfig`, defaults to `None`):
The quantization config object that contains the quantization parameters.
modules_to_not_convert (`list`, *optional*, defaults to `None`):
A list of modules to not convert. If a module name is in the list (e.g. `lm_head`), it will not be
converted.
current_key_name (`list`, *optional*, defaults to `None`):
A list that contains the current key name. This is used for recursion and should not be passed by the user.
        has_been_replaced (`bool`, *optional*, defaults to `False`):
A boolean that indicates if the conversion has been successful or not. This is used for recursion and
should not be passed by the user.
"""
from accelerate import init_empty_weights
if is_optimum_quanto_available():
from optimum.quanto import QLayerNorm, QLinear, qfloat8, qint2, qint4, qint8
w_mapping = {"float8": qfloat8, "int8": qint8, "int4": qint4, "int2": qint2}
a_mapping = {None: None, "float8": qfloat8, "int8": qint8}
if modules_to_not_convert is None:
modules_to_not_convert = []
for name, module in model.named_children():
if current_key_name is None:
current_key_name = []
current_key_name.append(name)
if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
with init_empty_weights():
if isinstance(module, torch.nn.Linear):
model._modules[name] = QLinear(
in_features=module.in_features,
out_features=module.out_features,
bias=module.bias is not None,
dtype=module.weight.dtype,
weights=w_mapping[quantization_config.weights],
activations=a_mapping[quantization_config.activations],
)
model._modules[name].requires_grad_(False)
has_been_replaced = True
elif isinstance(module, torch.nn.LayerNorm):
if quantization_config.activations is not None:
model._modules[name] = QLayerNorm(
module.normalized_shape,
module.eps,
module.elementwise_affine,
module.bias is not None,
activations=a_mapping[quantization_config.activations],
)
has_been_replaced = True
if len(list(module.children())) > 0:
_, has_been_replaced = replace_with_quanto_layers(
module,
quantization_config=quantization_config,
modules_to_not_convert=modules_to_not_convert,
current_key_name=current_key_name,
has_been_replaced=has_been_replaced,
)
# Remove the last key for recursion
current_key_name.pop(-1)
return model, has_been_replaced
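# A hedged usage sketch: inside `transformers` this helper is normally driven
# by the quanto quantizer, but it can be exercised directly with a QuantoConfig.
# Requires `optimum-quanto` and `accelerate`; the model choice is illustrative,
# so the sketch stays in comments:
#
#     from transformers import AutoModelForCausalLM, QuantoConfig
#
#     model = AutoModelForCausalLM.from_pretrained("gpt2")
#     config = QuantoConfig(weights="int8")  # activations stay unquantized
#     model, replaced = replace_with_quanto_layers(model, quantization_config=config)
#     assert replaced  # at least one nn.Linear was swapped for a QLinear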
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..utils import is_optimum_quanto_available, is_torch_available, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
def replace_with_quanto_layers(
model,
quantization_config=None,
modules_to_not_convert=None,
current_key_name=None,
has_been_replaced=False,
):
"""
    Public method that recursively replaces the Linear (and, when activation quantization is enabled, LayerNorm) layers of the given model with Quanto quantized layers.
    Returns the converted model and a boolean that indicates if the conversion has been successful or not.
Args:
model (`torch.nn.Module`):
The model to convert, can be any `torch.nn.Module` instance.
        quantization_config (`QuantoConfig`, defaults to `None`):
The quantization config object that contains the quantization parameters.
modules_to_not_convert (`list`, *optional*, defaults to `None`):
A list of modules to not convert. If a module name is in the list (e.g. `lm_head`), it will not be
converted.
current_key_name (`list`, *optional*, defaults to `None`):
A list that contains the current key name. This is used for recursion and should not be passed by the user.
        has_been_replaced (`bool`, *optional*, defaults to `False`):
A boolean that indicates if the conversion has been successful or not. This is used for recursion and
should not be passed by the user.
"""
from accelerate import init_empty_weights
if is_optimum_quanto_available():
from optimum.quanto import QLayerNorm, QLinear, qfloat8, qint2, qint4, qint8
w_mapping = {"float8": qfloat8, "int8": qint8, "int4": qint4, "int2": qint2}
a_mapping = {None: None, "float8": qfloat8, "int8": qint8}
if modules_to_not_convert is None:
modules_to_not_convert = []
for name, module in model.named_children():
if current_key_name is None:
current_key_name = []
current_key_name.append(name)
if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
with init_empty_weights():
if isinstance(module, torch.nn.Linear):
model._modules[name] = QLinear(
in_features=module.in_features,
out_features=module.out_features,
bias=module.bias is not None,
dtype=module.weight.dtype,
weights=w_mapping[quantization_config.weights],
activations=a_mapping[quantization_config.activations],
)
model._modules[name].requires_grad_(False)
has_been_replaced = True
elif isinstance(module, torch.nn.LayerNorm):
if quantization_config.activations is not None:
model._modules[name] = QLayerNorm(
module.normalized_shape,
module.eps,
module.elementwise_affine,
module.bias is not None,
activations=a_mapping[quantization_config.activations],
)
has_been_replaced = True
if len(list(module.children())) > 0:
_, has_been_replaced = replace_with_quanto_layers(
module,
quantization_config=quantization_config,
modules_to_not_convert=modules_to_not_convert,
current_key_name=current_key_name,
has_been_replaced=has_been_replaced,
)
# Remove the last key for recursion
current_key_name.pop(-1)
return model, has_been_replaced
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple, Union
import mmcv
import numpy as np
from mmengine.utils import is_str
def palette_val(palette: List[tuple]) -> List[tuple]:
"""Convert palette to matplotlib palette.
Args:
palette (List[tuple]): A list of color tuples.
Returns:
List[tuple[float]]: A list of RGB matplotlib color tuples.
"""
new_palette = []
for color in palette:
color = [c / 255 for c in color]
new_palette.append(tuple(color))
return new_palette
def get_palette(palette: Union[List[tuple], str, tuple],
num_classes: int) -> List[Tuple[int]]:
"""Get palette from various inputs.
Args:
palette (list[tuple] | str | tuple): palette inputs.
num_classes (int): the number of classes.
Returns:
list[tuple[int]]: A list of color tuples.
"""
assert isinstance(num_classes, int)
if isinstance(palette, list):
dataset_palette = palette
elif isinstance(palette, tuple):
dataset_palette = [palette] * num_classes
elif palette == 'random' or palette is None:
state = np.random.get_state()
# random color
np.random.seed(42)
palette = np.random.randint(0, 256, size=(num_classes, 3))
np.random.set_state(state)
dataset_palette = [tuple(c) for c in palette]
elif palette == 'coco':
from mmdet.datasets import CocoDataset, CocoPanopticDataset
dataset_palette = CocoDataset.METAINFO['PALETTE']
if len(dataset_palette) < num_classes:
dataset_palette = CocoPanopticDataset.METAINFO['PALETTE']
elif palette == 'citys':
from mmdet.datasets import CityscapesDataset
dataset_palette = CityscapesDataset.METAINFO['PALETTE']
elif palette == 'voc':
from mmdet.datasets import VOCDataset
dataset_palette = VOCDataset.METAINFO['PALETTE']
elif is_str(palette):
dataset_palette = [mmcv.color_val(palette)[::-1]] * num_classes
else:
raise TypeError(f'Invalid type for palette: {type(palette)}')
assert len(dataset_palette) >= num_classes, \
'The length of palette should not be less than `num_classes`.'
return dataset_palette
def _get_adaptive_scales(areas: np.ndarray,
min_area: int = 800,
max_area: int = 30000) -> np.ndarray:
"""Get adaptive scales according to areas.
The scale range is [0.5, 1.0]. When the area is less than
    ``min_area``, the scale is 0.5; when the area is larger than
``max_area``, the scale is 1.0.
Args:
areas (ndarray): The areas of bboxes or masks with the
shape of (n, ).
min_area (int): Lower bound areas for adaptive scales.
Defaults to 800.
max_area (int): Upper bound areas for adaptive scales.
Defaults to 30000.
Returns:
        ndarray: The adaptive scales with the shape of (n, ).
"""
scales = 0.5 + (areas - min_area) / (max_area - min_area)
scales = np.clip(scales, 0.5, 1.0)
return scales
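# A worked example of the ramp above: scale = 0.5 + (area - 800) / 29200,
# clipped to [0.5, 1.0], so with the default bounds the ramp saturates at
# area 15400 (the midpoint), not only at `max_area`:
if __name__ == '__main__':
    demo_areas = np.array([500., 800., 8100., 15400., 30000.])
    # -> [0.5 (clipped), 0.5, 0.75, 1.0, 1.0 (clipped)]
    print(_get_adaptive_scales(demo_areas))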
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple, Union
import mmcv
import numpy as np
def palette_val(palette: List[tuple]) -> List[tuple]:
"""Convert palette to matplotlib palette.
Args:
palette (List[tuple]): A list of color tuples.
Returns:
List[tuple[float]]: A list of RGB matplotlib color tuples.
"""
new_palette = []
for color in palette:
color = [c / 255 for c in color]
new_palette.append(tuple(color))
return new_palette
def get_palette(palette: Union[List[tuple], str, tuple],
num_classes: int) -> List[Tuple[int]]:
"""Get palette from various inputs.
Args:
palette (list[tuple] | str | tuple): palette inputs.
num_classes (int): the number of classes.
Returns:
list[tuple[int]]: A list of color tuples.
"""
assert isinstance(num_classes, int)
if isinstance(palette, list):
dataset_palette = palette
elif isinstance(palette, tuple):
dataset_palette = [palette] * num_classes
elif palette == 'random' or palette is None:
state = np.random.get_state()
# random color
np.random.seed(42)
palette = np.random.randint(0, 256, size=(num_classes, 3))
np.random.set_state(state)
dataset_palette = [tuple(c) for c in palette]
elif palette == 'coco':
from mmdet.datasets import CocoDataset, CocoPanopticDataset
dataset_palette = CocoDataset.METAINFO['PALETTE']
if len(dataset_palette) < num_classes:
dataset_palette = CocoPanopticDataset.METAINFO['PALETTE']
elif palette == 'citys':
from mmdet.datasets import CityscapesDataset
dataset_palette = CityscapesDataset.METAINFO['PALETTE']
elif palette == 'voc':
from mmdet.datasets import VOCDataset
dataset_palette = VOCDataset.METAINFO['PALETTE']
elif mmcv.is_str(palette):
dataset_palette = [mmcv.color_val(palette)[::-1]] * num_classes
else:
raise TypeError(f'Invalid type for palette: {type(palette)}')
assert len(dataset_palette) >= num_classes, \
'The length of palette should not be less than `num_classes`.'
return dataset_palette
def _get_adaptive_scales(areas: np.ndarray,
min_area: int = 800,
max_area: int = 30000) -> np.ndarray:
"""Get adaptive scales according to areas.
The scale range is [0.5, 1.0]. When the area is less than
    ``min_area``, the scale is 0.5; when the area is larger than
``max_area``, the scale is 1.0.
Args:
areas (ndarray): The areas of bboxes or masks with the
shape of (n, ).
min_area (int): Lower bound areas for adaptive scales.
Defaults to 800.
max_area (int): Upper bound areas for adaptive scales.
Defaults to 30000.
Returns:
        ndarray: The adaptive scales with the shape of (n, ).
"""
scales = 0.5 + (areas - min_area) / (max_area - min_area)
scales = np.clip(scales, 0.5, 1.0)
return scales
|
# Copyright (c) OpenMMLab. All rights reserved.
from .augment_wrappers import AutoAugment, RandAugment
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
Contrast, Equalize, Invert, Posterize, Sharpness,
Solarize, SolarizeAdd)
from .compose import Compose
from .formatting import (ImageToTensor, PackDetInputs, ToDataContainer,
ToTensor, Transpose)
from .geometric import (GeomTransform, Rotate, ShearX, ShearY, TranslateX,
TranslateY)
from .instaboost import InstaBoost
from .loading import (FilterAnnotations, LoadAnnotations, LoadImageFromNDArray,
LoadMultiChannelImageFromFiles, LoadPanopticAnnotations,
LoadProposals)
from .test_time_aug import MultiScaleFlipAug
from .transforms import (Albu, CopyPaste, CutOut, Expand, MinIoURandomCrop,
MixUp, Mosaic, Normalize, Pad, PhotoMetricDistortion,
RandomAffine, RandomCenterCropPad, RandomCrop,
RandomFlip, RandomShift, Resize, SegRescale,
YOLOXHSVRandomAug)
__all__ = [
'PackDetInputs', 'Compose', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
'Transpose', 'LoadImageFromNDArray', 'LoadAnnotations',
'LoadPanopticAnnotations', 'LoadMultiChannelImageFromFiles',
'LoadProposals', 'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'RandomCrop',
'Normalize', 'SegRescale', 'MinIoURandomCrop', 'Expand',
'PhotoMetricDistortion', 'Albu', 'InstaBoost', 'RandomCenterCropPad',
'AutoAugment', 'CutOut', 'ShearX', 'ShearY', 'Rotate', 'Color', 'Equalize',
'Brightness', 'Contrast', 'TranslateX', 'TranslateY', 'RandomShift',
'Mosaic', 'MixUp', 'RandomAffine', 'YOLOXHSVRandomAug', 'CopyPaste',
'FilterAnnotations', 'Pad', 'GeomTransform', 'ColorTransform',
'RandAugment', 'Sharpness', 'Solarize', 'SolarizeAdd', 'Posterize',
'AutoContrast', 'Invert'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .augment_wrappers import AutoAugment, RandAugment
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
Contrast, Equalize, Invert, Posterize, Sharpness,
Solarize, SolarizeAdd)
from .compose import Compose
from .formatting import (ImageToTensor, PackDetInputs, ToDataContainer,
ToTensor, Transpose)
from .geometric import (GeomTransform, Rotate, ShearX, ShearY, TranslateX,
TranslateY)
from .instaboost import InstaBoost
from .loading import (FilterAnnotations, LoadAnnotations, LoadImageFromWebcam,
LoadMultiChannelImageFromFiles, LoadPanopticAnnotations,
LoadProposals)
from .test_time_aug import MultiScaleFlipAug
from .transforms import (Albu, CopyPaste, CutOut, Expand, MinIoURandomCrop,
MixUp, Mosaic, Normalize, Pad, PhotoMetricDistortion,
RandomAffine, RandomCenterCropPad, RandomCrop,
RandomFlip, RandomShift, Resize, SegRescale,
YOLOXHSVRandomAug)
__all__ = [
'PackDetInputs', 'Compose', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
'Transpose', 'LoadImageFromWebcam', 'LoadAnnotations',
'LoadPanopticAnnotations', 'LoadMultiChannelImageFromFiles',
'LoadProposals', 'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'RandomCrop',
'Normalize', 'SegRescale', 'MinIoURandomCrop', 'Expand',
'PhotoMetricDistortion', 'Albu', 'InstaBoost', 'RandomCenterCropPad',
'AutoAugment', 'CutOut', 'ShearX', 'ShearY', 'Rotate', 'Color', 'Equalize',
'Brightness', 'Contrast', 'TranslateX', 'TranslateY', 'RandomShift',
'Mosaic', 'MixUp', 'RandomAffine', 'YOLOXHSVRandomAug', 'CopyPaste',
'FilterAnnotations', 'Pad', 'GeomTransform', 'ColorTransform',
'RandAugment', 'Sharpness', 'Solarize', 'SolarizeAdd', 'Posterize',
'AutoContrast', 'Invert'
]
|
import logging
import os
from typing import Optional
from jina.importer import ImportExtensions
from jina.serve.runtimes.servers import BaseServer
class WebSocketServer(BaseServer):
"""WebSocket Server implementation"""
def __init__(
self,
ssl_keyfile: Optional[str] = None,
ssl_certfile: Optional[str] = None,
uvicorn_kwargs: Optional[dict] = None,
proxy: Optional[bool] = None,
**kwargs
):
"""Initialize the gateway
:param ssl_keyfile: the path to the key file
:param ssl_certfile: the path to the certificate file
:param uvicorn_kwargs: Dictionary of kwargs arguments that will be passed to Uvicorn server when starting the server
        :param proxy: If set, respect the http_proxy and https_proxy environment variables; otherwise, unset
            these proxy variables before starting, since gRPC seems to prefer no proxy
:param kwargs: keyword args
"""
super().__init__(**kwargs)
self.ssl_keyfile = ssl_keyfile
self.ssl_certfile = ssl_certfile
self.uvicorn_kwargs = uvicorn_kwargs
if not proxy and os.name != 'nt':
os.unsetenv('http_proxy')
os.unsetenv('https_proxy')
async def setup_server(self):
"""
        Set up the WebSocket server
"""
self.app = self._request_handler._websocket_fastapi_default_app(tracing=self.tracing, tracer_provider=self.tracer_provider)
with ImportExtensions(required=True):
from uvicorn import Config, Server
class UviServer(Server):
"""The uvicorn server."""
async def setup(self, sockets=None):
"""
                Set up the uvicorn server.
:param sockets: sockets of server.
"""
config = self.config
if not config.loaded:
config.load()
self.lifespan = config.lifespan_class(config)
await self.startup(sockets=sockets)
if self.should_exit:
return
async def serve(self, **kwargs):
"""
Start the server.
:param kwargs: keyword arguments
"""
await self.main_loop()
if 'CICD_JINA_DISABLE_HEALTHCHECK_LOGS' in os.environ:
class _EndpointFilter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
# NOTE: space is important after `GET /`, else all logs will be disabled.
return record.getMessage().find("GET / ") == -1
# Filter out healthcheck endpoint `GET /`
logging.getLogger("uvicorn.access").addFilter(_EndpointFilter())
uvicorn_kwargs = self.uvicorn_kwargs or {}
if self.ssl_keyfile and 'ssl_keyfile' not in uvicorn_kwargs.keys():
uvicorn_kwargs['ssl_keyfile'] = self.ssl_keyfile
if self.ssl_certfile and 'ssl_certfile' not in uvicorn_kwargs.keys():
uvicorn_kwargs['ssl_certfile'] = self.ssl_certfile
self.server = UviServer(
config=Config(
app=self.app,
host=self.host,
port=self.port,
ws_max_size=1024 * 1024 * 1024,
log_level=os.getenv('JINA_LOG_LEVEL', 'error').lower(),
**uvicorn_kwargs,
)
)
await self.server.setup()
@property
def _should_exit(self):
"""Property describing if server is ready to exit
:return: boolean indicating if Server ready to exit
"""
return self.server.should_exit
@property
def should_exit(self):
"""Property describing if server is ready to exit
:return: boolean indicating if Server ready to exit
"""
return self._should_exit
async def shutdown(self):
"""Free other resources allocated with the server, e.g, gateway object, ..."""
await super().shutdown()
self.server.should_exit = True
await self.server.shutdown()
async def run_server(self):
"""Run WebSocket server forever"""
await self.server.serve()
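# For context, the `_EndpointFilter` above relies on uvicorn access-log lines
# containing "GET / " (with the trailing space) for the bare health-check
# route; the same filtering idea in isolation:
if __name__ == '__main__':
    class _DemoEndpointFilter(logging.Filter):
        def filter(self, record: logging.LogRecord) -> bool:
            # Drop only the bare "GET /" line; "GET /status" still passes
            # thanks to the trailing space in the needle.
            return record.getMessage().find('GET / ') == -1

    logging.basicConfig(level=logging.INFO)
    demo_logger = logging.getLogger('demo.access')
    demo_logger.addFilter(_DemoEndpointFilter())
    demo_logger.info('127.0.0.1 - "GET / HTTP/1.1" 200')        # filtered out
    demo_logger.info('127.0.0.1 - "GET /status HTTP/1.1" 200')  # kept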
|
import logging
import os
from typing import Optional
from jina.importer import ImportExtensions
from jina.serve.runtimes.servers import BaseServer
class WebSocketServer(BaseServer):
"""WebSocket Server implementation"""
def __init__(
self,
ssl_keyfile: Optional[str] = None,
ssl_certfile: Optional[str] = None,
uvicorn_kwargs: Optional[dict] = None,
proxy: Optional[bool] = None,
**kwargs
):
"""Initialize the gateway
:param ssl_keyfile: the path to the key file
:param ssl_certfile: the path to the certificate file
:param uvicorn_kwargs: Dictionary of kwargs arguments that will be passed to Uvicorn server when starting the server
        :param proxy: If set, respect the http_proxy and https_proxy environment variables; otherwise, unset
            these proxy variables before starting, since gRPC seems to prefer no proxy
:param kwargs: keyword args
"""
super().__init__(**kwargs)
self.ssl_keyfile = ssl_keyfile
self.ssl_certfile = ssl_certfile
self.uvicorn_kwargs = uvicorn_kwargs
if not proxy and os.name != 'nt':
os.unsetenv('http_proxy')
os.unsetenv('https_proxy')
async def setup_server(self):
"""
        Set up the WebSocket server
"""
self.app = self._request_handler._websocket_fastapi_default_app(tracing=self.tracing, tracer_provider=self.tracer_provider)
with ImportExtensions(required=True):
from uvicorn import Config, Server
class UviServer(Server):
"""The uvicorn server."""
async def setup(self, sockets=None):
"""
                Set up the uvicorn server.
:param sockets: sockets of server.
"""
config = self.config
if not config.loaded:
config.load()
self.lifespan = config.lifespan_class(config)
await self.startup(sockets=sockets)
if self.should_exit:
return
async def serve(self, **kwargs):
"""
Start the server.
:param kwargs: keyword arguments
"""
await self.main_loop()
if 'CICD_JINA_DISABLE_HEALTHCHECK_LOGS' in os.environ:
class _EndpointFilter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
# NOTE: space is important after `GET /`, else all logs will be disabled.
return record.getMessage().find("GET / ") == -1
# Filter out healthcheck endpoint `GET /`
logging.getLogger("uvicorn.access").addFilter(_EndpointFilter())
uvicorn_kwargs = self.uvicorn_kwargs or {}
if self.ssl_keyfile and 'ssl_keyfile' not in uvicorn_kwargs.keys():
uvicorn_kwargs['ssl_keyfile'] = self.ssl_keyfile
if self.ssl_certfile and 'ssl_certfile' not in uvicorn_kwargs.keys():
uvicorn_kwargs['ssl_certfile'] = self.ssl_certfile
self.server = UviServer(
config=Config(
app=self.app,
host=self.host,
port=self.port,
ws_max_size=1024 * 1024 * 1024,
log_level=os.getenv('JINA_LOG_LEVEL', 'error').lower(),
**uvicorn_kwargs,
)
)
await self.server.setup()
async def shutdown(self):
"""Free other resources allocated with the server, e.g, gateway object, ..."""
await super().shutdown()
self.server.should_exit = True
await self.server.shutdown()
async def run_server(self):
"""Run WebSocket server forever"""
await self.server.serve()
|
from __future__ import annotations
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import TYPE_CHECKING, Any
from sentence_transformers.model_card import (
SentenceTransformerModelCardCallback,
SentenceTransformerModelCardData,
)
from sentence_transformers.util import is_datasets_available
if is_datasets_available():
pass
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseEncoderModelCardCallback(SentenceTransformerModelCardCallback):
pass
@dataclass
class SparseEncoderModelCardData(SentenceTransformerModelCardData):
"""A dataclass storing data used in the model card.
Args:
language (`Optional[Union[str, List[str]]]`): The model language, either a string or a list,
e.g. "en" or ["en", "de", "nl"]
license (`Optional[str]`): The license of the model, e.g. "apache-2.0", "mit",
or "cc-by-nc-sa-4.0"
model_name (`Optional[str]`): The pretty name of the model, e.g. "SparseEncoder based on answerdotai/ModernBERT-base".
model_id (`Optional[str]`): The model ID when pushing the model to the Hub,
e.g. "tomaarsen/se-mpnet-base-ms-marco".
train_datasets (`List[Dict[str, str]]`): A list of the names and/or Hugging Face dataset IDs of the training datasets.
e.g. [{"name": "SNLI", "id": "stanfordnlp/snli"}, {"name": "MultiNLI", "id": "nyu-mll/multi_nli"}, {"name": "STSB"}]
eval_datasets (`List[Dict[str, str]]`): A list of the names and/or Hugging Face dataset IDs of the evaluation datasets.
e.g. [{"name": "SNLI", "id": "stanfordnlp/snli"}, {"id": "mteb/stsbenchmark-sts"}]
task_name (`str`): The human-readable task the model is trained on,
e.g. "semantic search and sparse retrieval".
tags (`Optional[List[str]]`): A list of tags for the model,
e.g. ["sentence-transformers", "sparse-encoder"].
.. tip::
Install `codecarbon <https://github.com/mlco2/codecarbon>`_ to automatically track carbon emission usage and
include it in your model cards.
Example::
>>> model = SparseEncoder(
... "microsoft/mpnet-base",
... model_card_data=SparseEncoderModelCardData(
... model_id="tomaarsen/se-mpnet-base-allnli",
... train_datasets=[{"name": "SNLI", "id": "stanfordnlp/snli"}, {"name": "MultiNLI", "id": "nyu-mll/multi_nli"}],
... eval_datasets=[{"name": "SNLI", "id": "stanfordnlp/snli"}, {"name": "MultiNLI", "id": "nyu-mll/multi_nli"}],
... license="apache-2.0",
... language="en",
... ),
... )
"""
# Potentially provided by the user
task_name: str = field(default=None)
tags: list[str] | None = field(
default_factory=lambda: [
"sentence-transformers",
"sparse-encoder",
]
)
# Automatically filled by `SparseEncoderModelCardCallback` and the Trainer directly
predict_example: list[list[str]] | None = field(default=None, init=False)
# Computed once, always unchanged
pipeline_tag: str = field(default=None, init=False)
template_path: Path = field(default=Path(__file__).parent / "model_card_template.md", init=False)
# Passed via `register_model` only
model: SparseEncoder | None = field(default=None, init=False, repr=False)
def register_model(self, model: SparseEncoder) -> None:
self.model = model
if self.task_name is None:
self.task_name = "semantic search and sparse retrieval"
if self.pipeline_tag is None:
self.pipeline_tag = "feature-extraction"
def tokenize(self, text: str | list[str]) -> dict[str, Any]:
return self.model.tokenizer(text)
def get_model_specific_metadata(self) -> dict[str, Any]:
return {
"model_max_length": self.model.get_max_seq_length(),
"output_dimensionality": self.model.get_sentence_embedding_dimension(),
}
|
from __future__ import annotations
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import TYPE_CHECKING, Any
from huggingface_hub import ModelCard
from sentence_transformers.model_card import (
SentenceTransformerModelCardCallback,
SentenceTransformerModelCardData,
)
from sentence_transformers.util import is_datasets_available
if is_datasets_available():
pass
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseEncoderModelCardCallback(SentenceTransformerModelCardCallback):
pass
@dataclass
class SparseEncoderModelCardData(SentenceTransformerModelCardData):
"""A dataclass storing data used in the model card.
Args:
language (`Optional[Union[str, List[str]]]`): The model language, either a string or a list,
e.g. "en" or ["en", "de", "nl"]
license (`Optional[str]`): The license of the model, e.g. "apache-2.0", "mit",
or "cc-by-nc-sa-4.0"
model_name (`Optional[str]`): The pretty name of the model, e.g. "SparseEncoder based on answerdotai/ModernBERT-base".
model_id (`Optional[str]`): The model ID when pushing the model to the Hub,
e.g. "tomaarsen/se-mpnet-base-ms-marco".
train_datasets (`List[Dict[str, str]]`): A list of the names and/or Hugging Face dataset IDs of the training datasets.
e.g. [{"name": "SNLI", "id": "stanfordnlp/snli"}, {"name": "MultiNLI", "id": "nyu-mll/multi_nli"}, {"name": "STSB"}]
eval_datasets (`List[Dict[str, str]]`): A list of the names and/or Hugging Face dataset IDs of the evaluation datasets.
e.g. [{"name": "SNLI", "id": "stanfordnlp/snli"}, {"id": "mteb/stsbenchmark-sts"}]
task_name (`str`): The human-readable task the model is trained on,
e.g. "semantic search and sparse retrieval".
tags (`Optional[List[str]]`): A list of tags for the model,
e.g. ["sentence-transformers", "sparse-encoder"].
.. tip::
Install `codecarbon <https://github.com/mlco2/codecarbon>`_ to automatically track carbon emission usage and
include it in your model cards.
Example::
>>> model = SparseEncoder(
... "microsoft/mpnet-base",
... model_card_data=SparseEncoderModelCardData(
... model_id="tomaarsen/se-mpnet-base-allnli",
... train_datasets=[{"name": "SNLI", "id": "stanfordnlp/snli"}, {"name": "MultiNLI", "id": "nyu-mll/multi_nli"}],
... eval_datasets=[{"name": "SNLI", "id": "stanfordnlp/snli"}, {"name": "MultiNLI", "id": "nyu-mll/multi_nli"}],
... license="apache-2.0",
... language="en",
... ),
... )
"""
# Potentially provided by the user
task_name: str = field(default=None)
tags: list[str] | None = field(
default_factory=lambda: [
"sentence-transformers",
"sparse-encoder",
]
)
# Automatically filled by `SparseEncoderModelCardCallback` and the Trainer directly
predict_example: list[list[str]] | None = field(default=None, init=False)
# Computed once, always unchanged
pipeline_tag: str = field(default=None, init=False)
template_path: Path = field(default=Path(__file__).parent / "model_card_template.md", init=False)
# Passed via `register_model` only
model: SparseEncoder | None = field(default=None, init=False, repr=False)
def register_model(self, model: SparseEncoder) -> None:
self.model = model
if self.task_name is None:
self.task_name = "semantic search and sparse retrieval"
if self.pipeline_tag is None:
self.pipeline_tag = "feature-extraction"
def tokenize(self, text: str | list[str]) -> dict[str, Any]:
return self.model.tokenizer(text)
def get_model_specific_metadata(self) -> dict[str, Any]:
return {
"model_max_length": self.model.get_max_seq_length(),
"output_dimensionality": self.model.get_sentence_embedding_dimension(),
}
def generate_model_card(model: SparseEncoder) -> str:
template_path = Path(__file__).parent / "model_card_template.md"
model_card = ModelCard.from_template(card_data=model.model_card_data, template_path=template_path, hf_emoji="🤗")
return model_card.content
|
from typing import TypeVar
from docarray.proto import NodeProto
from docarray.typing.tensor import NdArray
T = TypeVar('T', bound='Embedding')
class Embedding(NdArray):
def _to_node_protobuf(self: T, field: str = 'tensor') -> NodeProto:
"""Convert Document into a NodeProto protobuf message. This function should
be called when the Document is nested into another Document that needs to be
converted into a protobuf
:param field: field in which to store the content in the node proto
:return: the nested item protobuf message
"""
return super()._to_node_protobuf(field='embedding')
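# A minimal serialization sketch (hypothetical usage; assumes docarray's
# protobuf extra is installed). Note that the override above always stores
# the tensor under the 'embedding' proto field, regardless of the `field`
# argument passed in.
import numpy as np
from pydantic import parse_obj_as
emb = parse_obj_as(Embedding, np.zeros(128))
node = emb._to_node_protobuf(field='tensor')  # still serialized as 'embedding'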
|
from typing import TypeVar
from docarray.proto import NodeProto
from docarray.typing.tensor import Tensor
T = TypeVar('T', bound='Embedding')
class Embedding(Tensor):
def _to_node_protobuf(self: T, field: str = 'tensor') -> NodeProto:
"""Convert Document into a NodeProto protobuf message. This function should
be called when the Document is nested into another Document that needs to be
converted into a protobuf
:param field: field in which to store the content in the node proto
:return: the nested item protobuf message
"""
return super()._to_node_protobuf(field='embedding')
|
# Backwards compatibility.
from langchain_core.language_models import BaseLanguageModel
from langchain_core.language_models.llms import (
LLM,
BaseLLM,
)
__all__ = [
"LLM",
"BaseLLM",
"BaseLanguageModel",
]
|
# Backwards compatibility.
from langchain_core.language_models import BaseLanguageModel
from langchain_core.language_models.llms import (
LLM,
BaseLLM,
)
__all__ = [
"BaseLanguageModel",
"BaseLLM",
"LLM",
]
|
import json
import logging
import os
from typing import Dict, List
import torch
from torch import Tensor, nn
logger = logging.getLogger(__name__)
class WordWeights(nn.Module):
"""This model can weight word embeddings, for example, with idf-values."""
def __init__(self, vocab: List[str], word_weights: Dict[str, float], unknown_word_weight: float = 1):
"""
Initializes the WordWeights class.
Args:
vocab (List[str]): Vocabulary of the tokenizer.
word_weights (Dict[str, float]): Mapping of tokens to a float weight value. Word embeddings are multiplied
by this float value. The tokens in word_weights need not match the vocab exactly (it may contain more or fewer entries).
unknown_word_weight (float, optional): Weight for words in vocab that do not appear in the word_weights lookup.
These can be, for example, rare words in the vocab where no weight exists. Defaults to 1.
"""
super(WordWeights, self).__init__()
self.config_keys = ["vocab", "word_weights", "unknown_word_weight"]
self.vocab = vocab
self.word_weights = word_weights
self.unknown_word_weight = unknown_word_weight
weights = []
num_unknown_words = 0
for word in vocab:
weight = unknown_word_weight
if word in word_weights:
weight = word_weights[word]
elif word.lower() in word_weights:
weight = word_weights[word.lower()]
else:
num_unknown_words += 1
weights.append(weight)
logger.info(
"{} of {} words without a weighting value. Set weight to {}".format(
num_unknown_words, len(vocab), unknown_word_weight
)
)
self.emb_layer = nn.Embedding(len(vocab), 1)
self.emb_layer.load_state_dict({"weight": torch.FloatTensor(weights).unsqueeze(1)})
def forward(self, features: Dict[str, Tensor]):
attention_mask = features["attention_mask"]
token_embeddings = features["token_embeddings"]
# Compute a weight value for each token
token_weights_raw = self.emb_layer(features["input_ids"]).squeeze(-1)
token_weights = token_weights_raw * attention_mask.float()
token_weights_sum = torch.sum(token_weights, 1)
# Multiply embedding by token weight value
token_weights_expanded = token_weights.unsqueeze(-1).expand(token_embeddings.size())
token_embeddings = token_embeddings * token_weights_expanded
features.update({"token_embeddings": token_embeddings, "token_weights_sum": token_weights_sum})
return features
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
return WordWeights(**config)
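# A minimal usage sketch of the WordWeights layer above (the vocab, idf values
# and feature tensors here are made up for illustration; in practice the vocab
# comes from the tokenizer and the weights from an idf computation).
import torch
vocab = ["[PAD]", "the", "cat", "sat"]
idf = {"the": 0.1, "cat": 2.3, "sat": 2.0}  # hypothetical idf weights
layer = WordWeights(vocab=vocab, word_weights=idf, unknown_word_weight=1.0)
features = {
    "input_ids": torch.tensor([[1, 2, 3, 0]]),  # indices into the vocab
    "attention_mask": torch.tensor([[1, 1, 1, 0]]),  # last position is padding
    "token_embeddings": torch.randn(1, 4, 8),  # (batch, seq_len, dim)
}
out = layer(features)
print(out["token_embeddings"].shape)  # torch.Size([1, 4, 8]), now idf-weighted
print(out["token_weights_sum"])  # tensor([4.4000]): 0.1 + 2.3 + 2.0, pad masked out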
|
import torch
from torch import Tensor
from torch import nn
from typing import List, Dict
import os
import json
import logging
logger = logging.getLogger(__name__)
class WordWeights(nn.Module):
"""This model can weight word embeddings, for example, with idf-values."""
def __init__(self, vocab: List[str], word_weights: Dict[str, float], unknown_word_weight: float = 1):
"""
:param vocab:
Vocabulary of the tokenizer
:param word_weights:
Mapping of tokens to a float weight value. Word embeddings are multiplied by this float value. The tokens in word_weights need not match the vocab exactly (it may contain more or fewer entries)
:param unknown_word_weight:
Weight for words in the vocab that do not appear in the word_weights lookup. These can be, for example, rare words in the vocab for which no weight exists.
"""
super(WordWeights, self).__init__()
self.config_keys = ["vocab", "word_weights", "unknown_word_weight"]
self.vocab = vocab
self.word_weights = word_weights
self.unknown_word_weight = unknown_word_weight
weights = []
num_unknown_words = 0
for word in vocab:
weight = unknown_word_weight
if word in word_weights:
weight = word_weights[word]
elif word.lower() in word_weights:
weight = word_weights[word.lower()]
else:
num_unknown_words += 1
weights.append(weight)
logger.info(
"{} of {} words without a weighting value. Set weight to {}".format(
num_unknown_words, len(vocab), unknown_word_weight
)
)
self.emb_layer = nn.Embedding(len(vocab), 1)
self.emb_layer.load_state_dict({"weight": torch.FloatTensor(weights).unsqueeze(1)})
def forward(self, features: Dict[str, Tensor]):
attention_mask = features["attention_mask"]
token_embeddings = features["token_embeddings"]
# Compute a weight value for each token
token_weights_raw = self.emb_layer(features["input_ids"]).squeeze(-1)
token_weights = token_weights_raw * attention_mask.float()
token_weights_sum = torch.sum(token_weights, 1)
# Multiply embedding by token weight value
token_weights_expanded = token_weights.unsqueeze(-1).expand(token_embeddings.size())
token_embeddings = token_embeddings * token_weights_expanded
features.update({"token_embeddings": token_embeddings, "token_weights_sum": token_weights_sum})
return features
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
return WordWeights(**config)
|
from typing import List
from torch.utils.data import Dataset
from sentence_transformers import SentenceTransformer
from sentence_transformers.readers.InputExample import InputExample
class SentencesDataset(Dataset):
"""
DEPRECATED: This class is no longer used. Instead of wrapping your List of InputExamples in a SentencesDataset
and then passing it to the DataLoader, you can pass the list of InputExamples directly to the dataset loader.
"""
def __init__(self, examples: List[InputExample], model: SentenceTransformer):
self.examples = examples
def __getitem__(self, item):
return self.examples[item]
def __len__(self):
return len(self.examples)
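# Sketch of the replacement pattern described in the deprecation note above
# (the example texts and label are hypothetical; SentenceTransformer supplies
# its own smart batching collate function during fit):
from torch.utils.data import DataLoader
train_examples = [InputExample(texts=["First sentence", "Second sentence"], label=0.8)]
train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=16)
# model.fit(...) installs the model's smart batching collate_fn internally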
|
from torch.utils.data import Dataset
from typing import List
import torch
from .. import SentenceTransformer
from ..readers.InputExample import InputExample
class SentencesDataset(Dataset):
"""
DEPRECATED: This class is no longer used. Instead of wrapping your List of InputExamples in a SentencesDataset
and then passing it to the DataLoader, you can pass the list of InputExamples directly to the dataset loader.
"""
def __init__(self,
examples: List[InputExample],
model: SentenceTransformer
):
self.examples = examples
def __getitem__(self, item):
return self.examples[item]
def __len__(self):
return len(self.examples)
|
from typing import Iterator
from typing import Tuple
import torch
from keras.src.backend.common.stateless_scope import in_stateless_scope
from keras.src.ops.operation import Operation
class TorchLayer(torch.nn.Module):
def _post_build(self):
# Do not track variables when in a stateless scope.
# The variables are not initialized.
if in_stateless_scope():
return
self._track_variables()
def _track_variables(self):
# Setting the torch_params attribute will have the module automatically
# track parameters.
self.torch_params = torch.nn.ParameterDict(
{variable.path: variable.value for variable in self.variables}
)
def named_parameters(
self,
prefix: str = "",
recurse: bool = True,
remove_duplicate: bool = True,
) -> Iterator[Tuple[str, torch.nn.Parameter]]:
if not hasattr(self, "torch_params"):
self._track_variables()
return torch.nn.Module.named_parameters(
self, prefix, recurse, remove_duplicate
)
def forward(self, *args, **kwargs):
return Operation.__call__(self, *args, **kwargs)
def _setattr_hook(self, name, value):
from keras.src.layers import Layer
if (
isinstance(value, torch.nn.Module)
and not isinstance(value, Layer)
and not name == "torch_params"
):
from keras.src.utils.torch_utils import TorchModuleWrapper
if not isinstance(self, TorchModuleWrapper):
value = TorchModuleWrapper(value)
return name, value
def _post_track_variable(self, variable):
if hasattr(self, "torch_params"):
if variable.path not in self.torch_params:
self.torch_params[variable.path] = variable.value
def _post_untrack_variable(self, variable):
if hasattr(self, "torch_params"):
if variable.path in self.torch_params:
self.torch_params.pop(variable.path)
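# A hedged sketch of what _setattr_hook enables (assumes Keras running on the
# torch backend; MyLayer is hypothetical): a raw torch.nn.Module assigned as a
# layer attribute is wrapped in TorchModuleWrapper, so its parameters show up
# in named_parameters() alongside Keras-tracked variables.
import keras
class MyLayer(keras.layers.Layer):
    def build(self, input_shape):
        self.proj = torch.nn.Linear(input_shape[-1], 4)  # auto-wrapped
    def call(self, x):
        return self.proj(x)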
|
from typing import Iterator
from typing import Tuple
import torch
from keras.src.backend.common.stateless_scope import in_stateless_scope
from keras.src.ops.operation import Operation
class TorchLayer(torch.nn.Module):
def _post_build(self):
# Do not track variables when in a stateless scope.
# The variables are not initialized.
if in_stateless_scope():
return
self._track_variables()
def _track_variables(self):
# Setting the torch_params attribute will have the module automatically
# track parameters.
self.torch_params = torch.nn.ParameterDict(
{variable.path: variable.value for variable in self.variables}
)
def named_parameters(
self,
prefix: str = "",
recurse: bool = True,
remove_duplicate: bool = True,
) -> Iterator[Tuple[str, torch.nn.Parameter]]:
if not hasattr(self, "torch_params"):
self._track_variables()
return torch.nn.Module.named_parameters(
self, prefix, recurse, remove_duplicate
)
def forward(self, *args, **kwargs):
return Operation.__call__(self, *args, **kwargs)
def _setattr_hook(self, name, value):
from keras.src.layers import Layer
if (
isinstance(value, torch.nn.Module)
and not isinstance(value, Layer)
and not name == "torch_params"
):
from keras.src.utils.torch_utils import TorchModuleWrapper
if not isinstance(self, TorchModuleWrapper):
value = TorchModuleWrapper(value)
return name, value
def _post_track_variable(self, variable):
if hasattr(self, "torch_params"):
if variable.path not in self.torch_params:
self.torch_params[variable.path] = variable.value
def _post_untrack_variable(self, variable):
if hasattr(self, "torch_params"):
self.torch_params.pop(variable.path)
|
"""Bing Search tool spec."""
from typing import List, Optional
import requests
from llama_index.core.tools.tool_spec.base import BaseToolSpec
ENDPOINT_BASE_URL = "https://api.bing.microsoft.com/v7.0/"
class BingSearchToolSpec(BaseToolSpec):
"""Bing Search tool spec."""
spec_functions = ["bing_news_search", "bing_image_search", "bing_video_search"]
def __init__(
self, api_key: str, lang: Optional[str] = "en-US", results: Optional[int] = 3
) -> None:
"""Initialize with parameters."""
self.api_key = api_key
self.lang = lang
self.results = results
def _bing_request(self, endpoint: str, query: str, keys: List[str]):
response = requests.get(
ENDPOINT_BASE_URL + endpoint,
headers={"Ocp-Apim-Subscription-Key": self.api_key},
params={"q": query, "mkt": self.lang, "count": self.results},
)
response_json = response.json()
return [[result[key] for key in keys] for result in response_json["value"]]
def bing_news_search(self, query: str):
"""
Make a query to Bing news search. Useful for finding news on a query.
Args:
query (str): The query to be passed to Bing.
"""
return self._bing_request("news/search", query, ["name", "description", "url"])
def bing_image_search(self, query: str):
"""
Make a query to Bing image search. Useful for finding an image of a query.
Args:
query (str): The query to be passed to Bing.
Returns a URL for each image found.
"""
return self._bing_request("images/search", query, ["name", "contentUrl"])
def bing_video_search(self, query: str):
"""
Make a query to Bing video search. Useful for finding a video related to a query.
Args:
query (str): The query to be passed to Bing.
"""
return self._bing_request("videos/search", query, ["name", "contentUrl"])
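# Hypothetical usage sketch ("..." stands in for a real Bing subscription key;
# each hit is a [name, description, url] triple per _bing_request above):
tool_spec = BingSearchToolSpec(api_key="...", lang="en-US", results=3)
for name, description, url in tool_spec.bing_news_search("open source LLMs"):
    print(name, url)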
|
"""Bing Search tool spec."""
from typing import List, Optional
import requests
from llama_index.core.tools.tool_spec.base import BaseToolSpec
ENDPOINT_BASE_URL = "https://api.bing.microsoft.com/v7.0/"
class BingSearchToolSpec(BaseToolSpec):
"""Bing Search tool spec."""
spec_functions = ["bing_news_search", "bing_image_search", "bing_video_search"]
def __init__(
self, api_key: str, lang: Optional[str] = "en-US", results: Optional[int] = 3
) -> None:
"""Initialize with parameters."""
self.api_key = api_key
self.lang = lang
self.results = results
def _bing_request(self, endpoint: str, query: str, keys: List[str]):
response = requests.get(
ENDPOINT_BASE_URL + endpoint,
headers={"Ocp-Apim-Subscription-Key": self.api_key},
params={"q": query, "mkt": self.lang, "count": self.results},
)
response_json = response.json()
return [[result[key] for key in keys] for result in response_json["value"]]
def bing_news_search(self, query: str):
"""
Make a query to Bing news search. Useful for finding news on a query.
Args:
query (str): The query to be passed to Bing.
"""
return self._bing_request("news/search", query, ["name", "description", "url"])
def bing_image_search(self, query: str):
"""
Make a query to Bing image search. Useful for finding an image of a query.
Args:
query (str): The query to be passed to Bing.
Returns a URL for each image found.
"""
return self._bing_request("images/search", query, ["name", "contentUrl"])
def bing_video_search(self, query: str):
"""
Make a query to Bing video search. Useful for finding a video related to a query.
Args:
query (str): The query to be passed to Bing.
"""
return self._bing_request("videos/search", query, ["name", "contentUrl"])
|
from .Asym import Asym
from .BoW import BoW
from .CLIPModel import CLIPModel
from .CNN import CNN
from .Dense import Dense
from .Dropout import Dropout
from .LayerNorm import LayerNorm
from .LSTM import LSTM
from .Normalize import Normalize
from .Pooling import Pooling
from .Transformer import Transformer
from .WeightedLayerPooling import WeightedLayerPooling
from .WordEmbeddings import WordEmbeddings
from .WordWeights import WordWeights
__all__ = [
"Transformer",
"Asym",
"BoW",
"CNN",
"Dense",
"Dropout",
"LayerNorm",
"LSTM",
"Normalize",
"Pooling",
"WeightedLayerPooling",
"WordEmbeddings",
"WordWeights",
"CLIPModel",
]
|
from .Transformer import Transformer
from .Asym import Asym
from .BoW import BoW
from .CNN import CNN
from .Dense import Dense
from .Dropout import Dropout
from .LayerNorm import LayerNorm
from .LSTM import LSTM
from .Normalize import Normalize
from .Pooling import Pooling
from .WeightedLayerPooling import WeightedLayerPooling
from .WordEmbeddings import WordEmbeddings
from .WordWeights import WordWeights
from .CLIPModel import CLIPModel
__all__ = [
"Transformer",
"Asym",
"BoW",
"CNN",
"Dense",
"Dropout",
"LayerNorm",
"LSTM",
"Normalize",
"Pooling",
"WeightedLayerPooling",
"WordEmbeddings",
"WordWeights",
"CLIPModel",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
from collections.abc import Sequence
from pathlib import Path
import mmcv
import numpy as np
from mmcv import Config, DictAction
from mmdet.core.utils import mask2ndarray
from mmdet.core.visualization import imshow_det_bboxes
from mmdet.datasets.builder import build_dataset
from mmdet.utils import replace_cfg_vals, update_data_root
def parse_args():
parser = argparse.ArgumentParser(description='Browse a dataset')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--skip-type',
type=str,
nargs='+',
default=['DefaultFormatBundle', 'Normalize', 'Collect'],
help='skip some useless pipeline')
parser.add_argument(
'--output-dir',
default=None,
type=str,
help='If there is no display interface, you can save it')
parser.add_argument('--not-show', default=False, action='store_true')
parser.add_argument(
'--show-interval',
type=float,
default=2,
help='the interval of show (s)')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def retrieve_data_cfg(config_path, skip_type, cfg_options):
def skip_pipeline_steps(config):
config['pipeline'] = [
x for x in config.pipeline if x['type'] not in skip_type
]
cfg = Config.fromfile(config_path)
# replace the ${key} with the value of cfg.key
cfg = replace_cfg_vals(cfg)
# update data root according to MMDET_DATASETS
update_data_root(cfg)
if cfg_options is not None:
cfg.merge_from_dict(cfg_options)
train_data_cfg = cfg.data.train
while 'dataset' in train_data_cfg and train_data_cfg[
'type'] != 'MultiImageMixDataset':
train_data_cfg = train_data_cfg['dataset']
if isinstance(train_data_cfg, Sequence):
[skip_pipeline_steps(c) for c in train_data_cfg]
else:
skip_pipeline_steps(train_data_cfg)
return cfg
def main():
args = parse_args()
cfg = retrieve_data_cfg(args.config, args.skip_type, args.cfg_options)
if 'gt_semantic_seg' in cfg.train_pipeline[-1]['keys']:
cfg.data.train.pipeline = [
p for p in cfg.data.train.pipeline if p['type'] != 'SegRescale'
]
dataset = build_dataset(cfg.data.train)
progress_bar = mmcv.ProgressBar(len(dataset))
for item in dataset:
filename = os.path.join(args.output_dir,
Path(item['filename']).name
) if args.output_dir is not None else None
gt_bboxes = item['gt_bboxes']
gt_labels = item['gt_labels']
gt_masks = item.get('gt_masks', None)
if gt_masks is not None:
gt_masks = mask2ndarray(gt_masks)
gt_seg = item.get('gt_semantic_seg', None)
if gt_seg is not None:
pad_value = 255 # the padding value of gt_seg
sem_labels = np.unique(gt_seg)
all_labels = np.concatenate((gt_labels, sem_labels), axis=0)
all_labels, counts = np.unique(all_labels, return_counts=True)
stuff_labels = all_labels[np.logical_and(counts < 2,
all_labels != pad_value)]
stuff_masks = gt_seg[None] == stuff_labels[:, None, None]
gt_labels = np.concatenate((gt_labels, stuff_labels), axis=0)
gt_masks = np.concatenate((gt_masks, stuff_masks.astype(np.uint8)),
axis=0)
# If you need to show the bounding boxes,
# please comment the following line
gt_bboxes = None
imshow_det_bboxes(
item['img'],
gt_bboxes,
gt_labels,
gt_masks,
class_names=dataset.CLASSES,
show=not args.not_show,
wait_time=args.show_interval,
out_file=filename,
bbox_color=dataset.PALETTE,
text_color=(200, 200, 200),
mask_color=dataset.PALETTE)
progress_bar.update()
if __name__ == '__main__':
main()
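# Example invocation of this browsing tool (config path and output directory
# are placeholders; --not-show suppresses the display window):
#   python tools/misc/browse_dataset.py configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \
#       --output-dir work_dirs/browse --not-show --show-interval 1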
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
from collections.abc import Sequence
from pathlib import Path
import mmcv
import numpy as np
from mmcv import Config, DictAction
from mmdet.core.utils import mask2ndarray
from mmdet.core.visualization import imshow_det_bboxes
from mmdet.datasets.builder import build_dataset
from mmdet.utils import update_data_root
def parse_args():
parser = argparse.ArgumentParser(description='Browse a dataset')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--skip-type',
type=str,
nargs='+',
default=['DefaultFormatBundle', 'Normalize', 'Collect'],
help='skip some useless pipeline')
parser.add_argument(
'--output-dir',
default=None,
type=str,
help='If there is no display interface, you can save it')
parser.add_argument('--not-show', default=False, action='store_true')
parser.add_argument(
'--show-interval',
type=float,
default=2,
help='the interval of show (s)')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def retrieve_data_cfg(config_path, skip_type, cfg_options):
def skip_pipeline_steps(config):
config['pipeline'] = [
x for x in config.pipeline if x['type'] not in skip_type
]
cfg = Config.fromfile(config_path)
# update data root according to MMDET_DATASETS
update_data_root(cfg)
if cfg_options is not None:
cfg.merge_from_dict(cfg_options)
train_data_cfg = cfg.data.train
while 'dataset' in train_data_cfg and train_data_cfg[
'type'] != 'MultiImageMixDataset':
train_data_cfg = train_data_cfg['dataset']
if isinstance(train_data_cfg, Sequence):
[skip_pipeline_steps(c) for c in train_data_cfg]
else:
skip_pipeline_steps(train_data_cfg)
return cfg
def main():
args = parse_args()
cfg = retrieve_data_cfg(args.config, args.skip_type, args.cfg_options)
if 'gt_semantic_seg' in cfg.train_pipeline[-1]['keys']:
cfg.data.train.pipeline = [
p for p in cfg.data.train.pipeline if p['type'] != 'SegRescale'
]
dataset = build_dataset(cfg.data.train)
progress_bar = mmcv.ProgressBar(len(dataset))
for item in dataset:
filename = os.path.join(args.output_dir,
Path(item['filename']).name
) if args.output_dir is not None else None
gt_bboxes = item['gt_bboxes']
gt_labels = item['gt_labels']
gt_masks = item.get('gt_masks', None)
if gt_masks is not None:
gt_masks = mask2ndarray(gt_masks)
gt_seg = item.get('gt_semantic_seg', None)
if gt_seg is not None:
pad_value = 255 # the padding value of gt_seg
sem_labels = np.unique(gt_seg)
all_labels = np.concatenate((gt_labels, sem_labels), axis=0)
all_labels, counts = np.unique(all_labels, return_counts=True)
stuff_labels = all_labels[np.logical_and(counts < 2,
all_labels != pad_value)]
stuff_masks = gt_seg[None] == stuff_labels[:, None, None]
gt_labels = np.concatenate((gt_labels, stuff_labels), axis=0)
gt_masks = np.concatenate((gt_masks, stuff_masks.astype(np.uint8)),
axis=0)
# If you need to show the bounding boxes,
# please comment the following line
gt_bboxes = None
imshow_det_bboxes(
item['img'],
gt_bboxes,
gt_labels,
gt_masks,
class_names=dataset.CLASSES,
show=not args.not_show,
wait_time=args.show_interval,
out_file=filename,
bbox_color=dataset.PALETTE,
text_color=(200, 200, 200),
mask_color=dataset.PALETTE)
progress_bar.update()
if __name__ == '__main__':
main()
|
_base_ = './cascade-mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
|
_base_ = './cascade_mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_bbox_coder import BaseBBoxCoder
from .bucketing_bbox_coder import BucketingBBoxCoder
from .delta_xywh_bbox_coder import DeltaXYWHBBoxCoder
from .legacy_delta_xywh_bbox_coder import LegacyDeltaXYWHBBoxCoder
from .pseudo_bbox_coder import PseudoBBoxCoder
from .tblr_bbox_coder import TBLRBBoxCoder
from .yolo_bbox_coder import YOLOBBoxCoder
__all__ = [
'BaseBBoxCoder', 'PseudoBBoxCoder', 'DeltaXYWHBBoxCoder',
'LegacyDeltaXYWHBBoxCoder', 'TBLRBBoxCoder', 'YOLOBBoxCoder',
'BucketingBBoxCoder'
]
|
from .base_bbox_coder import BaseBBoxCoder
from .bucketing_bbox_coder import BucketingBBoxCoder
from .delta_xywh_bbox_coder import DeltaXYWHBBoxCoder
from .legacy_delta_xywh_bbox_coder import LegacyDeltaXYWHBBoxCoder
from .pseudo_bbox_coder import PseudoBBoxCoder
from .tblr_bbox_coder import TBLRBBoxCoder
from .yolo_bbox_coder import YOLOBBoxCoder
__all__ = [
'BaseBBoxCoder', 'PseudoBBoxCoder', 'DeltaXYWHBBoxCoder',
'LegacyDeltaXYWHBBoxCoder', 'TBLRBBoxCoder', 'YOLOBBoxCoder',
'BucketingBBoxCoder'
]
|
from .custom_image_torch_encoder import CustomImageTorchEncoder
|
from .custom_image_torch_encoder import CustomImageTorchEncoder
|
from unittest import TestCase
from datasets import List, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
def _create_example_records(self):
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def _create_example_dict(self):
data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
return Dataset.from_dict(data)
def test_create(self):
example_records = self._create_example_records()
dset = Dataset.from_list(example_records)
self.assertListEqual(dset.column_names, ["col_1", "col_2"])
for i, r in enumerate(dset):
self.assertDictEqual(r, example_records[i])
def test_list_dict_equivalent(self):
example_records = self._create_example_records()
dset = Dataset.from_list(example_records)
dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
self.assertEqual(dset.info, dset_from_dict.info)
def test_uneven_records(self): # checks what happens with missing columns
uneven_records = [{"col_1": 1}, {"col_2": "x"}]
dset = Dataset.from_list(uneven_records)
self.assertDictEqual(dset[0], {"col_1": 1})
self.assertDictEqual(dset[1], {"col_1": None}) # NB: first record is used for columns
def test_variable_list_records(self): # checks if the type can be inferred from the second record
list_records = [{"col_1": []}, {"col_1": [1, 2]}]
dset = Dataset.from_list(list_records)
self.assertEqual(dset.info.features["col_1"], List(Value("int64")))
def test_create_empty(self):
dset = Dataset.from_list([])
self.assertEqual(len(dset), 0)
self.assertListEqual(dset.column_names, [])
|
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
def _create_example_records(self):
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def _create_example_dict(self):
data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
return Dataset.from_dict(data)
def test_create(self):
example_records = self._create_example_records()
dset = Dataset.from_list(example_records)
self.assertListEqual(dset.column_names, ["col_1", "col_2"])
for i, r in enumerate(dset):
self.assertDictEqual(r, example_records[i])
def test_list_dict_equivalent(self):
example_records = self._create_example_records()
dset = Dataset.from_list(example_records)
dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
self.assertEqual(dset.info, dset_from_dict.info)
def test_uneven_records(self): # checks what happens with missing columns
uneven_records = [{"col_1": 1}, {"col_2": "x"}]
dset = Dataset.from_list(uneven_records)
self.assertDictEqual(dset[0], {"col_1": 1})
self.assertDictEqual(dset[1], {"col_1": None}) # NB: first record is used for columns
def test_variable_list_records(self): # checks if the type can be inferred from the second record
list_records = [{"col_1": []}, {"col_1": [1, 2]}]
dset = Dataset.from_list(list_records)
self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))
def test_create_empty(self):
dset = Dataset.from_list([])
self.assertEqual(len(dset), 0)
self.assertListEqual(dset.column_names, [])
|
from typing import Dict, List
import torch
from torchaudio._internal import module_utils as _mod_utils
@_mod_utils.requires_sox()
def set_seed(seed: int):
"""Set libsox's PRNG
Args:
seed (int): Seed value. Valid range is int32.
See Also:
http://sox.sourceforge.net/sox.html
"""
torch.ops.torchaudio.sox_utils_set_seed(seed)
@_mod_utils.requires_sox()
def set_verbosity(verbosity: int):
"""Set libsox's verbosity
Args:
verbosity (int): Set verbosity level of libsox.
* ``1`` failure messages
* ``2`` warnings
* ``3`` details of processing
* ``4``-``6`` increasing levels of debug messages
See Also:
http://sox.sourceforge.net/sox.html
"""
torch.ops.torchaudio.sox_utils_set_verbosity(verbosity)
@_mod_utils.requires_sox()
def set_buffer_size(buffer_size: int):
"""Set buffer size for sox effect chain
Args:
buffer_size (int): Set the size in bytes of the buffers used for processing audio.
See Also:
http://sox.sourceforge.net/sox.html
"""
torch.ops.torchaudio.sox_utils_set_buffer_size(buffer_size)
@_mod_utils.requires_sox()
def set_use_threads(use_threads: bool):
"""Set multithread option for sox effect chain
Args:
use_threads (bool): When ``True``, enables ``libsox``'s parallel effects channels processing.
To use multithreading, the underlying ``libsox`` has to be compiled with OpenMP support.
See Also:
http://sox.sourceforge.net/sox.html
"""
torch.ops.torchaudio.sox_utils_set_use_threads(use_threads)
@_mod_utils.requires_sox()
def list_effects() -> Dict[str, str]:
"""List the available sox effect names
Returns:
Dict[str, str]: Mapping from ``effect name`` to ``usage``
"""
return dict(torch.ops.torchaudio.sox_utils_list_effects())
@_mod_utils.requires_sox()
def list_read_formats() -> List[str]:
"""List the supported audio formats for read
Returns:
List[str]: List of supported audio formats
"""
return torch.ops.torchaudio.sox_utils_list_read_formats()
@_mod_utils.requires_sox()
def list_write_formats() -> List[str]:
"""List the supported audio formats for write
Returns:
List[str]: List of supported audio formats
"""
return torch.ops.torchaudio.sox_utils_list_write_formats()
@_mod_utils.requires_sox()
def get_buffer_size() -> int:
"""Get buffer size for sox effect chain
Returns:
int: size in bytes of buffers used for processing audio.
"""
return torch.ops.torchaudio.sox_utils_get_buffer_size()
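# Minimal usage sketch (requires a torchaudio build with sox support; assumes
# the public torchaudio.utils.sox_utils path re-exports these helpers):
import torchaudio
torchaudio.utils.sox_utils.set_verbosity(1)
print(sorted(torchaudio.utils.sox_utils.list_effects())[:5])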
|
from typing import List, Dict
import torch
from torchaudio._internal import module_utils as _mod_utils
@_mod_utils.requires_sox()
def set_seed(seed: int):
"""Set libsox's PRNG
Args:
seed (int): Seed value. Valid range is int32.
See Also:
http://sox.sourceforge.net/sox.html
"""
torch.ops.torchaudio.sox_utils_set_seed(seed)
@_mod_utils.requires_sox()
def set_verbosity(verbosity: int):
"""Set libsox's verbosity
Args:
verbosity (int): Set verbosity level of libsox.
* ``1`` failure messages
* ``2`` warnings
* ``3`` details of processing
* ``4``-``6`` increasing levels of debug messages
See Also:
http://sox.sourceforge.net/sox.html
"""
torch.ops.torchaudio.sox_utils_set_verbosity(verbosity)
@_mod_utils.requires_sox()
def set_buffer_size(buffer_size: int):
"""Set buffer size for sox effect chain
Args:
buffer_size (int): Set the size in bytes of the buffers used for processing audio.
See Also:
http://sox.sourceforge.net/sox.html
"""
torch.ops.torchaudio.sox_utils_set_buffer_size(buffer_size)
@_mod_utils.requires_sox()
def set_use_threads(use_threads: bool):
"""Set multithread option for sox effect chain
Args:
use_threads (bool): When ``True``, enables ``libsox``'s parallel effects channels processing.
To use multithreading, the underlying ``libsox`` has to be compiled with OpenMP support.
See Also:
http://sox.sourceforge.net/sox.html
"""
torch.ops.torchaudio.sox_utils_set_use_threads(use_threads)
@_mod_utils.requires_sox()
def list_effects() -> Dict[str, str]:
"""List the available sox effect names
Returns:
Dict[str, str]: Mapping from ``effect name`` to ``usage``
"""
return dict(torch.ops.torchaudio.sox_utils_list_effects())
@_mod_utils.requires_sox()
def list_read_formats() -> List[str]:
"""List the supported audio formats for read
Returns:
List[str]: List of supported audio formats
"""
return torch.ops.torchaudio.sox_utils_list_read_formats()
@_mod_utils.requires_sox()
def list_write_formats() -> List[str]:
"""List the supported audio formats for write
Returns:
List[str]: List of supported audio formats
"""
return torch.ops.torchaudio.sox_utils_list_write_formats()
@_mod_utils.requires_sox()
def get_buffer_size() -> int:
"""Get buffer size for sox effect chain
Returns:
int: size in bytes of buffers used for processing audio.
"""
return torch.ops.torchaudio.sox_utils_get_buffer_size()
|
"""Tests using Scikit-Learn's bundled estimator_checks."""
from contextlib import contextmanager
import pytest
import sklearn
from packaging.version import parse as parse_version
from sklearn.utils.estimator_checks import parametrize_with_checks
import keras
from keras.src.backend import floatx
from keras.src.backend import set_floatx
from keras.src.layers import Dense
from keras.src.layers import Input
from keras.src.models import Model
from keras.src.wrappers import SKLearnClassifier
from keras.src.wrappers import SKLearnRegressor
from keras.src.wrappers import SKLearnTransformer
def wrapped_parametrize_with_checks(
estimators,
*,
legacy=True,
expected_failed_checks=None,
):
"""Wrapped `parametrize_with_checks` handling backwards compat."""
sklearn_version = parse_version(
parse_version(sklearn.__version__).base_version
)
if sklearn_version >= parse_version("1.6"):
return parametrize_with_checks(
estimators,
legacy=legacy,
expected_failed_checks=expected_failed_checks,
)
def patched_more_tags(estimator, expected_failed_checks):
import copy
original_tags = copy.deepcopy(sklearn.utils._tags._safe_tags(estimator))
def patched_more_tags(self):
original_tags.update({"_xfail_checks": expected_failed_checks})
return original_tags
estimator.__class__._more_tags = patched_more_tags
return estimator
estimators = [
patched_more_tags(estimator, expected_failed_checks(estimator))
for estimator in estimators
]
# legacy is not supported and ignored
return parametrize_with_checks(estimators)
def dynamic_model(X, y, loss, layers=[10]):
"""Creates a basic MLP classifier dynamically choosing binary/multiclass
classification loss and output activations.
"""
n_features_in = X.shape[1]
inp = Input(shape=(n_features_in,))
hidden = inp
for layer_size in layers:
hidden = Dense(layer_size, activation="relu")(hidden)
n_outputs = y.shape[1] if len(y.shape) > 1 else 1
out = [Dense(n_outputs, activation="softmax")(hidden)]
model = Model(inp, out)
model.compile(loss=loss, optimizer="rmsprop")
return model
@contextmanager
def use_floatx(x):
"""Context manager to temporarily
set the keras backend precision.
"""
_floatx = floatx()
set_floatx(x)
try:
yield
finally:
set_floatx(_floatx)
EXPECTED_FAILED_CHECKS = {
"SKLearnClassifier": {
"check_classifiers_regression_target": "not an issue in sklearn>=1.6",
"check_parameters_default_constructible": (
"not an issue in sklearn>=1.6"
),
"check_classifiers_one_label_sample_weights": (
"0 sample weight is not ignored"
),
"check_classifiers_classes": (
"with small test cases the estimator returns not all classes "
"sometimes"
),
"check_classifier_data_not_an_array": (
"This test assumes reproducibility in fit."
),
"check_supervised_y_2d": "This test assumes reproducibility in fit.",
"check_fit_idempotent": "This test assumes reproducibility in fit.",
},
"SKLearnRegressor": {
"check_parameters_default_constructible": (
"not an issue in sklearn>=1.6"
),
},
"SKLearnTransformer": {
"check_parameters_default_constructible": (
"not an issue in sklearn>=1.6"
),
},
}
@wrapped_parametrize_with_checks(
estimators=[
SKLearnClassifier(
model=dynamic_model,
model_kwargs={
"loss": "categorical_crossentropy",
"layers": [20, 20, 20],
},
fit_kwargs={"epochs": 5},
),
SKLearnRegressor(
model=dynamic_model,
model_kwargs={"loss": "mse"},
),
SKLearnTransformer(
model=dynamic_model,
model_kwargs={"loss": "mse"},
),
],
expected_failed_checks=lambda estimator: EXPECTED_FAILED_CHECKS[
type(estimator).__name__
],
)
def test_sklearn_estimator_checks(estimator, check):
"""Checks that can be passed with sklearn's default tolerances
and in a single epoch.
"""
try:
check(estimator)
except Exception as exc:
if keras.config.backend() in ["numpy", "openvino"] and (
isinstance(exc, NotImplementedError)
or "NotImplementedError" in str(exc)
):
pytest.xfail("Backend not implemented")
else:
raise
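# Quick standalone sanity sketch for dynamic_model above (random data; the
# shapes and loss choice are arbitrary, purely for illustration):
import numpy as np
X = np.random.rand(8, 3).astype("float32")
y = np.eye(2)[np.random.randint(0, 2, size=8)]  # one-hot labels, shape (8, 2)
m = dynamic_model(X, y, loss="categorical_crossentropy")
m.fit(X, y, epochs=1, verbose=0)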
|
"""Tests using Scikit-Learn's bundled estimator_checks."""
from contextlib import contextmanager
import pytest
import sklearn
from packaging.version import parse as parse_version
from sklearn.utils.estimator_checks import parametrize_with_checks
import keras
from keras.src.backend import floatx
from keras.src.backend import set_floatx
from keras.src.layers import Dense
from keras.src.layers import Input
from keras.src.models import Model
from keras.src.wrappers import SKLearnClassifier
from keras.src.wrappers import SKLearnRegressor
from keras.src.wrappers import SKLearnTransformer
def wrapped_parametrize_with_checks(
estimators,
*,
legacy: bool = True,
expected_failed_checks=None,
):
"""Wrapped `parametrize_with_checks` handling backwards compat."""
sklearn_version = parse_version(
parse_version(sklearn.__version__).base_version
)
if sklearn_version >= parse_version("1.6"):
return parametrize_with_checks(
estimators,
legacy=legacy,
expected_failed_checks=expected_failed_checks,
)
def patched_more_tags(estimator, expected_failed_checks):
import copy
original_tags = copy.deepcopy(sklearn.utils._tags._safe_tags(estimator))
def patched_more_tags(self):
original_tags.update({"_xfail_checks": expected_failed_checks})
return original_tags
estimator.__class__._more_tags = patched_more_tags
return estimator
estimators = [
patched_more_tags(estimator, expected_failed_checks(estimator))
for estimator in estimators
]
# legacy is not supported and ignored
return parametrize_with_checks(estimators)
def dynamic_model(X, y, loss, layers=[10]):
"""Creates a basic MLP classifier dynamically choosing binary/multiclass
classification loss and output activations.
"""
n_features_in = X.shape[1]
inp = Input(shape=(n_features_in,))
hidden = inp
for layer_size in layers:
hidden = Dense(layer_size, activation="relu")(hidden)
n_outputs = y.shape[1] if len(y.shape) > 1 else 1
out = [Dense(n_outputs, activation="softmax")(hidden)]
model = Model(inp, out)
model.compile(loss=loss, optimizer="rmsprop")
return model
@contextmanager
def use_floatx(x: str):
"""Context manager to temporarily
set the keras backend precision.
"""
_floatx = floatx()
set_floatx(x)
try:
yield
finally:
set_floatx(_floatx)
EXPECTED_FAILED_CHECKS = {
"SKLearnClassifier": {
"check_classifiers_regression_target": "not an issue in sklearn>=1.6",
"check_parameters_default_constructible": (
"not an issue in sklearn>=1.6"
),
"check_classifiers_one_label_sample_weights": (
"0 sample weight is not ignored"
),
"check_classifiers_classes": (
"with small test cases the estimator returns not all classes "
"sometimes"
),
"check_classifier_data_not_an_array": (
"This test assumes reproducibility in fit."
),
"check_supervised_y_2d": "This test assumes reproducibility in fit.",
"check_fit_idempotent": "This test assumes reproducibility in fit.",
},
"SKLearnRegressor": {
"check_parameters_default_constructible": (
"not an issue in sklearn>=1.6"
),
},
"SKLearnTransformer": {
"check_parameters_default_constructible": (
"not an issue in sklearn>=1.6"
),
},
}
@wrapped_parametrize_with_checks(
estimators=[
SKLearnClassifier(
model=dynamic_model,
model_kwargs={
"loss": "categorical_crossentropy",
"layers": [20, 20, 20],
},
fit_kwargs={"epochs": 5},
),
SKLearnRegressor(
model=dynamic_model,
model_kwargs={"loss": "mse"},
),
SKLearnTransformer(
model=dynamic_model,
model_kwargs={"loss": "mse"},
),
],
expected_failed_checks=lambda estimator: EXPECTED_FAILED_CHECKS[
type(estimator).__name__
],
)
def test_sklearn_estimator_checks(estimator, check):
"""Checks that can be passed with sklearn's default tolerances
and in a single epoch.
"""
try:
check(estimator)
except Exception as exc:
if keras.config.backend() in ["numpy", "openvino"] and (
isinstance(exc, NotImplementedError)
or "NotImplementedError" in str(exc)
):
pytest.xfail("Backend not implemented")
else:
raise
|
import numpy as np
from keras.src.api_export import keras_export
@keras_export(
[
"keras.utils.pad_sequences",
"keras.preprocessing.sequence.pad_sequences",
]
)
def pad_sequences(
sequences,
maxlen=None,
dtype="int32",
padding="pre",
truncating="pre",
value=0.0,
):
"""Pads sequences to the same length.
This function transforms a list (of length `num_samples`)
of sequences (lists of integers)
into a 2D NumPy array of shape `(num_samples, num_timesteps)`.
`num_timesteps` is either the `maxlen` argument if provided,
or the length of the longest sequence in the list.
Sequences that are shorter than `num_timesteps`
are padded with `value` until they are `num_timesteps` long.
Sequences longer than `num_timesteps` are truncated
so that they fit the desired length.
The position where padding or truncation happens is determined by
the arguments `padding` and `truncating`, respectively.
Pre-padding or removing values from the beginning of the sequence is the
default.
>>> sequence = [[1], [2, 3], [4, 5, 6]]
>>> keras.utils.pad_sequences(sequence)
array([[0, 0, 1],
[0, 2, 3],
[4, 5, 6]], dtype=int32)
>>> keras.utils.pad_sequences(sequence, value=-1)
array([[-1, -1, 1],
[-1, 2, 3],
[ 4, 5, 6]], dtype=int32)
>>> keras.utils.pad_sequences(sequence, padding='post')
array([[1, 0, 0],
[2, 3, 0],
[4, 5, 6]], dtype=int32)
>>> keras.utils.pad_sequences(sequence, maxlen=2)
array([[0, 1],
[2, 3],
[5, 6]], dtype=int32)
Args:
sequences: List of sequences (each sequence is a list of integers).
maxlen: Optional Int, maximum length of all sequences. If not provided,
sequences will be padded to the length of the longest individual
sequence.
dtype: (Optional, defaults to `"int32"`). Type of the output sequences.
To pad sequences with variable length strings, you can use `object`.
padding: String, "pre" or "post" (optional, defaults to `"pre"`):
pad either before or after each sequence.
truncating: String, "pre" or "post" (optional, defaults to `"pre"`):
remove values from sequences larger than
`maxlen`, either at the beginning or at the end of the sequences.
value: Float or String, padding value. (Optional, defaults to `0.`)
Returns:
NumPy array with shape `(len(sequences), maxlen)`
"""
if not hasattr(sequences, "__len__"):
raise ValueError("`sequences` must be iterable.")
num_samples = len(sequences)
lengths = []
sample_shape = ()
flag = True
# take the sample shape from the first non-empty sequence,
# checking for consistency in the main loop below.
for x in sequences:
try:
lengths.append(len(x))
if flag and len(x):
sample_shape = np.asarray(x).shape[1:]
flag = False
except TypeError as e:
raise ValueError(
"`sequences` must be a list of iterables. "
f"Found non-iterable: {str(x)}"
) from e
if maxlen is None:
maxlen = np.max(lengths)
is_dtype_str = np.issubdtype(dtype, np.str_) or np.issubdtype(
dtype, np.unicode_
)
if isinstance(value, str) and dtype != object and not is_dtype_str:
raise ValueError(
f"`dtype` {dtype} is not compatible with `value`'s type: "
f"{type(value)}\nYou should set `dtype=object` for variable length "
"strings."
)
x = np.full((num_samples, maxlen) + sample_shape, value, dtype=dtype)
for idx, s in enumerate(sequences):
if not len(s):
continue # empty list/array was found
if truncating == "pre":
trunc = s[-maxlen:]
elif truncating == "post":
trunc = s[:maxlen]
else:
raise ValueError(f'Truncating type "{truncating}" not understood')
# check `trunc` has expected shape
trunc = np.asarray(trunc, dtype=dtype)
if trunc.shape[1:] != sample_shape:
raise ValueError(
f"Shape of sample {trunc.shape[1:]} of sequence at "
f"position {idx} is different from expected shape "
f"{sample_shape}"
)
if padding == "post":
x[idx, : len(trunc)] = trunc
elif padding == "pre":
x[idx, -len(trunc) :] = trunc
else:
raise ValueError(f'Padding type "{padding}" not understood')
return x
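# Sketch of the variable-length string case mentioned in the docstring:
# padding with a string value requires dtype=object.
padded = pad_sequences([["a"], ["b", "c"]], value="<pad>", dtype=object)
# array([['<pad>', 'a'],
#        ['b', 'c']], dtype=object)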
|
import numpy as np
from keras.src.api_export import keras_export
@keras_export(
[
"keras.utils.pad_sequences",
"keras.preprocessing.sequence.pad_sequences",
]
)
def pad_sequences(
sequences,
maxlen=None,
dtype="int32",
padding="pre",
truncating="pre",
value=0.0,
):
"""Pads sequences to the same length.
This function transforms a list (of length `num_samples`)
of sequences (lists of integers)
into a 2D NumPy array of shape `(num_samples, num_timesteps)`.
`num_timesteps` is either the `maxlen` argument if provided,
or the length of the longest sequence in the list.
Sequences that are shorter than `num_timesteps`
are padded with `value` until they are `num_timesteps` long.
Sequences longer than `num_timesteps` are truncated
so that they fit the desired length.
The position where padding or truncation happens is determined by
the arguments `padding` and `truncating`, respectively.
Pre-padding or removing values from the beginning of the sequence is the
default.
>>> sequence = [[1], [2, 3], [4, 5, 6]]
>>> keras.utils.pad_sequences(sequence)
array([[0, 0, 1],
[0, 2, 3],
[4, 5, 6]], dtype=int32)
>>> keras.utils.pad_sequences(sequence, value=-1)
array([[-1, -1, 1],
[-1, 2, 3],
[ 4, 5, 6]], dtype=int32)
>>> keras.utils.pad_sequences(sequence, padding='post')
array([[1, 0, 0],
[2, 3, 0],
[4, 5, 6]], dtype=int32)
>>> keras.utils.pad_sequences(sequence, maxlen=2)
array([[0, 1],
[2, 3],
[5, 6]], dtype=int32)
Args:
sequences: List of sequences (each sequence is a list of integers).
maxlen: Optional Int, maximum length of all sequences. If not provided,
sequences will be padded to the length of the longest individual
sequence.
dtype: (Optional, defaults to `"int32"`). Type of the output sequences.
To pad sequences with variable length strings, you can use `object`.
padding: String, "pre" or "post" (optional, defaults to `"pre"`):
pad either before or after each sequence.
truncating: String, "pre" or "post" (optional, defaults to `"pre"`):
remove values from sequences larger than
`maxlen`, either at the beginning or at the end of the sequences.
value: Float or String, padding value. (Optional, defaults to 0.)
Returns:
NumPy array with shape `(len(sequences), maxlen)`
"""
if not hasattr(sequences, "__len__"):
raise ValueError("`sequences` must be iterable.")
num_samples = len(sequences)
lengths = []
sample_shape = ()
flag = True
# take the sample shape from the first non-empty sequence,
# checking for consistency in the main loop below.
for x in sequences:
try:
lengths.append(len(x))
if flag and len(x):
sample_shape = np.asarray(x).shape[1:]
flag = False
except TypeError as e:
raise ValueError(
"`sequences` must be a list of iterables. "
f"Found non-iterable: {str(x)}"
) from e
if maxlen is None:
maxlen = np.max(lengths)
is_dtype_str = np.issubdtype(dtype, np.str_) or np.issubdtype(
dtype, np.unicode_
)
if isinstance(value, str) and dtype != object and not is_dtype_str:
raise ValueError(
f"`dtype` {dtype} is not compatible with `value`'s type: "
f"{type(value)}\nYou should set `dtype=object` for variable length "
"strings."
)
x = np.full((num_samples, maxlen) + sample_shape, value, dtype=dtype)
for idx, s in enumerate(sequences):
if not len(s):
continue # empty list/array was found
if truncating == "pre":
trunc = s[-maxlen:]
elif truncating == "post":
trunc = s[:maxlen]
else:
raise ValueError(f'Truncating type "{truncating}" not understood')
# check `trunc` has expected shape
trunc = np.asarray(trunc, dtype=dtype)
if trunc.shape[1:] != sample_shape:
raise ValueError(
f"Shape of sample {trunc.shape[1:]} of sequence at "
f"position {idx} is different from expected shape "
f"{sample_shape}"
)
if padding == "post":
x[idx, : len(trunc)] = trunc
elif padding == "pre":
x[idx, -len(trunc) :] = trunc
else:
raise ValueError(f'Padding type "{padding}" not understood')
return x
|
from typing import Generator, Optional
import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from docarray.typing import ImageUrl, NdArray
from docarray.utils.map import map_docs, map_docs_batched
from tests.units.typing.test_bytes import IMAGE_PATHS
N_DOCS = 2
def load_from_doc(d: ImageDoc) -> ImageDoc:
if d.url is not None:
d.tensor = d.url.load()
return d
@pytest.fixture()
def da():
da = DocList[ImageDoc]([ImageDoc(url=IMAGE_PATHS['png']) for _ in range(N_DOCS)])
return da
@pytest.mark.parametrize('backend', ['thread', 'process'])
def test_map(da, backend):
for tensor in da.tensor:
assert tensor is None
docs = list(map_docs(docs=da, func=load_from_doc, backend=backend))
assert len(docs) == N_DOCS
for doc in docs:
assert doc.tensor is not None
def test_map_multiprocessing_lambda_func_raise_exception(da):
with pytest.raises(ValueError, match='Multiprocessing does not allow'):
list(map_docs(docs=da, func=lambda x: x, backend='process'))
def test_map_multiprocessing_local_func_raise_exception(da):
def local_func(x):
return x
with pytest.raises(ValueError, match='Multiprocessing does not allow'):
list(map_docs(docs=da, func=local_func, backend='process'))
@pytest.mark.parametrize('backend', ['thread', 'process'])
def test_check_order(backend):
da = DocList[ImageDoc]([ImageDoc(id=i) for i in range(N_DOCS)])
docs = list(map_docs(docs=da, func=load_from_doc, backend=backend))
assert len(docs) == N_DOCS
for i, doc in enumerate(docs):
assert doc.id == str(i)
def load_from_da(da: DocList) -> DocList:
for doc in da:
doc.tensor = doc.url.load()
return da
class MyImage(BaseDoc):
tensor: Optional[NdArray]
url: ImageUrl
@pytest.mark.slow
@pytest.mark.parametrize('n_docs,batch_size', [(10, 5), (10, 8)])
@pytest.mark.parametrize('backend', ['thread', 'process'])
def test_map_docs_batched(n_docs, batch_size, backend):
da = DocList[MyImage]([MyImage(url=IMAGE_PATHS['png']) for _ in range(n_docs)])
it = map_docs_batched(
docs=da, func=load_from_da, batch_size=batch_size, backend=backend
)
assert isinstance(it, Generator)
for batch in it:
assert isinstance(batch, DocList[MyImage])
|
from typing import Generator, Optional
import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from docarray.typing import ImageUrl, NdArray
from docarray.utils.map import map_docs, map_docs_batched
from tests.units.typing.test_bytes import IMAGE_PATHS
N_DOCS = 2
def load_from_doc(d: ImageDoc) -> ImageDoc:
if d.url is not None:
d.tensor = d.url.load()
return d
@pytest.fixture()
def da():
da = DocList[ImageDoc]([ImageDoc(url=IMAGE_PATHS['png']) for _ in range(N_DOCS)])
return da
@pytest.mark.parametrize('backend', ['thread', 'process'])
def test_map(da, backend):
for tensor in da.tensor:
assert tensor is None
docs = list(map_docs(docs=da, func=load_from_doc, backend=backend))
assert len(docs) == N_DOCS
for doc in docs:
assert doc.tensor is not None
def test_map_multiprocessing_lambda_func_raise_exception(da):
with pytest.raises(ValueError, match='Multiprocessing does not allow'):
list(map_docs(docs=da, func=lambda x: x, backend='process'))
def test_map_multiprocessing_local_func_raise_exception(da):
def local_func(x):
return x
with pytest.raises(ValueError, match='Multiprocessing does not allow'):
list(map_docs(docs=da, func=local_func, backend='process'))
@pytest.mark.parametrize('backend', ['thread', 'process'])
def test_check_order(backend):
da = DocList[ImageDoc]([ImageDoc(id=i) for i in range(N_DOCS)])
docs = list(map_docs(docs=da, func=load_from_doc, backend=backend))
assert len(docs) == N_DOCS
for i, doc in enumerate(docs):
assert doc.id == str(i)
def load_from_da(da: DocList) -> DocList:
for doc in da:
doc.tensor = doc.url.load()
return da
class MyImage(BaseDoc):
tensor: Optional[NdArray]
url: ImageUrl
@pytest.mark.slow
@pytest.mark.parametrize('n_docs,batch_size', [(10, 5), (10, 8)])
@pytest.mark.parametrize('backend', ['thread', 'process'])
def test_map_docs_batched(n_docs, batch_size, backend):
da = DocList[MyImage]([MyImage(url=IMAGE_PATHS['png']) for _ in range(n_docs)])
it = map_docs_batched(
docs=da, func=load_from_da, batch_size=batch_size, backend=backend
)
assert isinstance(it, Generator)
for batch in it:
assert isinstance(batch, DocList[MyImage])
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.runner.hooks import HOOKS, Hook
@HOOKS.register_module()
class MemoryProfilerHook(Hook):
"""Memory profiler hook recording memory information including virtual
memory, swap memory, and the memory of the current process.
Args:
interval (int): Checking interval (every k iterations).
Default: 50.
"""
def __init__(self, interval=50):
try:
from psutil import swap_memory, virtual_memory
self._swap_memory = swap_memory
self._virtual_memory = virtual_memory
except ImportError:
raise ImportError('psutil is not installed, please install it by: '
'pip install psutil')
try:
from memory_profiler import memory_usage
self._memory_usage = memory_usage
except ImportError:
raise ImportError(
'memory_profiler is not installed, please install it by: '
'pip install memory_profiler')
self.interval = interval
def after_iter(self, runner):
if self.every_n_iters(runner, self.interval):
# in Byte
virtual_memory = self._virtual_memory()
swap_memory = self._swap_memory()
# in MB
process_memory = self._memory_usage()[0]
factor = 1024 * 1024
runner.logger.info(
'Memory information '
'available_memory: '
f'{round(virtual_memory.available / factor)} MB, '
'used_memory: '
f'{round(virtual_memory.used / factor)} MB, '
f'memory_utilization: {virtual_memory.percent} %, '
'available_swap_memory: '
f'{round((swap_memory.total - swap_memory.used) / factor)}'
' MB, '
f'used_swap_memory: {round(swap_memory.used / factor)} MB, '
f'swap_memory_utilization: {swap_memory.percent} %, '
'current_process_memory: '
f'{round(process_memory)} MB')
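# Hedged config sketch: in an MMDetection-style config this hook is enabled
# through the custom_hooks list (the interval value here is arbitrary):
custom_hooks = [dict(type='MemoryProfilerHook', interval=50)]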
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.runner.hooks import HOOKS, Hook
@HOOKS.register_module()
class MemoryProfilerHook(Hook):
"""Memory profiler hook recording memory information: virtual memory, swap
memory and memory of current process.
Args:
interval (int): Checking interval (every k iterations).
Default: 50.
"""
def __init__(self, interval=50):
try:
from psutil import swap_memory, virtual_memory
self._swap_memory = swap_memory
self._virtual_memory = virtual_memory
except ImportError:
raise ImportError('psutil is not installed, please install it by: '
'pip install psutil')
try:
from memory_profiler import memory_usage
self._memory_usage = memory_usage
except ImportError:
raise ImportError(
'memory_profiler is not installed, please install it by: '
'pip install memory_profiler')
self.interval = interval
def after_iter(self, runner):
if self.every_n_iters(runner, self.interval):
# in Byte
virtual_memory = self._virtual_memory()
swap_memory = self._swap_memory()
# in MB
process_memory = self._memory_usage()[0]
factor = 1024 * 1024
runner.logger.info(
'Memory information '
'available_memory: '
f'{round(virtual_memory.available / factor)} MB, '
'used_memory: '
f'{round(virtual_memory.used / factor)} MB, '
f'memory_utilization: {virtual_memory.percent} %, '
'available_swap_memory: '
f'{round((swap_memory.total - swap_memory.used) / factor)}'
'MB, '
f'used_swap_memory: {round(swap_memory.used / factor)} MB, '
f'swap_memory_utilization: {swap_memory.percent} %, '
'current_process_memory: '
f'{round(process_memory)} MB')
|
from pathlib import Path
from typing import Dict, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document, ImageDocument
class ImageTabularChartReader(BaseReader):
"""
Image parser.
Extract tabular data from a chart or figure.
"""
def __init__(
self,
parser_config: Optional[Dict] = None,
keep_image: bool = False,
max_output_tokens=512,
prompt: str = "Generate underlying data table of the figure below:",
):
"""Init params."""
if parser_config is None:
try:
import torch
from PIL import Image # noqa: F401
from transformers import (
Pix2StructForConditionalGeneration,
Pix2StructProcessor,
)
except ImportError:
raise ImportError(
"Please install extra dependencies that are required for "
"the ImageCaptionReader: "
"`pip install torch transformers Pillow`"
)
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if torch.cuda.is_available() else torch.float32
processor = Pix2StructProcessor.from_pretrained("google/deplot")
model = Pix2StructForConditionalGeneration.from_pretrained(
"google/deplot", torch_dtype=dtype
)
parser_config = {
"processor": processor,
"model": model,
"device": device,
"dtype": dtype,
}
self._parser_config = parser_config
self._keep_image = keep_image
self._max_output_tokens = max_output_tokens
self._prompt = prompt
def load_data(
self, file: Path, extra_info: Optional[Dict] = None
) -> List[Document]:
"""Parse file."""
from llama_index.core.img_utils import img_2_b64
from PIL import Image
# load document image
image = Image.open(file)
if image.mode != "RGB":
image = image.convert("RGB")
# Encode image into base64 string and keep in document
image_str: Optional[str] = None
if self._keep_image:
image_str = img_2_b64(image)
# Parse image into text
model = self._parser_config["model"]
processor = self._parser_config["processor"]
device = self._parser_config["device"]
dtype = self._parser_config["dtype"]
model.to(device)
# unconditional image captioning
inputs = processor(image, self._prompt, return_tensors="pt").to(device, dtype)
out = model.generate(**inputs, max_new_tokens=self._max_output_tokens)
text_str = "Figure or chart with tabular data: " + processor.decode(
out[0], skip_special_tokens=True
)
return [
ImageDocument(
text=text_str,
image=image_str,
extra_info=extra_info or {},
)
]
|
from pathlib import Path
from typing import Dict, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document, ImageDocument
class ImageTabularChartReader(BaseReader):
"""Image parser.
Extract tabular data from a chart or figure.
"""
def __init__(
self,
parser_config: Optional[Dict] = None,
keep_image: bool = False,
max_output_tokens=512,
prompt: str = "Generate underlying data table of the figure below:",
):
"""Init params."""
if parser_config is None:
try:
import torch
from PIL import Image # noqa: F401
from transformers import (
Pix2StructForConditionalGeneration,
Pix2StructProcessor,
)
except ImportError:
raise ImportError(
"Please install extra dependencies that are required for "
"the ImageCaptionReader: "
"`pip install torch transformers Pillow`"
)
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if torch.cuda.is_available() else torch.float32
processor = Pix2StructProcessor.from_pretrained("google/deplot")
model = Pix2StructForConditionalGeneration.from_pretrained(
"google/deplot", torch_dtype=dtype
)
parser_config = {
"processor": processor,
"model": model,
"device": device,
"dtype": dtype,
}
self._parser_config = parser_config
self._keep_image = keep_image
self._max_output_tokens = max_output_tokens
self._prompt = prompt
def load_data(
self, file: Path, extra_info: Optional[Dict] = None
) -> List[Document]:
"""Parse file."""
from llama_index.core.img_utils import img_2_b64
from PIL import Image
# load document image
image = Image.open(file)
if image.mode != "RGB":
image = image.convert("RGB")
# Encode image into base64 string and keep in document
image_str: Optional[str] = None
if self._keep_image:
image_str = img_2_b64(image)
# Parse image into text
model = self._parser_config["model"]
processor = self._parser_config["processor"]
device = self._parser_config["device"]
dtype = self._parser_config["dtype"]
model.to(device)
# unconditional image captioning
inputs = processor(image, self._prompt, return_tensors="pt").to(device, dtype)
out = model.generate(**inputs, max_new_tokens=self._max_output_tokens)
text_str = "Figure or chart with tabular data: " + processor.decode(
out[0], skip_special_tokens=True
)
return [
ImageDocument(
text=text_str,
image=image_str,
extra_info=extra_info or {},
)
]
|
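A hedged usage sketch for the reader above; "chart.png" is a placeholder path, and the google/deplot weights are downloaded on first instantiation.
from pathlib import Path

# Parse a local chart image into an ImageDocument carrying the linearized table.
reader = ImageTabularChartReader(keep_image=True)
docs = reader.load_data(Path("chart.png"))  # placeholder file
print(docs[0].text)  # "Figure or chart with tabular data: ..."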
from typing import AsyncGenerator, Generator, Optional
import pytest
from jina import Client, Executor, requests
from jina._docarray import Document, DocumentArray
from jina.helper import random_port
class MyDocument(Document):
text: str
number: Optional[int]
class OutputDocument(Document):
text: str
class MyExecutor(Executor):
@requests(on='/hello')
async def task(self, doc: MyDocument, **kwargs) -> MyDocument:
for i in range(100):
yield MyDocument(text=f'{doc.text} {doc.number + i}')
class CustomResponseExecutor(Executor):
@requests(on='/task1')
async def task1(self, doc: MyDocument, **kwargs) -> OutputDocument:
for i in range(100):
yield OutputDocument(text=f'{doc.text} {doc.number}-{i}-task1')
@requests(on='/task2')
async def task2(
self, doc: MyDocument, **kwargs
) -> Generator[OutputDocument, None, None]:
for i in range(100):
yield OutputDocument(text=f'{doc.text} {doc.number}-{i}-task2')
@requests(on='/task3')
async def task3(
self, doc: MyDocument, **kwargs
) -> AsyncGenerator[OutputDocument, None]:
for i in range(100):
yield OutputDocument(text=f'{doc.text} {doc.number}-{i}-task3')
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['http', 'grpc'])
async def test_streaming_deployment(protocol):
from jina import Deployment
port = random_port()
with Deployment(
uses=MyExecutor,
timeout_ready=-1,
protocol=protocol,
cors=True,
port=port,
include_gateway=False,
):
client = Client(port=port, protocol=protocol, cors=True, asyncio=True)
i = 10
async for doc in client.stream_doc(
on='/hello',
inputs=MyDocument(text='hello world', number=i),
return_type=MyDocument,
):
assert doc.text == f'hello world {i}'
i += 1
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['http', 'grpc'])
@pytest.mark.parametrize('endpoint', ['task1', 'task2', 'task3'])
async def test_streaming_custom_response(protocol, endpoint):
from jina import Deployment
port = random_port()
with Deployment(
uses=CustomResponseExecutor,
timeout_ready=-1,
protocol=protocol,
cors=True,
port=port,
include_gateway=False,
):
client = Client(port=port, protocol=protocol, cors=True, asyncio=True)
i = 0
async for doc in client.stream_doc(
on=f'/{endpoint}',
inputs=MyDocument(text='hello world', number=5),
return_type=OutputDocument,
):
assert doc.text == f'hello world 5-{i}-{endpoint}'
i += 1
class Executor1(Executor):
@requests
def generator(self, doc: MyDocument, **kwargs) -> MyDocument:
yield MyDocument(text='new document')
@requests(on='/non_generator')
def non_generator(self, docs: DocumentArray, **kwargs):
return docs
class Executor2(Executor):
@requests
def non_generator(self, docs: DocumentArray, **kwargs):
return docs
@requests(on='/generator')
def generator(self, doc: MyDocument, **kwargs):
yield MyDocument(text='new document')
class Executor3(Executor):
@requests(on='/non_generator')
def non_generator(self, docs: DocumentArray, **kwargs):
return docs
@requests(on='/generator')
def generator(self, doc: MyDocument, **kwargs):
yield MyDocument(text='new document')
@pytest.mark.asyncio
@pytest.mark.parametrize(
'executor,expected',
[
('Executor1', {'/default': True, '/non_generator': False}),
('Executor2', {'/default': False, '/generator': True}),
('Executor3', {'/generator': True, '/non_generator': False}),
],
)
async def test_endpoint_discovery(executor, expected):
from google.protobuf import json_format
from jina.logging.logger import JinaLogger
from jina.parsers import set_pod_parser
from jina.serve.runtimes.worker.request_handling import WorkerRequestHandler
args = set_pod_parser().parse_args(['--uses', executor])
handler = WorkerRequestHandler(args, JinaLogger('data request handler'))
res = await handler.endpoint_discovery(None, None)
for endpoint, is_generator in expected.items():
assert (
json_format.MessageToDict(res.schemas)[endpoint]['is_generator']
== is_generator
)
|
from typing import Optional
import pytest
from jina import Client, Executor, requests
from jina._docarray import Document, DocumentArray
from jina.helper import random_port
class MyDocument(Document):
text: str
number: Optional[int]
class MyExecutor(Executor):
@requests(on='/hello')
async def task(self, doc: MyDocument, **kwargs):
for i in range(100):
yield MyDocument(text=f'{doc.text} {doc.number + i}')
@pytest.mark.asyncio
async def test_streaming_sse_http_deployment():
from jina import Deployment
port = random_port()
with Deployment(
uses=MyExecutor,
timeout_ready=-1,
protocol='http',
cors=True,
port=port,
include_gateway=False,
):
client = Client(port=port, protocol='http', cors=True, asyncio=True)
i = 10
async for doc in client.stream_doc(
on='/hello',
inputs=MyDocument(text='hello world', number=i),
return_type=MyDocument,
):
assert doc.text == f'hello world {i}'
i += 1
class Executor1(Executor):
@requests
def generator(self, doc: MyDocument, **kwargs):
yield MyDocument(text='new document')
@requests(on='/non_generator')
def non_generator(self, docs: DocumentArray, **kwargs):
return docs
class Executor2(Executor):
@requests
def non_generator(self, docs: DocumentArray, **kwargs):
return docs
@requests(on='/generator')
def generator(self, doc: MyDocument, **kwargs):
yield MyDocument(text='new document')
class Executor3(Executor):
@requests(on='/non_generator')
def non_generator(self, docs: DocumentArray, **kwargs):
return docs
@requests(on='/generator')
def generator(self, doc: MyDocument, **kwargs):
yield MyDocument(text='new document')
@pytest.mark.asyncio
@pytest.mark.parametrize(
'executor,expected',
[
('Executor1', {'/default': True, '/non_generator': False}),
('Executor2', {'/default': False, '/generator': True}),
('Executor3', {'/generator': True, '/non_generator': False}),
],
)
async def test_endpoint_discovery(executor, expected):
from google.protobuf import json_format
from jina.logging.logger import JinaLogger
from jina.parsers import set_pod_parser
from jina.serve.runtimes.worker.request_handling import WorkerRequestHandler
args = set_pod_parser().parse_args(['--uses', executor])
handler = WorkerRequestHandler(args, JinaLogger('data request handler'))
res = await handler.endpoint_discovery(None, None)
for endpoint, is_generator in expected.items():
assert (
json_format.MessageToDict(res.schemas)[endpoint]['is_generator']
== is_generator
)
|
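A condensed sketch of the streaming pattern the tests above exercise, reusing the MyExecutor and MyDocument definitions; the port is an illustrative constant rather than jina.helper.random_port().
import asyncio

from jina import Client, Deployment

async def main():
    port = 12345  # illustrative fixed port
    with Deployment(
        uses=MyExecutor,
        timeout_ready=-1,
        protocol='http',
        port=port,
        include_gateway=False,
    ):
        client = Client(port=port, protocol='http', asyncio=True)
        async for doc in client.stream_doc(
            on='/hello', inputs=MyDocument(text='hi', number=0), return_type=MyDocument
        ):
            print(doc.text)  # 'hi 0', 'hi 1', ...

asyncio.run(main())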
_base_ = ['./mask2former_r50_8xb2-lsj-50e_coco-panoptic.py']
num_things_classes = 80
num_stuff_classes = 0
num_classes = num_things_classes + num_stuff_classes
image_size = (1024, 1024)
batch_augments = [
dict(
type='BatchFixedSizePad',
size=image_size,
img_pad_value=0,
pad_mask=True,
mask_pad_value=0,
pad_seg=False)
]
data_preprocessor = dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32,
pad_mask=True,
mask_pad_value=0,
pad_seg=False,
batch_augments=batch_augments)
model = dict(
data_preprocessor=data_preprocessor,
panoptic_head=dict(
num_things_classes=num_things_classes,
num_stuff_classes=num_stuff_classes,
loss_cls=dict(class_weight=[1.0] * num_classes + [0.1])),
panoptic_fusion_head=dict(
num_things_classes=num_things_classes,
num_stuff_classes=num_stuff_classes),
test_cfg=dict(panoptic_on=False))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', prob=0.5),
# large scale jittering
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.1, 2.0),
resize_type='Resize',
keep_ratio=True),
dict(
type='RandomCrop',
crop_size=image_size,
crop_type='absolute',
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-5, 1e-5), by_mask=True),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    # If there is no ground-truth annotation, remove this step from the pipeline
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
train_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
pipeline=train_pipeline))
val_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
_delete_=True,
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'],
format_only=False)
test_evaluator = val_evaluator
|
_base_ = ['./mask2former_r50_8xb2-lsj-50e_coco-panoptic.py']
num_things_classes = 80
num_stuff_classes = 0
num_classes = num_things_classes + num_stuff_classes
image_size = (1024, 1024)
batch_augments = [
dict(
type='BatchFixedSizePad',
size=image_size,
img_pad_value=0,
pad_mask=True,
mask_pad_value=0,
pad_seg=False)
]
data_preprocessor = dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32,
pad_mask=True,
mask_pad_value=0,
pad_seg=False,
batch_augments=batch_augments)
model = dict(
data_preprocessor=data_preprocessor,
panoptic_head=dict(
num_things_classes=num_things_classes,
num_stuff_classes=num_stuff_classes,
loss_cls=dict(class_weight=[1.0] * num_classes + [0.1])),
panoptic_fusion_head=dict(
num_things_classes=num_things_classes,
num_stuff_classes=num_stuff_classes),
test_cfg=dict(panoptic_on=False))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', prob=0.5),
# large scale jittering
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.1, 2.0),
resize_type='Resize',
keep_ratio=True),
dict(
type='RandomCrop',
crop_size=image_size,
crop_type='absolute',
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-5, 1e-5), by_mask=True),
dict(type='PackDetInputs')
]
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
train_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
pipeline=train_pipeline))
val_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/')))
test_dataloader = val_dataloader
val_evaluator = dict(
_delete_=True,
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'],
format_only=False)
test_evaluator = val_evaluator
|
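A hedged sketch of how a config like this is resolved, assuming mmengine and an MMDetection checkout; the file path is a plausible placeholder. Note that `_delete_=True` replaces the inherited `val_evaluator` from the `_base_` panoptic config instead of merging with it.
from mmengine.config import Config

# Placeholder path into an MMDetection source tree.
cfg = Config.fromfile('configs/mask2former/mask2former_r50_8xb2-lsj-50e_coco.py')
print(cfg.val_evaluator['type'])  # 'CocoMetric', not the inherited panoptic metric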
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.15.1.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.15.1.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
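A minimal sketch of the primary entry point re-exported above; "imdb" is an illustrative public dataset id and fetching it requires network access on first use.
from datasets import load_dataset

ds = load_dataset("imdb", split="train")
print(ds[0]["text"][:80])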
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.dtype_policies.dtype_policy import DTypePolicy as DTypePolicy
from keras.src.dtype_policies.dtype_policy import DTypePolicy as Policy
from keras.src.dtype_policies.dtype_policy import dtype_policy as dtype_policy
from keras.src.dtype_policies.dtype_policy import dtype_policy as global_policy
from keras.src.dtype_policies.dtype_policy import (
set_dtype_policy as set_dtype_policy,
)
from keras.src.dtype_policies.dtype_policy import (
set_dtype_policy as set_global_policy,
)
from keras.src.optimizers.loss_scale_optimizer import (
LossScaleOptimizer as LossScaleOptimizer,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import DTypePolicy as Policy
from keras.src.dtype_policies.dtype_policy import dtype_policy
from keras.src.dtype_policies.dtype_policy import dtype_policy as global_policy
from keras.src.dtype_policies.dtype_policy import set_dtype_policy
from keras.src.dtype_policies.dtype_policy import (
set_dtype_policy as set_global_policy,
)
from keras.src.optimizers.loss_scale_optimizer import LossScaleOptimizer
|
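A hedged sketch using the aliases exported above, assuming the autogenerated module is exposed as keras.mixed_precision; "mixed_float16" is the standard policy name for float16 compute with float32 variables.
from keras import mixed_precision

mixed_precision.set_global_policy('mixed_float16')
print(mixed_precision.global_policy().name)  # mixed_float16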
from argparse import ArgumentParser
from pathlib import Path
import mir_eval
import torch
from lightning_train import _get_dataloader, _get_model, sisdri_metric
def _eval(model, data_loader, device):
results = torch.zeros(4)
with torch.no_grad():
for _, batch in enumerate(data_loader):
mix, src, mask = batch
mix, src, mask = mix.to(device), src.to(device), mask.to(device)
est = model(mix)
sisdri = sisdri_metric(est, src, mix, mask)
src = src.cpu().detach().numpy()
est = est.cpu().detach().numpy()
mix = mix.repeat(1, src.shape[1], 1).cpu().detach().numpy()
sdr, sir, sar, _ = mir_eval.separation.bss_eval_sources(src[0], est[0])
sdr_mix, sir_mix, sar_mix, _ = mir_eval.separation.bss_eval_sources(src[0], mix[0])
results += torch.tensor(
[sdr.mean() - sdr_mix.mean(), sisdri, sir.mean() - sir_mix.mean(), sar.mean() - sar_mix.mean()]
)
results /= len(data_loader)
print("SDR improvement: ", results[0].item())
print("Si-SDR improvement: ", results[1].item())
print("SIR improvement: ", results[2].item())
print("SAR improvement: ", results[3].item())
def cli_main():
parser = ArgumentParser()
parser.add_argument("--dataset", default="librimix", type=str, choices=["wsj0mix", "librimix"])
parser.add_argument(
"--root-dir",
type=Path,
help="The path to the directory where the directory ``Libri2Mix`` or ``Libri3Mix`` is stored.",
)
parser.add_argument(
"--librimix-tr-split",
default="train-360",
choices=["train-360", "train-100"],
help="The training partition of librimix dataset. (default: ``train-360``)",
)
parser.add_argument(
"--librimix-task",
default="sep_clean",
type=str,
choices=["sep_clean", "sep_noisy", "enh_single", "enh_both"],
help="The task to perform (separation or enhancement, noisy or clean). (default: ``sep_clean``)",
)
parser.add_argument(
"--num-speakers", default=2, type=int, help="The number of speakers in the mixture. (default: 2)"
)
parser.add_argument(
"--sample-rate",
default=8000,
type=int,
help="Sample rate of audio files in the given dataset. (default: 8000)",
)
parser.add_argument(
"--exp-dir", default=Path("./exp"), type=Path, help="The directory to save checkpoints and logs."
)
parser.add_argument("--gpu-device", default=-1, type=int, help="The gpu device for model inference. (default: -1)")
args = parser.parse_args()
model = _get_model(num_sources=2)
state_dict = torch.load(args.exp_dir / "best_model.pth")
model.load_state_dict(state_dict)
if args.gpu_device != -1:
device = torch.device("cuda:" + str(args.gpu_device))
else:
device = torch.device("cpu")
model = model.to(device)
_, _, eval_loader = _get_dataloader(
args.dataset,
args.root_dir,
args.num_speakers,
args.sample_rate,
1, # batch size is set to 1 to avoid masking
0, # set num_workers to 0
args.librimix_task,
args.librimix_tr_split,
)
_eval(model, eval_loader, device)
if __name__ == "__main__":
cli_main()
|
from argparse import ArgumentParser
from pathlib import Path
import mir_eval
import torch
from lightning_train import _get_dataloader, _get_model, sisdri_metric
def _eval(model, data_loader, device):
results = torch.zeros(4)
with torch.no_grad():
for _, batch in enumerate(data_loader):
mix, src, mask = batch
mix, src, mask = mix.to(device), src.to(device), mask.to(device)
est = model(mix)
sisdri = sisdri_metric(est, src, mix, mask)
src = src.cpu().detach().numpy()
est = est.cpu().detach().numpy()
mix = mix.repeat(1, src.shape[1], 1).cpu().detach().numpy()
sdr, sir, sar, _ = mir_eval.separation.bss_eval_sources(src[0], est[0])
sdr_mix, sir_mix, sar_mix, _ = mir_eval.separation.bss_eval_sources(src[0], mix[0])
results += torch.tensor(
[sdr.mean() - sdr_mix.mean(), sisdri, sir.mean() - sir_mix.mean(), sar.mean() - sar_mix.mean()]
)
results /= len(data_loader)
print("SDR improvement: ", results[0].item())
print("Si-SDR improvement: ", results[1].item())
print("SIR improvement: ", results[2].item())
print("SAR improvement: ", results[3].item())
def cli_main():
parser = ArgumentParser()
parser.add_argument("--dataset", default="librimix", type=str, choices=["wsj0-mix", "librimix"])
parser.add_argument(
"--root-dir",
type=Path,
help="The path to the directory where the directory ``Libri2Mix`` or ``Libri3Mix`` is stored.",
)
parser.add_argument(
"--librimix-tr-split",
default="train-360",
choices=["train-360", "train-100"],
help="The training partition of librimix dataset. (default: ``train-360``)",
)
parser.add_argument(
"--librimix-task",
default="sep_clean",
type=str,
choices=["sep_clean", "sep_noisy", "enh_single", "enh_both"],
help="The task to perform (separation or enhancement, noisy or clean). (default: ``sep_clean``)",
)
parser.add_argument(
"--num-speakers", default=2, type=int, help="The number of speakers in the mixture. (default: 2)"
)
parser.add_argument(
"--sample-rate",
default=8000,
type=int,
help="Sample rate of audio files in the given dataset. (default: 8000)",
)
parser.add_argument(
"--exp-dir", default=Path("./exp"), type=Path, help="The directory to save checkpoints and logs."
)
parser.add_argument("--gpu-device", default=-1, type=int, help="The gpu device for model inference. (default: -1)")
args = parser.parse_args()
model = _get_model(num_sources=2)
state_dict = torch.load(args.exp_dir / "best_model.pth")
model.load_state_dict(state_dict)
if args.gpu_device != -1:
device = torch.device("cuda:" + str(args.gpu_device))
else:
device = torch.device("cpu")
model = model.to(device)
_, _, eval_loader = _get_dataloader(
args.dataset,
        args.root_dir,
args.num_speakers,
args.sample_rate,
1, # batch size is set to 1 to avoid masking
0, # set num_workers to 0
args.librimix_task,
args.librimix_tr_split,
)
_eval(model, eval_loader, device)
if __name__ == "__main__":
cli_main()
|
from collections import Counter
from typing import Tuple, Dict, Union, Optional, TYPE_CHECKING
import numpy as np
from docarray.document.mixins.helper import _uri_to_blob, _to_datauri
if TYPE_CHECKING:
from docarray.typing import T
class TextDataMixin:
"""Provide helper functions for :class:`Document` to support text data."""
def load_uri_to_text(self: 'T', charset: str = 'utf-8') -> 'T':
"""Convert :attr:`.uri` to :attr`.text` inplace.
:param charset: charset may be any character set registered with IANA
:return: itself after processed
"""
blob = _uri_to_blob(self.uri)
self.text = blob.decode(charset)
return self
def get_vocabulary(self, text_attrs: Tuple[str, ...] = ('text',)) -> Dict[str, int]:
"""Get the text vocabulary in a counter dict that maps from the word to its frequency from all :attr:`text_fields`.
:param text_attrs: the textual attributes where vocabulary will be derived from
:return: a vocabulary in dictionary where key is the word, value is the frequency of that word in all text fields.
"""
all_tokens = Counter()
for f in text_attrs:
all_tokens.update(_text_to_word_sequence(getattr(self, f)))
return all_tokens
def convert_text_to_tensor(
self: 'T',
vocab: Dict[str, int],
max_length: Optional[int] = None,
dtype: str = 'int64',
) -> 'T':
"""Convert :attr:`.text` to :attr:`.tensor` inplace.
        In the end :attr:`.tensor` will be a 1D integer array of length `max_length`.
        To get the vocab of a DocumentArray, you can use `jina.types.document.converters.build_vocab` to build it.
:param vocab: a dictionary that maps a word to an integer index, `0` is reserved for padding, `1` is reserved
for unknown words in :attr:`.text`. So you should *not* include these two entries in `vocab`.
:param max_length: the maximum length of the sequence. Sequence longer than this are cut off from *beginning*.
Sequence shorter than this will be padded with `0` from right hand side.
:param dtype: the dtype of the generated :attr:`.tensor`
:return: Document itself after processed
"""
self.tensor = np.array(
_text_to_int_sequence(self.text, vocab, max_length), dtype=dtype
)
return self
def convert_tensor_to_text(
self: 'T', vocab: Union[Dict[str, int], Dict[int, str]], delimiter: str = ' '
) -> 'T':
"""Convert :attr:`.tensor` to :attr:`.text` inplace.
:param vocab: a dictionary that maps a word to an integer index, `0` is reserved for padding, `1` is reserved
for unknown words in :attr:`.text`
:param delimiter: the delimiter that used to connect all words into :attr:`.text`
:return: Document itself after processed
"""
if isinstance(list(vocab.keys())[0], str):
_vocab = {v: k for k, v in vocab.items()}
_text = []
for k in self.tensor:
k = int(k)
if k == 0:
continue
elif k == 1:
_text.append('<UNK>')
else:
_text.append(_vocab.get(k, '<UNK>'))
self.text = delimiter.join(_text)
return self
def convert_text_to_datauri(
self: 'T', charset: str = 'utf-8', base64: bool = False
) -> 'T':
"""Convert :attr:`.text` to data :attr:`.uri`.
:param charset: charset may be any character set registered with IANA
:param base64: used to encode arbitrary octet sequences into a form that satisfies the rules of 7bit.
Designed to be efficient for non-text 8 bit and binary data.
Sometimes used for text data that frequently uses non-US-ASCII characters.
:return: itself after processed
"""
self.uri = _to_datauri(self.mime_type, self.text, charset, base64, binary=False)
return self
def _text_to_word_sequence(
text, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n', split=' '
):
translate_dict = {c: split for c in filters}
translate_map = str.maketrans(translate_dict)
text = text.lower().translate(translate_map)
seq = text.split(split)
for i in seq:
if i:
yield i
def _text_to_int_sequence(text, vocab, max_len=None):
seq = _text_to_word_sequence(text)
vec = [vocab.get(s, 1) for s in seq]
if max_len:
if len(vec) < max_len:
vec = [0] * (max_len - len(vec)) + vec
elif len(vec) > max_len:
vec = vec[-max_len:]
return vec
|
from collections import Counter
from typing import Tuple, Dict, Union, Optional, TYPE_CHECKING
import numpy as np
from .helper import _uri_to_blob, _to_datauri
if TYPE_CHECKING:
from ...typing import T
class TextDataMixin:
"""Provide helper functions for :class:`Document` to support text data."""
def load_uri_to_text(self: 'T', charset: str = 'utf-8') -> 'T':
"""Convert :attr:`.uri` to :attr`.text` inplace.
:param charset: charset may be any character set registered with IANA
:return: itself after processed
"""
blob = _uri_to_blob(self.uri)
self.text = blob.decode(charset)
return self
def get_vocabulary(self, text_attrs: Tuple[str, ...] = ('text',)) -> Dict[str, int]:
"""Get the text vocabulary in a counter dict that maps from the word to its frequency from all :attr:`text_fields`.
:param text_attrs: the textual attributes where vocabulary will be derived from
:return: a vocabulary in dictionary where key is the word, value is the frequency of that word in all text fields.
"""
all_tokens = Counter()
for f in text_attrs:
all_tokens.update(_text_to_word_sequence(getattr(self, f)))
return all_tokens
def convert_text_to_tensor(
self: 'T',
vocab: Dict[str, int],
max_length: Optional[int] = None,
dtype: str = 'int64',
) -> 'T':
"""Convert :attr:`.text` to :attr:`.tensor` inplace.
        In the end :attr:`.tensor` will be a 1D integer array of length `max_length`.
        To get the vocab of a DocumentArray, you can use `jina.types.document.converters.build_vocab` to build it.
:param vocab: a dictionary that maps a word to an integer index, `0` is reserved for padding, `1` is reserved
for unknown words in :attr:`.text`. So you should *not* include these two entries in `vocab`.
:param max_length: the maximum length of the sequence. Sequence longer than this are cut off from *beginning*.
Sequence shorter than this will be padded with `0` from right hand side.
:param dtype: the dtype of the generated :attr:`.tensor`
:return: Document itself after processed
"""
self.tensor = np.array(
_text_to_int_sequence(self.text, vocab, max_length), dtype=dtype
)
return self
def convert_tensor_to_text(
self: 'T', vocab: Union[Dict[str, int], Dict[int, str]], delimiter: str = ' '
) -> 'T':
"""Convert :attr:`.tensor` to :attr:`.text` inplace.
:param vocab: a dictionary that maps a word to an integer index, `0` is reserved for padding, `1` is reserved
for unknown words in :attr:`.text`
:param delimiter: the delimiter that used to connect all words into :attr:`.text`
:return: Document itself after processed
"""
if isinstance(list(vocab.keys())[0], str):
_vocab = {v: k for k, v in vocab.items()}
_text = []
for k in self.tensor:
k = int(k)
if k == 0:
continue
elif k == 1:
_text.append('<UNK>')
else:
_text.append(_vocab.get(k, '<UNK>'))
self.text = delimiter.join(_text)
return self
def convert_text_to_datauri(
self: 'T', charset: str = 'utf-8', base64: bool = False
) -> 'T':
"""Convert :attr:`.text` to data :attr:`.uri`.
:param charset: charset may be any character set registered with IANA
:param base64: used to encode arbitrary octet sequences into a form that satisfies the rules of 7bit.
Designed to be efficient for non-text 8 bit and binary data.
Sometimes used for text data that frequently uses non-US-ASCII characters.
:return: itself after processed
"""
self.uri = _to_datauri(self.mime_type, self.text, charset, base64, binary=False)
return self
def _text_to_word_sequence(
text, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n', split=' '
):
translate_dict = {c: split for c in filters}
translate_map = str.maketrans(translate_dict)
text = text.lower().translate(translate_map)
seq = text.split(split)
for i in seq:
if i:
yield i
def _text_to_int_sequence(text, vocab, max_len=None):
seq = _text_to_word_sequence(text)
vec = [vocab.get(s, 1) for s in seq]
if max_len:
if len(vec) < max_len:
vec = [0] * (max_len - len(vec)) + vec
elif len(vec) > max_len:
vec = vec[-max_len:]
return vec
|
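A hedged round-trip sketch for the mixin above, assuming the docarray v1 Document API; index 0 is the pad token and 1 is <UNK>, so the toy vocab starts at 2.
from docarray import Document

doc = Document(text='hello world hello')
freq = doc.get_vocabulary()                      # Counter({'hello': 2, 'world': 1})
vocab = {w: i + 2 for i, w in enumerate(freq)}   # reserve 0 (pad) and 1 (<UNK>)
doc.convert_text_to_tensor(vocab, max_length=5)  # left-padded: [0, 0, 2, 3, 2]
doc.convert_tensor_to_text(vocab)
print(doc.text)  # 'hello world hello'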
from jina import Executor, requests
class MyExecutorToReload1(Executor):
def __init__(self, **kwargs):
super().__init__(**kwargs)
@requests()
def foo(self, docs, **kwargs):
for doc in docs:
doc.text = 'MyExecutorAfterReload'
|
from jina import Executor, requests
class MyExecutorToReload1(Executor):
def __init__(self, **kwargs):
super().__init__(**kwargs)
@requests()
def foo(self, docs, **kwargs):
for doc in docs:
doc.text = 'MyExecutorAfterReload'
|
"""Copyright 2024, XGBoost contributors"""
import json
import os
import tempfile
from typing import Type, Union
import numpy as np
import pytest
import xgboost as xgb
pl = pytest.importorskip("polars")
@pytest.mark.parametrize("DMatrixT", [xgb.DMatrix, xgb.QuantileDMatrix])
def test_polars_basic(
DMatrixT: Union[Type[xgb.DMatrix], Type[xgb.QuantileDMatrix]]
) -> None:
df = pl.DataFrame({"a": [1, 2, 3], "b": [3, 4, 5]})
Xy = DMatrixT(df)
assert Xy.num_row() == df.shape[0]
assert Xy.num_col() == df.shape[1]
assert Xy.num_nonmissing() == np.prod(df.shape)
# feature info
assert Xy.feature_names == df.columns
assert Xy.feature_types == ["int", "int"]
res = Xy.get_data().toarray()
res1 = df.to_numpy()
if isinstance(Xy, xgb.QuantileDMatrix):
# skip min values in the cut.
np.testing.assert_allclose(res[1:, :], res1[1:, :])
else:
np.testing.assert_allclose(res, res1)
# boolean
df = pl.DataFrame({"a": [True, False, False], "b": [False, False, True]})
Xy = DMatrixT(df)
np.testing.assert_allclose(
Xy.get_data().data, np.array([1, 0, 0, 0, 0, 1]), atol=1e-5
)
def test_polars_missing() -> None:
df = pl.DataFrame({"a": [1, None, 3], "b": [3, 4, None]})
Xy = xgb.DMatrix(df)
assert Xy.num_row() == df.shape[0]
assert Xy.num_col() == df.shape[1]
assert Xy.num_nonmissing() == 4
np.testing.assert_allclose(Xy.get_data().data, np.array([1, 3, 4, 3]))
np.testing.assert_allclose(Xy.get_data().indptr, np.array([0, 2, 3, 4]))
np.testing.assert_allclose(Xy.get_data().indices, np.array([0, 1, 1, 0]))
ser = pl.Series("y", np.arange(0, df.shape[0]))
Xy.set_info(label=ser)
booster = xgb.train({}, Xy, num_boost_round=1)
predt0 = booster.inplace_predict(df)
predt1 = booster.predict(Xy)
np.testing.assert_allclose(predt0, predt1)
def test_classifier() -> None:
from sklearn.datasets import make_classification, make_multilabel_classification
X, y = make_classification(random_state=2024)
X_df = pl.DataFrame(X)
y_ser = pl.Series(y)
clf0 = xgb.XGBClassifier()
clf0.fit(X_df, y_ser)
clf1 = xgb.XGBClassifier()
clf1.fit(X, y)
with tempfile.TemporaryDirectory() as tmpdir:
path0 = os.path.join(tmpdir, "clf0.json")
clf0.save_model(path0)
path1 = os.path.join(tmpdir, "clf1.json")
clf1.save_model(path1)
with open(path0, "r") as fd:
model0 = json.load(fd)
with open(path1, "r") as fd:
model1 = json.load(fd)
model0["learner"]["feature_names"] = []
model0["learner"]["feature_types"] = []
assert model0 == model1
predt0 = clf0.predict(X)
predt1 = clf1.predict(X)
np.testing.assert_allclose(predt0, predt1)
assert (clf0.feature_names_in_ == X_df.columns).all()
assert clf0.n_features_in_ == X_df.shape[1]
X, y = make_multilabel_classification(128)
X_df = pl.DataFrame(X)
y_df = pl.DataFrame(y)
clf = xgb.XGBClassifier(n_estimators=1)
clf.fit(X_df, y_df)
assert clf.n_classes_ == 2
X, y = make_classification(n_classes=3, n_informative=5)
X_df = pl.DataFrame(X)
y_ser = pl.Series(y)
clf = xgb.XGBClassifier(n_estimators=1)
clf.fit(X_df, y_ser)
assert clf.n_classes_ == 3
def test_regressor() -> None:
from sklearn.datasets import make_regression
X, y = make_regression(n_targets=3)
X_df = pl.DataFrame(X)
y_df = pl.DataFrame(y)
assert y_df.shape[1] == 3
reg0 = xgb.XGBRegressor()
reg0.fit(X_df, y_df)
reg1 = xgb.XGBRegressor()
reg1.fit(X, y)
predt0 = reg0.predict(X)
predt1 = reg1.predict(X)
np.testing.assert_allclose(predt0, predt1)
def test_categorical() -> None:
import polars as pl
df = pl.DataFrame(
{"f0": [1, 2, 3], "b": ["a", "b", "c"]},
schema=[("a", pl.Int64()), ("b", pl.Categorical())]
)
with pytest.raises(NotImplementedError, match="Categorical feature"):
xgb.DMatrix(df, enable_categorical=True)
|
"""Copyright 2024, XGBoost contributors"""
import json
import os
import tempfile
from typing import Type, Union
import numpy as np
import pytest
import xgboost as xgb
pl = pytest.importorskip("polars")
@pytest.mark.parametrize("DMatrixT", [xgb.DMatrix, xgb.QuantileDMatrix])
def test_polars_basic(
DMatrixT: Union[Type[xgb.DMatrix], Type[xgb.QuantileDMatrix]]
) -> None:
df = pl.DataFrame({"a": [1, 2, 3], "b": [3, 4, 5]})
Xy = DMatrixT(df)
assert Xy.num_row() == df.shape[0]
assert Xy.num_col() == df.shape[1]
assert Xy.num_nonmissing() == np.prod(df.shape)
# feature info
assert Xy.feature_names == df.columns
assert Xy.feature_types == ["int", "int"]
res = Xy.get_data().toarray()
res1 = df.to_numpy()
if isinstance(Xy, xgb.QuantileDMatrix):
# skip min values in the cut.
np.testing.assert_allclose(res[1:, :], res1[1:, :])
else:
np.testing.assert_allclose(res, res1)
# boolean
df = pl.DataFrame({"a": [True, False, False], "b": [False, False, True]})
Xy = DMatrixT(df)
np.testing.assert_allclose(
Xy.get_data().data, np.array([1, 0, 0, 0, 0, 1]), atol=1e-5
)
def test_polars_missing() -> None:
df = pl.DataFrame({"a": [1, None, 3], "b": [3, 4, None]})
Xy = xgb.DMatrix(df)
assert Xy.num_row() == df.shape[0]
assert Xy.num_col() == df.shape[1]
assert Xy.num_nonmissing() == 4
np.testing.assert_allclose(Xy.get_data().data, np.array([1, 3, 4, 3]))
np.testing.assert_allclose(Xy.get_data().indptr, np.array([0, 2, 3, 4]))
np.testing.assert_allclose(Xy.get_data().indices, np.array([0, 1, 1, 0]))
ser = pl.Series("y", np.arange(0, df.shape[0]))
Xy.set_info(label=ser)
booster = xgb.train({}, Xy, num_boost_round=1)
predt0 = booster.inplace_predict(df)
predt1 = booster.predict(Xy)
np.testing.assert_allclose(predt0, predt1)
def test_classifier() -> None:
from sklearn.datasets import make_classification, make_multilabel_classification
X, y = make_classification(random_state=2024)
X_df = pl.DataFrame(X)
y_ser = pl.Series(y)
clf0 = xgb.XGBClassifier()
clf0.fit(X_df, y_ser)
clf1 = xgb.XGBClassifier()
clf1.fit(X, y)
with tempfile.TemporaryDirectory() as tmpdir:
path0 = os.path.join(tmpdir, "clf0.json")
clf0.save_model(path0)
path1 = os.path.join(tmpdir, "clf1.json")
clf1.save_model(path1)
with open(path0, "r") as fd:
model0 = json.load(fd)
with open(path1, "r") as fd:
model1 = json.load(fd)
model0["learner"]["feature_names"] = []
model0["learner"]["feature_types"] = []
assert model0 == model1
predt0 = clf0.predict(X)
predt1 = clf1.predict(X)
np.testing.assert_allclose(predt0, predt1)
assert (clf0.feature_names_in_ == X_df.columns).all()
assert clf0.n_features_in_ == X_df.shape[1]
X, y = make_multilabel_classification(128)
X_df = pl.DataFrame(X)
y_df = pl.DataFrame(y)
clf = xgb.XGBClassifier(n_estimators=1)
clf.fit(X_df, y_df)
assert clf.n_classes_ == 2
X, y = make_classification(n_classes=3, n_informative=5)
X_df = pl.DataFrame(X)
y_ser = pl.Series(y)
clf = xgb.XGBClassifier(n_estimators=1)
clf.fit(X_df, y_ser)
assert clf.n_classes_ == 3
def test_regressor() -> None:
from sklearn.datasets import make_regression
X, y = make_regression(n_targets=3)
X_df = pl.DataFrame(X)
y_df = pl.DataFrame(y)
assert y_df.shape[1] == 3
reg0 = xgb.XGBRegressor()
reg0.fit(X_df, y_df)
reg1 = xgb.XGBRegressor()
reg1.fit(X, y)
predt0 = reg0.predict(X)
predt1 = reg1.predict(X)
np.testing.assert_allclose(predt0, predt1)
|
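A minimal end-to-end sketch of the polars path tested above; the frame is toy data and the objective is an illustrative choice.
import numpy as np
import polars as pl
import xgboost as xgb

df = pl.DataFrame({"a": [1.0, 2.0, 3.0, 4.0], "b": [4.0, 3.0, 2.0, 1.0]})
y = pl.Series("y", np.array([0.0, 1.0, 1.0, 0.0]))
Xy = xgb.DMatrix(df, label=y)
booster = xgb.train({"objective": "reg:squarederror"}, Xy, num_boost_round=2)
print(booster.inplace_predict(df))  # predictions straight from the polars frame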
import re
import tempfile
import unittest
from pathlib import Path
from datasets.utils.metadata import DatasetMetadata
def _dedent(string: str) -> str:
indent_level = min(re.search("^ +", t).end() if t.startswith(" ") else 0 for t in string.splitlines())
return "\n".join([line[indent_level:] for line in string.splitlines() if indent_level < len(line)])
README_YAML = """\
---
language:
- zh
- en
task_ids:
- sentiment-classification
---
# Begin of markdown
Some cool dataset card
"""
README_EMPTY_YAML = """\
---
---
# Begin of markdown
Some cool dataset card
"""
README_NO_YAML = """\
# Begin of markdown
Some cool dataset card
"""
class TestMetadataUtils(unittest.TestCase):
def test_metadata_dict_from_readme(self):
with tempfile.TemporaryDirectory() as tmp_dir:
path = Path(tmp_dir) / "README.md"
with open(path, "w+") as readme_file:
readme_file.write(README_YAML)
metadata_dict = DatasetMetadata.from_readme(path)
self.assertDictEqual(metadata_dict, {"language": ["zh", "en"], "task_ids": ["sentiment-classification"]})
with open(path, "w+") as readme_file:
readme_file.write(README_EMPTY_YAML)
metadata_dict = DatasetMetadata.from_readme(path)
self.assertDictEqual(metadata_dict, {})
with open(path, "w+") as readme_file:
readme_file.write(README_NO_YAML)
metadata_dict = DatasetMetadata.from_readme(path)
self.assertEqual(metadata_dict, {})
def test_from_yaml_string(self):
valid_yaml_string = _dedent(
"""\
annotations_creators:
- found
language_creators:
- found
language:
- en
license:
- unknown
multilinguality:
- monolingual
pretty_name: Test Dataset
size_categories:
- 10K<n<100K
source_datasets:
- extended|other-yahoo-webscope-l6
task_categories:
- question-answering
task_ids:
- open-domain-qa
"""
)
assert DatasetMetadata.from_yaml_string(valid_yaml_string)
duplicate_yaml_keys = _dedent(
"""\
annotations_creators:
- found
language:
- en
license:
- unknown
multilinguality:
- monolingual
pretty_name: Test Dataset
size_categories:
- 10K<n<100K
source_datasets:
- extended|other-yahoo-webscope-l6
task_categories:
- question-answering
task_ids:
- open-domain-qa
task_ids:
- open-domain-qa
"""
)
with self.assertRaises(TypeError):
DatasetMetadata.from_yaml_string(duplicate_yaml_keys)
valid_yaml_with_optional_keys = _dedent(
"""\
annotations_creators:
- found
language_creators:
- found
language:
- en
license:
- unknown
multilinguality:
- monolingual
pretty_name: Test Dataset
size_categories:
- 10K<n<100K
source_datasets:
- extended|other-yahoo-webscope-l6
task_categories:
- text-classification
task_ids:
- multi-class-classification
paperswithcode_id:
- squad
configs:
- en
train-eval-index:
- config: en
task: text-classification
task_id: multi_class_classification
splits:
train_split: train
eval_split: test
col_mapping:
text: text
label: target
metrics:
- type: accuracy
name: Accuracy
extra_gated_prompt: |
By clicking on “Access repository” below, you also agree to ImageNet Terms of Access:
[RESEARCHER_FULLNAME] (the "Researcher") has requested permission to use the ImageNet database (the "Database") at Princeton University and Stanford University. In exchange for such permission, Researcher hereby agrees to the following terms and conditions:
1. Researcher shall use the Database only for non-commercial research and educational purposes.
extra_gated_fields:
Company: text
Country: text
I agree to use this model for non-commerical use ONLY: checkbox
"""
)
assert DatasetMetadata.from_yaml_string(valid_yaml_with_optional_keys)
|
import re
import tempfile
import unittest
from pathlib import Path
from datasets.utils.metadata import DatasetMetadata
def _dedent(string: str) -> str:
indent_level = min(re.search("^ +", t).end() if t.startswith(" ") else 0 for t in string.splitlines())
return "\n".join([line[indent_level:] for line in string.splitlines() if indent_level < len(line)])
README_YAML = """\
---
language:
- zh
- en
task_ids:
- sentiment-classification
---
# Begin of markdown
Some cool dataset card
"""
README_EMPTY_YAML = """\
---
---
# Begin of markdown
Some cool dataset card
"""
README_NO_YAML = """\
# Begin of markdown
Some cool dataset card
"""
class TestMetadataUtils(unittest.TestCase):
def test_metadata_dict_from_readme(self):
with tempfile.TemporaryDirectory() as tmp_dir:
path = Path(tmp_dir) / "README.md"
with open(path, "w+") as readme_file:
readme_file.write(README_YAML)
metadata_dict = DatasetMetadata.from_readme(path)
self.assertDictEqual(metadata_dict, {"language": ["zh", "en"], "task_ids": ["sentiment-classification"]})
with open(path, "w+") as readme_file:
readme_file.write(README_EMPTY_YAML)
metadata_dict = DatasetMetadata.from_readme(path)
self.assertDictEqual(metadata_dict, {})
with open(path, "w+") as readme_file:
readme_file.write(README_NO_YAML)
metadata_dict = DatasetMetadata.from_readme(path)
self.assertEqual(metadata_dict, {})
def test_from_yaml_string(self):
valid_yaml_string = _dedent(
"""\
annotations_creators:
- found
language_creators:
- found
language:
- en
license:
- unknown
multilinguality:
- monolingual
pretty_name: Test Dataset
size_categories:
- 10K<n<100K
source_datasets:
- extended|other-yahoo-webscope-l6
task_categories:
- question-answering
task_ids:
- open-domain-qa
"""
)
assert DatasetMetadata.from_yaml_string(valid_yaml_string)
duplicate_yaml_keys = _dedent(
"""\
annotations_creators:
- found
language:
- en
license:
- unknown
multilinguality:
- monolingual
pretty_name: Test Dataset
size_categories:
- 10K<n<100K
source_datasets:
- extended|other-yahoo-webscope-l6
task_categories:
- question-answering
task_ids:
- open-domain-qa
task_ids:
- open-domain-qa
"""
)
with self.assertRaises(TypeError):
DatasetMetadata.from_yaml_string(duplicate_yaml_keys)
valid_yaml_with_optional_keys = _dedent(
"""\
annotations_creators:
- found
language_creators:
- found
language:
- en
license:
- unknown
multilinguality:
- monolingual
pretty_name: Test Dataset
size_categories:
- 10K<n<100K
source_datasets:
- extended|other-yahoo-webscope-l6
task_categories:
- text-classification
task_ids:
- multi-class-classification
paperswithcode_id:
- squad
configs:
- en
train-eval-index:
- config: en
task: text-classification
task_id: multi_class_classification
splits:
train_split: train
eval_split: test
col_mapping:
text: text
label: target
metrics:
- type: accuracy
name: Accuracy
extra_gated_prompt: |
By clicking on “Access repository” below, you also agree to ImageNet Terms of Access:
[RESEARCHER_FULLNAME] (the "Researcher") has requested permission to use the ImageNet database (the "Database") at Princeton University and Stanford University. In exchange for such permission, Researcher hereby agrees to the following terms and conditions:
1. Researcher shall use the Database only for non-commercial research and educational purposes.
extra_gated_fields:
Company: text
Country: text
I agree to use this model for non-commerical use ONLY: checkbox
"""
)
assert DatasetMetadata.from_yaml_string(valid_yaml_with_optional_keys)
|