input
stringlengths 33
5k
| output
stringlengths 32
5k
|
---|---|
import os
import urllib
import pytest
from pydantic import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import TextUrl
from tests import TOYDATA_DIR
REMOTE_TEXT_FILE = 'https://de.wikipedia.org/wiki/Brixen'
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
LOCAL_TEXT_FILES = [
str(TOYDATA_DIR / 'penal_colony.txt'),
str(TOYDATA_DIR / 'test.md'),
str(TOYDATA_DIR / 'test.html'),
str(TOYDATA_DIR / 'test.css'),
str(TOYDATA_DIR / 'test.csv'),
str(TOYDATA_DIR / 'test.log'),
]
LOCAL_TEXT_FILES_AND_BEGINNING = [
(str(TOYDATA_DIR / 'penal_colony.txt'), '“It’s a peculiar apparatus,”'),
(str(TOYDATA_DIR / 'test.md'), "# Hello"),
(str(TOYDATA_DIR / 'test.html'), "<html>"),
(str(TOYDATA_DIR / 'test.css'), 'body {'),
(str(TOYDATA_DIR / 'test.csv'), "John,Doe"),
(str(TOYDATA_DIR / 'test.log'), "2022-11-25 12:34:56 INFO: Program started"),
]
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'url,expected_beginning',
[(REMOTE_TEXT_FILE, '<!DOCTYPE html>'), *LOCAL_TEXT_FILES_AND_BEGINNING],
)
def test_load(url, expected_beginning):
uri = parse_obj_as(TextUrl, url)
txt = uri.load()
assert txt.startswith(expected_beginning)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('url', [REMOTE_TEXT_FILE, *LOCAL_TEXT_FILES])
def test_load_to_bytes(url):
uri = parse_obj_as(TextUrl, url)
txt_bytes = uri.load_bytes()
assert isinstance(txt_bytes, bytes)
@pytest.mark.proto
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('url', [REMOTE_TEXT_FILE, *LOCAL_TEXT_FILES])
def test_proto_text_url(url):
uri = parse_obj_as(TextUrl, url)
proto = uri._to_node_protobuf()
assert 'text_url' in str(proto)
@pytest.mark.internet
def test_load_timeout():
url = parse_obj_as(TextUrl, REMOTE_TEXT_FILE)
with pytest.raises(urllib.error.URLError):
_ = url.load(timeout=0.001)
with pytest.raises(urllib.error.URLError):
_ = url.load_bytes(timeout=0.001)
def test_json_schema():
schema_json_of(TextUrl)
@pytest.mark.internet
def test_dump_json():
url = parse_obj_as(TextUrl, REMOTE_TEXT_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[
'my/local/text/file.mp3',
'my/local/text/file.png',
],
)
def test_illegal_validation(path_to_file):
with pytest.raises(ValueError, match='TextUrl'):
parse_obj_as(TextUrl, path_to_file)
|
import os
import urllib
import pytest
from pydantic import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import TextUrl
REMOTE_TXT = 'https://de.wikipedia.org/wiki/Brixen'
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
LOCAL_TXT = os.path.join(CUR_DIR, '..', '..', '..', 'toydata', 'penal_colony.txt')
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'url,expected_beginning',
[(REMOTE_TXT, '<!DOCTYPE html>'), (LOCAL_TXT, '“It’s a peculiar apparatus,”')],
)
def test_load(url, expected_beginning):
uri = parse_obj_as(TextUrl, url)
txt = uri.load()
assert txt.startswith(expected_beginning)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('url', [REMOTE_TXT, LOCAL_TXT])
def test_load_to_bytes(url):
uri = parse_obj_as(TextUrl, url)
txt_bytes = uri.load_bytes()
assert isinstance(txt_bytes, bytes)
@pytest.mark.proto
def test_proto_text_url():
uri = parse_obj_as(TextUrl, LOCAL_TXT)
uri._to_node_protobuf()
@pytest.mark.internet
def test_load_timeout():
url = parse_obj_as(TextUrl, REMOTE_TXT)
with pytest.raises(urllib.error.URLError):
_ = url.load(timeout=0.001)
with pytest.raises(urllib.error.URLError):
_ = url.load_bytes(timeout=0.001)
def test_json_schema():
schema_json_of(TextUrl)
def test_dump_json():
url = parse_obj_as(TextUrl, REMOTE_TXT)
orjson_dumps(url)
|
from docutils import nodes
from docutils.parsers.rst import Directive
class BetaStatus(Directive):
has_content = True
text = "The {api_name} is in Beta stage, and backward compatibility is not guaranteed."
node = nodes.warning
def run(self):
text = self.text.format(api_name=" ".join(self.content))
return [self.node("", nodes.paragraph("", "", nodes.Text(text)))]
def setup(app):
app.add_directive("betastatus", BetaStatus)
return {
"version": "0.1",
"parallel_read_safe": True,
"parallel_write_safe": True,
}
|
from docutils import nodes
from docutils.parsers.rst import Directive
class BetaStatus(Directive):
has_content = True
text = "The {api_name} is in Beta stage, and backward compatibility is not guaranteed."
node = nodes.warning
def run(self):
text = self.text.format(api_name=" ".join(self.content))
return [self.node("", nodes.paragraph("", "", nodes.Text(text)))]
class V2BetaStatus(BetaStatus):
text = (
"The {api_name} is in Beta stage, and while we do not expect disruptive breaking changes, "
"some APIs may slightly change according to user feedback. Please submit any feedback you may have "
"in this issue: https://github.com/pytorch/vision/issues/6753."
)
node = nodes.note
def setup(app):
app.add_directive("betastatus", BetaStatus)
app.add_directive("v2betastatus", V2BetaStatus)
return {
"version": "0.1",
"parallel_read_safe": True,
"parallel_write_safe": True,
}
|
from fastapi.testclient import TestClient
from docs_src.configure_swagger_ui.tutorial002 import app
client = TestClient(app)
def test_swagger_ui():
response = client.get("/docs")
assert response.status_code == 200, response.text
assert (
'"syntaxHighlight": false' not in response.text
), "not used parameters should not be included"
assert (
'"syntaxHighlight": {"theme": "obsidian"}' in response.text
), "parameters with middle dots should be included in a JSON compatible way"
assert (
'"dom_id": "#swagger-ui"' in response.text
), "default configs should be preserved"
assert "presets: [" in response.text, "default configs should be preserved"
assert (
"SwaggerUIBundle.presets.apis," in response.text
), "default configs should be preserved"
assert (
"SwaggerUIBundle.SwaggerUIStandalonePreset" in response.text
), "default configs should be preserved"
assert (
'"layout": "BaseLayout",' in response.text
), "default configs should be preserved"
assert (
'"deepLinking": true,' in response.text
), "default configs should be preserved"
assert (
'"showExtensions": true,' in response.text
), "default configs should be preserved"
assert (
'"showCommonExtensions": true,' in response.text
), "default configs should be preserved"
def test_get_users():
response = client.get("/users/foo")
assert response.status_code == 200, response.text
assert response.json() == {"message": "Hello foo"}
|
from fastapi.testclient import TestClient
from docs_src.configure_swagger_ui.tutorial002 import app
client = TestClient(app)
def test_swagger_ui():
response = client.get("/docs")
assert response.status_code == 200, response.text
assert (
'"syntaxHighlight": false' not in response.text
), "not used parameters should not be included"
assert (
'"syntaxHighlight.theme": "obsidian"' in response.text
), "parameters with middle dots should be included in a JSON compatible way"
assert (
'"dom_id": "#swagger-ui"' in response.text
), "default configs should be preserved"
assert "presets: [" in response.text, "default configs should be preserved"
assert (
"SwaggerUIBundle.presets.apis," in response.text
), "default configs should be preserved"
assert (
"SwaggerUIBundle.SwaggerUIStandalonePreset" in response.text
), "default configs should be preserved"
assert (
'"layout": "BaseLayout",' in response.text
), "default configs should be preserved"
assert (
'"deepLinking": true,' in response.text
), "default configs should be preserved"
assert (
'"showExtensions": true,' in response.text
), "default configs should be preserved"
assert (
'"showCommonExtensions": true,' in response.text
), "default configs should be preserved"
def test_get_users():
response = client.get("/users/foo")
assert response.status_code == 200, response.text
assert response.json() == {"message": "Hello foo"}
|
import numpy as np
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import AnyEmbedding
def test_proto_embedding():
embedding = parse_obj_as(AnyEmbedding, np.zeros((3, 224, 224)))
embedding._to_node_protobuf()
def test_json_schema():
schema_json_of(AnyEmbedding)
def test_dump_json():
tensor = parse_obj_as(AnyEmbedding, np.zeros((3, 224, 224)))
orjson_dumps(tensor)
|
import numpy as np
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.document.io.json import orjson_dumps
from docarray.typing import AnyEmbedding
def test_proto_embedding():
embedding = parse_obj_as(AnyEmbedding, np.zeros((3, 224, 224)))
embedding._to_node_protobuf()
def test_json_schema():
schema_json_of(AnyEmbedding)
def test_dump_json():
tensor = parse_obj_as(AnyEmbedding, np.zeros((3, 224, 224)))
orjson_dumps(tensor)
|
import os
import re
import subprocess
from keras.src import backend
# For torch, use index url to avoid installing nvidia drivers for the test.
BACKEND_REQ = {
"tensorflow": ("tensorflow-cpu", ""),
"torch": (
"torch",
"--extra-index-url https://download.pytorch.org/whl/cpu ",
),
"jax": ("jax[cpu]", ""),
"openvino": ("openvino", ""),
}
def setup_package():
subprocess.run("rm -rf tmp_build_dir", shell=True)
build_process = subprocess.run(
"python3 pip_build.py",
capture_output=True,
text=True,
shell=True,
)
print(build_process.stdout)
whl_path = re.findall(
r"[^\s]*\.whl",
build_process.stdout,
)[-1]
if not whl_path:
print(build_process.stderr)
raise ValueError("Installing Keras package unsuccessful. ")
return whl_path
def create_virtualenv():
env_setup = [
# Create virtual environment
"python3 -m venv test_env",
]
os.environ["PATH"] = (
"/test_env/bin/" + os.pathsep + os.environ.get("PATH", "")
)
run_commands_local(env_setup)
def manage_venv_installs(whl_path):
other_backends = list(set(BACKEND_REQ.keys()) - {backend.backend()})
backend_pkg, backend_extra_url = BACKEND_REQ[backend.backend()]
install_setup = [
# Installs the backend's package and common requirements
"pip install " + backend_extra_url + backend_pkg,
"pip install -r requirements-common.txt",
"pip install pytest",
# Ensure other backends are uninstalled
"pip uninstall -y "
+ BACKEND_REQ[other_backends[0]][0]
+ " "
+ BACKEND_REQ[other_backends[1]][0]
+ " "
+ BACKEND_REQ[other_backends[2]][0],
# Install `.whl` package
"pip install " + whl_path,
]
run_commands_venv(install_setup)
def run_keras_flow():
test_script = [
# Runs the example script
"python -m pytest integration_tests/basic_full_flow.py",
]
run_commands_venv(test_script)
def cleanup():
cleanup_script = [
# Exits virtual environment, deletes files, and any
# miscellaneous install logs
"exit",
"rm -rf test_env",
"rm -rf tmp_build_dir",
"rm -f *+cpu",
]
run_commands_local(cleanup_script)
def run_commands_local(commands):
for command in commands:
print(f"Running command: {command}")
subprocess.run(command, shell=True)
def run_commands_venv(commands):
for command in commands:
print(f"Running command: {command}")
cmd_with_args = command.split(" ")
cmd_with_args[0] = "test_env/bin/" + cmd_with_args[0]
p = subprocess.Popen(cmd_with_args)
assert p.wait() == 0
def test_keras_imports():
try:
# Ensures packages from all backends are installed.
# Builds Keras core package and returns package file path.
whl_path = setup_package()
# Creates and activates a virtual environment.
create_virtualenv()
# Ensures the backend's package is installed
# and the other backends are uninstalled.
manage_venv_installs(whl_path)
# Runs test of basic flow in Keras Core.
# Tests for backend-specific imports and `model.fit()`.
run_keras_flow()
# Removes virtual environment and associated files
finally:
cleanup()
if __name__ == "__main__":
test_keras_imports()
|
import os
import re
import subprocess
from keras.src import backend
# For torch, use index url to avoid installing nvidia drivers for the test.
BACKEND_REQ = {
"tensorflow": ("tensorflow-cpu", ""),
"torch": (
"torch torchvision",
"--extra-index-url https://download.pytorch.org/whl/cpu ",
),
"jax": ("jax[cpu]", ""),
"openvino": ("openvino", ""),
}
def setup_package():
subprocess.run("rm -rf tmp_build_dir", shell=True)
build_process = subprocess.run(
"python3 pip_build.py",
capture_output=True,
text=True,
shell=True,
)
print(build_process.stdout)
whl_path = re.findall(
r"[^\s]*\.whl",
build_process.stdout,
)[-1]
if not whl_path:
print(build_process.stderr)
raise ValueError("Installing Keras package unsuccessful. ")
return whl_path
def create_virtualenv():
env_setup = [
# Create virtual environment
"python3 -m venv test_env",
]
os.environ["PATH"] = (
"/test_env/bin/" + os.pathsep + os.environ.get("PATH", "")
)
run_commands_local(env_setup)
def manage_venv_installs(whl_path):
other_backends = list(set(BACKEND_REQ.keys()) - {backend.backend()})
backend_pkg, backend_extra_url = BACKEND_REQ[backend.backend()]
install_setup = [
# Installs the backend's package and common requirements
"pip install " + backend_extra_url + backend_pkg,
"pip install -r requirements-common.txt",
"pip install pytest",
# Ensure other backends are uninstalled
"pip uninstall -y "
+ BACKEND_REQ[other_backends[0]][0]
+ " "
+ BACKEND_REQ[other_backends[1]][0]
+ " "
+ BACKEND_REQ[other_backends[2]][0],
# Install `.whl` package
"pip install " + whl_path,
]
run_commands_venv(install_setup)
def run_keras_flow():
test_script = [
# Runs the example script
"python -m pytest integration_tests/basic_full_flow.py",
]
run_commands_venv(test_script)
def cleanup():
cleanup_script = [
# Exits virtual environment, deletes files, and any
# miscellaneous install logs
"exit",
"rm -rf test_env",
"rm -rf tmp_build_dir",
"rm -f *+cpu",
]
run_commands_local(cleanup_script)
def run_commands_local(commands):
for command in commands:
print(f"Running command: {command}")
subprocess.run(command, shell=True)
def run_commands_venv(commands):
for command in commands:
print(f"Running command: {command}")
cmd_with_args = command.split(" ")
cmd_with_args[0] = "test_env/bin/" + cmd_with_args[0]
p = subprocess.Popen(cmd_with_args)
assert p.wait() == 0
def test_keras_imports():
try:
# Ensures packages from all backends are installed.
# Builds Keras core package and returns package file path.
whl_path = setup_package()
# Creates and activates a virtual environment.
create_virtualenv()
# Ensures the backend's package is installed
# and the other backends are uninstalled.
manage_venv_installs(whl_path)
# Runs test of basic flow in Keras Core.
# Tests for backend-specific imports and `model.fit()`.
run_keras_flow()
# Removes virtual environment and associated files
finally:
cleanup()
if __name__ == "__main__":
test_keras_imports()
|
# Copyright (c) OpenMMLab. All rights reserved.
from .optimizer import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS,
AmpOptimWrapper, DefaultOptimWrapperConstructor,
OptimWrapper, OptimWrapperDict, build_optim_wrapper)
from .scheduler import (ConstantLR, ConstantMomentum, ConstantParamScheduler,
CosineAnnealingLR, CosineAnnealingMomentum,
CosineAnnealingParamScheduler, ExponentialLR,
ExponentialMomentum, ExponentialParamScheduler,
LinearLR, LinearMomentum, LinearParamScheduler,
MultiStepLR, MultiStepMomentum,
MultiStepParamScheduler, StepLR, StepMomentum,
StepParamScheduler, _ParamScheduler)
__all__ = [
'OPTIM_WRAPPER_CONSTRUCTORS', 'OPTIMIZERS', 'build_optim_wrapper',
'DefaultOptimWrapperConstructor', 'ConstantLR', 'CosineAnnealingLR',
'ExponentialLR', 'LinearLR', 'MultiStepLR', 'StepLR', 'ConstantMomentum',
'CosineAnnealingMomentum', 'ExponentialMomentum', 'LinearMomentum',
'MultiStepMomentum', 'StepMomentum', 'ConstantParamScheduler',
'CosineAnnealingParamScheduler', 'ExponentialParamScheduler',
'LinearParamScheduler', 'MultiStepParamScheduler', 'StepParamScheduler',
'_ParamScheduler', 'OptimWrapper', 'AmpOptimWrapper', 'OptimWrapperDict'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .optimizer import (OPTIMIZER_CONSTRUCTORS, OPTIMIZERS,
DefaultOptimizerConstructor, build_optimizer)
from .scheduler import (ConstantLR, ConstantMomentum, ConstantParamScheduler,
CosineAnnealingLR, CosineAnnealingMomentum,
CosineAnnealingParamScheduler, ExponentialLR,
ExponentialMomentum, ExponentialParamScheduler,
LinearLR, LinearMomentum, LinearParamScheduler,
MultiStepLR, MultiStepMomentum,
MultiStepParamScheduler, StepLR, StepMomentum,
StepParamScheduler, _ParamScheduler)
__all__ = [
'OPTIMIZER_CONSTRUCTORS', 'OPTIMIZERS', 'build_optimizer',
'DefaultOptimizerConstructor', 'ConstantLR', 'CosineAnnealingLR',
'ExponentialLR', 'LinearLR', 'MultiStepLR', 'StepLR', 'ConstantMomentum',
'CosineAnnealingMomentum', 'ExponentialMomentum', 'LinearMomentum',
'MultiStepMomentum', 'StepMomentum', 'ConstantParamScheduler',
'CosineAnnealingParamScheduler', 'ExponentialParamScheduler',
'LinearParamScheduler', 'MultiStepParamScheduler', 'StepParamScheduler',
'_ParamScheduler'
]
|
from .autograd_utils import use_deterministic_algorithms
from .backend_utils import set_audio_backend
from .case_utils import (
HttpServerMixin,
is_ffmpeg_available,
PytorchTestCase,
skipIfCudaSmallMemory,
skipIfNoAudioDevice,
skipIfNoCtcDecoder,
skipIfNoCuda,
skipIfNoExec,
skipIfNoFFmpeg,
skipIfNoKaldi,
skipIfNoMacOS,
skipIfNoModule,
skipIfNoQengine,
skipIfNoRIR,
skipIfNoSox,
skipIfPy310,
skipIfRocm,
TempDirMixin,
TestBaseMixin,
TorchaudioTestCase,
zip_equal,
)
from .data_utils import get_asset_path, get_sinusoid, get_spectrogram, get_whitenoise
from .func_utils import torch_script
from .image_utils import get_image, rgb_to_gray, rgb_to_yuv_ccir, save_image
from .parameterized_utils import load_params, nested_params
from .wav_utils import get_wav_data, load_wav, normalize_wav, save_wav
__all__ = [
"get_asset_path",
"get_whitenoise",
"get_sinusoid",
"get_spectrogram",
"set_audio_backend",
"TempDirMixin",
"HttpServerMixin",
"TestBaseMixin",
"PytorchTestCase",
"TorchaudioTestCase",
"is_ffmpeg_available",
"skipIfNoAudioDevice",
"skipIfNoCtcDecoder",
"skipIfNoCuda",
"skipIfCudaSmallMemory",
"skipIfNoExec",
"skipIfNoMacOS",
"skipIfNoModule",
"skipIfNoKaldi",
"skipIfNoRIR",
"skipIfNoSox",
"skipIfNoSoxBackend",
"skipIfRocm",
"skipIfNoQengine",
"skipIfNoFFmpeg",
"skipIfPy310",
"get_wav_data",
"normalize_wav",
"load_wav",
"save_wav",
"load_params",
"nested_params",
"torch_script",
"save_image",
"get_image",
"rgb_to_gray",
"rgb_to_yuv_ccir",
"use_deterministic_algorithms",
"zip_equal",
]
|
from .backend_utils import set_audio_backend
from .case_utils import (
HttpServerMixin,
is_ffmpeg_available,
PytorchTestCase,
skipIfCudaSmallMemory,
skipIfNoAudioDevice,
skipIfNoCtcDecoder,
skipIfNoCuda,
skipIfNoExec,
skipIfNoFFmpeg,
skipIfNoKaldi,
skipIfNoMacOS,
skipIfNoModule,
skipIfNoQengine,
skipIfNoRIR,
skipIfNoSox,
skipIfPy310,
skipIfRocm,
TempDirMixin,
TestBaseMixin,
TorchaudioTestCase,
zip_equal,
)
from .data_utils import get_asset_path, get_sinusoid, get_spectrogram, get_whitenoise
from .func_utils import torch_script
from .image_utils import get_image, rgb_to_gray, rgb_to_yuv_ccir, save_image
from .parameterized_utils import load_params, nested_params
from .wav_utils import get_wav_data, load_wav, normalize_wav, save_wav
__all__ = [
"get_asset_path",
"get_whitenoise",
"get_sinusoid",
"get_spectrogram",
"set_audio_backend",
"TempDirMixin",
"HttpServerMixin",
"TestBaseMixin",
"PytorchTestCase",
"TorchaudioTestCase",
"is_ffmpeg_available",
"skipIfNoAudioDevice",
"skipIfNoCtcDecoder",
"skipIfNoCuda",
"skipIfCudaSmallMemory",
"skipIfNoExec",
"skipIfNoMacOS",
"skipIfNoModule",
"skipIfNoKaldi",
"skipIfNoRIR",
"skipIfNoSox",
"skipIfNoSoxBackend",
"skipIfRocm",
"skipIfNoQengine",
"skipIfNoFFmpeg",
"skipIfPy310",
"get_wav_data",
"normalize_wav",
"load_wav",
"save_wav",
"load_params",
"nested_params",
"torch_script",
"save_image",
"get_image",
"rgb_to_gray",
"rgb_to_yuv_ccir",
"zip_equal",
]
|
from langchain_core.documents import Document
from langchain_core.language_models import FakeListChatModel
from langchain.retrievers.document_compressors import LLMChainFilter
def test_llm_chain_filter() -> None:
documents = [
Document(
page_content="Candlepin bowling is popular in New England.",
metadata={"a": 1},
),
Document(
page_content="Candlepin bowling balls are smaller.",
metadata={"b": 2},
),
Document(page_content="The moon is round.", metadata={"c": 3}),
]
llm = FakeListChatModel(responses=["YES", "YES", "NO"])
doc_compressor = LLMChainFilter.from_llm(llm)
output = doc_compressor.compress_documents(
documents,
"Tell me about Candlepin bowling.",
)
expected = documents[:2]
assert output == expected
async def test_llm_chain_extractor_async() -> None:
documents = [
Document(
page_content="Candlepin bowling is popular in New England.",
metadata={"a": 1},
),
Document(
page_content="Candlepin bowling balls are smaller.",
metadata={"b": 2},
),
Document(page_content="The moon is round.", metadata={"c": 3}),
]
llm = FakeListChatModel(responses=["YES", "YES", "NO"])
doc_compressor = LLMChainFilter.from_llm(llm)
output = await doc_compressor.acompress_documents(
documents,
"Tell me about Candlepin bowling.",
)
expected = documents[:2]
assert output == expected
|
from langchain_core.documents import Document
from langchain_core.language_models import FakeListChatModel
from langchain.retrievers.document_compressors import LLMChainFilter
def test_llm_chain_filter() -> None:
documents = [
Document(
page_content="Candlepin bowling is popular in New England.",
metadata={"a": 1},
),
Document(
page_content="Candlepin bowling balls are smaller.",
metadata={"b": 2},
),
Document(page_content="The moon is round.", metadata={"c": 3}),
]
llm = FakeListChatModel(responses=["YES", "YES", "NO"])
doc_compressor = LLMChainFilter.from_llm(llm)
output = doc_compressor.compress_documents(
documents, "Tell me about Candlepin bowling."
)
expected = documents[:2]
assert output == expected
async def test_llm_chain_extractor_async() -> None:
documents = [
Document(
page_content="Candlepin bowling is popular in New England.",
metadata={"a": 1},
),
Document(
page_content="Candlepin bowling balls are smaller.",
metadata={"b": 2},
),
Document(page_content="The moon is round.", metadata={"c": 3}),
]
llm = FakeListChatModel(responses=["YES", "YES", "NO"])
doc_compressor = LLMChainFilter.from_llm(llm)
output = await doc_compressor.acompress_documents(
documents, "Tell me about Candlepin bowling."
)
expected = documents[:2]
assert output == expected
|
_base_ = './grid-rcnn_r50_fpn_gn-head_2x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
|
_base_ = './grid_rcnn_r50_fpn_gn-head_2x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
|
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.documents import ImageDoc
from docarray.typing import NdArray
class MyDoc(BaseDocument):
embedding: NdArray
text: str
image: ImageDoc
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
@pytest.mark.parametrize('show_progress', [False, True])
def test_from_to_bytes(protocol, compress, show_progress):
da = DocumentArray[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
]
)
bytes_da = da.to_bytes(
protocol=protocol, compress=compress, show_progress=show_progress
)
da2 = DocumentArray[MyDoc].from_bytes(
bytes_da, protocol=protocol, compress=compress, show_progress=show_progress
)
assert len(da2) == 2
assert len(da) == len(da2)
for d1, d2 in zip(da, da2):
assert d1.embedding.tolist() == d2.embedding.tolist()
assert d1.text == d2.text
assert d1.image.url == d2.image.url
assert da[1].image.url is None
assert da2[1].image.url is None
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
@pytest.mark.parametrize('show_progress', [False, True])
def test_from_to_base64(protocol, compress, show_progress):
da = DocumentArray[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
]
)
bytes_da = da.to_base64(
protocol=protocol, compress=compress, show_progress=show_progress
)
da2 = DocumentArray[MyDoc].from_base64(
bytes_da, protocol=protocol, compress=compress, show_progress=show_progress
)
assert len(da2) == 2
assert len(da) == len(da2)
for d1, d2 in zip(da, da2):
assert d1.embedding.tolist() == d2.embedding.tolist()
assert d1.text == d2.text
assert d1.image.url == d2.image.url
assert da[1].image.url is None
assert da2[1].image.url is None
|
import pytest
from docarray import BaseDocument
from docarray.typing import NdArray
from docarray.documents import Image
from docarray import DocumentArray
class MyDoc(BaseDocument):
embedding: NdArray
text: str
image: Image
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
@pytest.mark.parametrize('show_progress', [False, True])
def test_from_to_bytes(protocol, compress, show_progress):
da = DocumentArray[MyDoc](
[
MyDoc(embedding=[1, 2, 3, 4, 5], text='hello', image=Image(url='aux.png')),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=Image()),
]
)
bytes_da = da.to_bytes(
protocol=protocol, compress=compress, show_progress=show_progress
)
da2 = DocumentArray[MyDoc].from_bytes(
bytes_da, protocol=protocol, compress=compress, show_progress=show_progress
)
assert len(da2) == 2
assert len(da) == len(da2)
for d1, d2 in zip(da, da2):
assert d1.embedding.tolist() == d2.embedding.tolist()
assert d1.text == d2.text
assert d1.image.url == d2.image.url
assert da[1].image.url is None
assert da2[1].image.url is None
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
@pytest.mark.parametrize('show_progress', [False, True])
def test_from_to_base64(protocol, compress, show_progress):
da = DocumentArray[MyDoc](
[
MyDoc(embedding=[1, 2, 3, 4, 5], text='hello', image=Image(url='aux.png')),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=Image()),
]
)
bytes_da = da.to_base64(
protocol=protocol, compress=compress, show_progress=show_progress
)
da2 = DocumentArray[MyDoc].from_base64(
bytes_da, protocol=protocol, compress=compress, show_progress=show_progress
)
assert len(da2) == 2
assert len(da) == len(da2)
for d1, d2 in zip(da, da2):
assert d1.embedding.tolist() == d2.embedding.tolist()
assert d1.text == d2.text
assert d1.image.url == d2.image.url
assert da[1].image.url is None
assert da2[1].image.url is None
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='DocArray team',
author_email='[email protected]',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
install_requires=['numpy', 'rich>=12.0.0', 'jina-hubble-sdk>=0.24.0'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
],
'full': [
'protobuf>=3.13.0',
'grpcio>=1.46.0,<1.48.1',
'grpcio-reflection>=1.46.0,<1.48.1',
'grpcio-health-checking>=1.46.0,<1.48.1',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
],
'qdrant': [
'qdrant-client~=0.10.3',
],
'annlite': [
'annlite',
],
'weaviate': [
'weaviate-client~=3.9.0',
],
'elasticsearch': [
'elasticsearch>=8.2.0',
],
'redis': [
'redis>=4.3.0',
],
'benchmark': [
'pandas',
'seaborn',
],
'test': [
'protobuf>=3.13.0,<=3.20.0', # pip dependency resolution does not respect this restriction from paddle
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov==3.0.0',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.9.0',
'annlite',
'elasticsearch>=8.2.0',
'redis>=4.3.0',
'jina',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='DocArray team',
author_email='[email protected]',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
install_requires=['numpy', 'rich>=12.0.0', 'jina-hubble-sdk>=0.13.1'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
],
'full': [
'protobuf>=3.13.0',
'grpcio>=1.46.0,<1.48.1',
'grpcio-reflection>=1.46.0,<1.48.1',
'grpcio-health-checking>=1.46.0,<1.48.1',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
],
'qdrant': [
'qdrant-client~=0.10.3',
],
'annlite': [
'annlite',
],
'weaviate': [
'weaviate-client~=3.9.0',
],
'elasticsearch': [
'elasticsearch>=8.2.0',
],
'redis': [
'redis>=4.3.0',
],
'benchmark': [
'pandas',
'seaborn',
],
'test': [
'protobuf>=3.13.0,<=3.20.0', # pip dependency resolution does not respect this restriction from paddle
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov==3.0.0',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.9.0',
'annlite',
'elasticsearch>=8.2.0',
'redis>=4.3.0',
'jina',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.utils.dl_utils import TORCH_VERSION
from mmengine.utils.version_utils import digit_version
from .averaged_model import (BaseAveragedModel, ExponentialMovingAverage,
MomentumAnnealingEMA, StochasticWeightAverage)
from .base_model import BaseDataPreprocessor, BaseModel, ImgDataPreprocessor
from .base_module import BaseModule, ModuleDict, ModuleList, Sequential
from .utils import (convert_sync_batchnorm, detect_anomalous_params,
merge_dict, revert_sync_batchnorm, stack_batch)
from .weight_init import (BaseInit, Caffe2XavierInit, ConstantInit,
KaimingInit, NormalInit, PretrainedInit,
TruncNormalInit, UniformInit, XavierInit,
bias_init_with_prob, caffe2_xavier_init,
constant_init, initialize, kaiming_init, normal_init,
trunc_normal_init, uniform_init, update_init_info,
xavier_init)
from .wrappers import (BaseTTAModel, MMDistributedDataParallel,
MMSeparateDistributedDataParallel, is_model_wrapper)
__all__ = [
'MMDistributedDataParallel', 'is_model_wrapper', 'BaseAveragedModel',
'StochasticWeightAverage', 'ExponentialMovingAverage',
'MomentumAnnealingEMA', 'BaseModel', 'BaseDataPreprocessor',
'ImgDataPreprocessor', 'MMSeparateDistributedDataParallel', 'BaseModule',
'stack_batch', 'merge_dict', 'detect_anomalous_params', 'ModuleList',
'ModuleDict', 'Sequential', 'revert_sync_batchnorm', 'update_init_info',
'constant_init', 'xavier_init', 'normal_init', 'trunc_normal_init',
'uniform_init', 'kaiming_init', 'caffe2_xavier_init',
'bias_init_with_prob', 'BaseInit', 'ConstantInit', 'XavierInit',
'NormalInit', 'TruncNormalInit', 'UniformInit', 'KaimingInit',
'Caffe2XavierInit', 'PretrainedInit', 'initialize',
'convert_sync_batchnorm', 'BaseTTAModel'
]
if digit_version(TORCH_VERSION) >= digit_version('1.11.0'):
from .wrappers import MMFullyShardedDataParallel # noqa:F401
__all__.append('MMFullyShardedDataParallel')
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.utils.dl_utils import TORCH_VERSION
from mmengine.utils.version_utils import digit_version
from .averaged_model import (BaseAveragedModel, ExponentialMovingAverage,
MomentumAnnealingEMA, StochasticWeightAverage)
from .base_model import BaseDataPreprocessor, BaseModel, ImgDataPreprocessor
from .base_module import BaseModule, ModuleDict, ModuleList, Sequential
from .utils import (convert_sync_batchnorm, detect_anomalous_params,
merge_dict, revert_sync_batchnorm, stack_batch)
from .weight_init import (BaseInit, Caffe2XavierInit, ConstantInit,
KaimingInit, NormalInit, PretrainedInit,
TruncNormalInit, UniformInit, XavierInit,
bias_init_with_prob, caffe2_xavier_init,
constant_init, initialize, kaiming_init, normal_init,
trunc_normal_init, uniform_init, update_init_info,
xavier_init)
from .wrappers import (MMDistributedDataParallel,
MMSeparateDistributedDataParallel, is_model_wrapper)
__all__ = [
'MMDistributedDataParallel', 'is_model_wrapper', 'BaseAveragedModel',
'StochasticWeightAverage', 'ExponentialMovingAverage',
'MomentumAnnealingEMA', 'BaseModel', 'BaseDataPreprocessor',
'ImgDataPreprocessor', 'MMSeparateDistributedDataParallel', 'BaseModule',
'stack_batch', 'merge_dict', 'detect_anomalous_params', 'ModuleList',
'ModuleDict', 'Sequential', 'revert_sync_batchnorm', 'update_init_info',
'constant_init', 'xavier_init', 'normal_init', 'trunc_normal_init',
'uniform_init', 'kaiming_init', 'caffe2_xavier_init',
'bias_init_with_prob', 'BaseInit', 'ConstantInit', 'XavierInit',
'NormalInit', 'TruncNormalInit', 'UniformInit', 'KaimingInit',
'Caffe2XavierInit', 'PretrainedInit', 'initialize',
'convert_sync_batchnorm'
]
if digit_version(TORCH_VERSION) >= digit_version('1.11.0'):
from .wrappers import MMFullyShardedDataParallel # noqa:F401
__all__.append('MMFullyShardedDataParallel')
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .single_stage_instance_seg import SingleStageInstanceSegmentor
@DETECTORS.register_module()
class SOLO(SingleStageInstanceSegmentor):
"""`SOLO: Segmenting Objects by Locations
<https://arxiv.org/abs/1912.04488>`_
"""
def __init__(self,
backbone,
neck=None,
bbox_head=None,
mask_head=None,
train_cfg=None,
test_cfg=None,
init_cfg=None,
pretrained=None):
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
mask_head=mask_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
init_cfg=init_cfg,
pretrained=pretrained)
|
from ..builder import DETECTORS
from .single_stage_instance_seg import SingleStageInstanceSegmentor
@DETECTORS.register_module()
class SOLO(SingleStageInstanceSegmentor):
"""`SOLO: Segmenting Objects by Locations
<https://arxiv.org/abs/1912.04488>`_
"""
def __init__(self,
backbone,
neck=None,
bbox_head=None,
mask_head=None,
train_cfg=None,
test_cfg=None,
init_cfg=None,
pretrained=None):
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
mask_head=mask_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
init_cfg=init_cfg,
pretrained=pretrained)
|
__version__ = '0.35.0'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
from docarray.utils._internal.misc import _get_path_from_docarray_root_level
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def __getattr__(name: str):
if name in ['Document', 'DocumentArray']:
raise ImportError(
f'Cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\'.\n'
f'The object named \'{name}\' does not exist anymore in this version of docarray.\n'
f'If you still want to use \'{name}\' please downgrade to version <=0.21.0 '
f'with: `pip install -U docarray==0.21.0`.'
)
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
|
__version__ = '0.34.1'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
from docarray.utils._internal.misc import _get_path_from_docarray_root_level
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def __getattr__(name: str):
if name in ['Document', 'DocumentArray']:
raise ImportError(
f'Cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\'.\n'
f'The object named \'{name}\' does not exist anymore in this version of docarray.\n'
f'If you still want to use \'{name}\' please downgrade to version <=0.21.0 '
f'with: `pip install -U docarray==0.21.0`.'
)
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
|
_base_ = './mask-rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py'
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(type='AmpOptimWrapper')
|
_base_ = './mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py'
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(type='AmpOptimWrapper')
|
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.documents import VideoDoc
from docarray.typing import AudioNdArray, NdArray, VideoNdArray
from docarray.utils._internal.misc import is_tf_available
from docarray.utils._internal.pydantic import is_pydantic_v2
from tests import TOYDATA_DIR
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
LOCAL_VIDEO_FILE = str(TOYDATA_DIR / 'mov_bbb.mp4')
REMOTE_VIDEO_FILE = 'https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE])
def test_video(file_url):
vid = VideoDoc(url=file_url)
vid.tensor, vid.audio.tensor, vid.key_frame_indices = vid.url.load()
assert isinstance(vid.tensor, VideoNdArray)
assert isinstance(vid.audio.tensor, AudioNdArray)
assert isinstance(vid.key_frame_indices, NdArray)
@pytest.mark.skipif(is_pydantic_v2, reason="Not working with pydantic v2 for now")
def test_video_np():
video = parse_obj_as(VideoDoc, np.zeros((10, 10, 3)))
assert (video.tensor == np.zeros((10, 10, 3))).all()
@pytest.mark.skipif(is_pydantic_v2, reason="Not working with pydantic v2 for now")
def test_video_torch():
video = parse_obj_as(VideoDoc, torch.zeros(10, 10, 3))
assert (video.tensor == torch.zeros(10, 10, 3)).all()
@pytest.mark.skipif(is_pydantic_v2, reason="Not working with pydantic v2 for now")
@pytest.mark.tensorflow
def test_video_tensorflow():
video = parse_obj_as(VideoDoc, tf.zeros((10, 10, 3)))
assert tnp.allclose(video.tensor.tensor, tf.zeros((10, 10, 3)))
@pytest.mark.skipif(is_pydantic_v2, reason="Not working with pydantic v2 for now")
def test_video_shortcut_doc():
class MyDoc(BaseDoc):
video: VideoDoc
video2: VideoDoc
video3: VideoDoc
doc = MyDoc(
video='http://myurl.mp4',
video2=np.zeros((10, 10, 3)),
video3=torch.zeros(10, 10, 3),
)
assert doc.video.url == 'http://myurl.mp4'
assert (doc.video2.tensor == np.zeros((10, 10, 3))).all()
assert (doc.video3.tensor == torch.zeros(10, 10, 3)).all()
|
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.documents import VideoDoc
from docarray.typing import AudioNdArray, NdArray, VideoNdArray
from docarray.utils._internal.misc import is_tf_available
from tests import TOYDATA_DIR
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
LOCAL_VIDEO_FILE = str(TOYDATA_DIR / 'mov_bbb.mp4')
REMOTE_VIDEO_FILE = 'https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE])
def test_video(file_url):
vid = VideoDoc(url=file_url)
vid.tensor, vid.audio.tensor, vid.key_frame_indices = vid.url.load()
assert isinstance(vid.tensor, VideoNdArray)
assert isinstance(vid.audio.tensor, AudioNdArray)
assert isinstance(vid.key_frame_indices, NdArray)
def test_video_np():
video = parse_obj_as(VideoDoc, np.zeros((10, 10, 3)))
assert (video.tensor == np.zeros((10, 10, 3))).all()
def test_video_torch():
video = parse_obj_as(VideoDoc, torch.zeros(10, 10, 3))
assert (video.tensor == torch.zeros(10, 10, 3)).all()
@pytest.mark.tensorflow
def test_video_tensorflow():
video = parse_obj_as(VideoDoc, tf.zeros((10, 10, 3)))
assert tnp.allclose(video.tensor.tensor, tf.zeros((10, 10, 3)))
def test_video_shortcut_doc():
class MyDoc(BaseDoc):
video: VideoDoc
video2: VideoDoc
video3: VideoDoc
doc = MyDoc(
video='http://myurl.mp4',
video2=np.zeros((10, 10, 3)),
video3=torch.zeros(10, 10, 3),
)
assert doc.video.url == 'http://myurl.mp4'
assert (doc.video2.tensor == np.zeros((10, 10, 3))).all()
assert (doc.video3.tensor == torch.zeros(10, 10, 3)).all()
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '3.0.0rc4'
short_version = __version__
def parse_version_info(version_str):
"""Parse a version string into a tuple.
Args:
version_str (str): The version string.
Returns:
tuple[int | str]: The version info, e.g., "1.3.0" is parsed into
(1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1').
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '3.0.0rc3'
short_version = __version__
def parse_version_info(version_str):
"""Parse a version string into a tuple.
Args:
version_str (str): The version string.
Returns:
tuple[int | str]: The version info, e.g., "1.3.0" is parsed into
(1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1').
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
_base_ = './fcos_hrnetv2p-w32-gn-head_ms-640-800-4xb4-2x_coco.py'
model = dict(
backbone=dict(
type='HRNet',
extra=dict(
stage2=dict(num_channels=(40, 80)),
stage3=dict(num_channels=(40, 80, 160)),
stage4=dict(num_channels=(40, 80, 160, 320))),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w40')),
neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256))
|
_base_ = './fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py'
model = dict(
backbone=dict(
type='HRNet',
extra=dict(
stage2=dict(num_channels=(40, 80)),
stage3=dict(num_channels=(40, 80, 160)),
stage4=dict(num_channels=(40, 80, 160, 320))),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w40')),
neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256))
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from jina import Flow
from simpleranker import SimpleRanker
def test_integration(documents_chunk):
with Flow().add(uses=SimpleRanker, uses_with={'metric': 'cosine'}) as flow:
resp = flow.post(on='/search', inputs=documents_chunk, return_results=True)
for r in resp:
for doc in r.docs:
assert doc.matches
for i in range(len(doc.matches) - 1):
match = doc.matches[i]
assert match.tags
assert (
match.scores['cosine'].value
<= doc.matches[i + 1].scores['cosine'].value
)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from jina import Flow
from ...simpleranker import SimpleRanker
def test_integration(documents_chunk):
with Flow().add(uses=SimpleRanker, uses_with={'metric': 'cosine'}) as flow:
resp = flow.post(on='/search', inputs=documents_chunk, return_results=True)
for r in resp:
for doc in r.docs:
assert doc.matches
for i in range(len(doc.matches) - 1):
match = doc.matches[i]
assert match.tags
assert (
match.scores['cosine'].value
<= doc.matches[i + 1].scores['cosine'].value
)
|
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
NodeExecutionEntry,
RedisExecutionEventBus,
create_graph_execution,
get_execution_results,
get_incomplete_executions,
get_latest_execution,
update_execution_status,
update_graph_execution_stats,
update_node_execution_stats,
upsert_execution_input,
upsert_execution_output,
)
from backend.data.graph import get_graph, get_node
from backend.data.user import (
get_user_integrations,
get_user_metadata,
update_user_integrations,
update_user_metadata,
)
from backend.util.service import AppService, expose, exposed_run_and_wait
from backend.util.settings import Config
config = Config()
_user_credit_model = get_user_credit_model()
async def _spend_credits(entry: NodeExecutionEntry) -> int:
return await _user_credit_model.spend_credits(entry, 0, 0)
class DatabaseManager(AppService):
def __init__(self):
super().__init__()
self.use_db = True
self.use_redis = True
self.event_queue = RedisExecutionEventBus()
@classmethod
def get_port(cls) -> int:
return config.database_api_port
@expose
def send_execution_update(self, execution_result: ExecutionResult):
self.event_queue.publish(execution_result)
# Executions
create_graph_execution = exposed_run_and_wait(create_graph_execution)
get_execution_results = exposed_run_and_wait(get_execution_results)
get_incomplete_executions = exposed_run_and_wait(get_incomplete_executions)
get_latest_execution = exposed_run_and_wait(get_latest_execution)
update_execution_status = exposed_run_and_wait(update_execution_status)
update_graph_execution_stats = exposed_run_and_wait(update_graph_execution_stats)
update_node_execution_stats = exposed_run_and_wait(update_node_execution_stats)
upsert_execution_input = exposed_run_and_wait(upsert_execution_input)
upsert_execution_output = exposed_run_and_wait(upsert_execution_output)
# Graphs
get_node = exposed_run_and_wait(get_node)
get_graph = exposed_run_and_wait(get_graph)
# Credits
spend_credits = exposed_run_and_wait(_spend_credits)
# User + User Metadata + User Integrations
get_user_metadata = exposed_run_and_wait(get_user_metadata)
update_user_metadata = exposed_run_and_wait(update_user_metadata)
get_user_integrations = exposed_run_and_wait(get_user_integrations)
update_user_integrations = exposed_run_and_wait(update_user_integrations)
|
from functools import wraps
from typing import Any, Callable, Concatenate, Coroutine, ParamSpec, TypeVar, cast
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
NodeExecutionEntry,
RedisExecutionEventBus,
create_graph_execution,
get_execution_results,
get_incomplete_executions,
get_latest_execution,
update_execution_status,
update_graph_execution_stats,
update_node_execution_stats,
upsert_execution_input,
upsert_execution_output,
)
from backend.data.graph import get_graph, get_node
from backend.data.user import (
get_user_integrations,
get_user_metadata,
update_user_integrations,
update_user_metadata,
)
from backend.util.service import AppService, expose, register_pydantic_serializers
from backend.util.settings import Config
P = ParamSpec("P")
R = TypeVar("R")
config = Config()
class DatabaseManager(AppService):
def __init__(self):
super().__init__()
self.use_db = True
self.use_redis = True
self.event_queue = RedisExecutionEventBus()
@classmethod
def get_port(cls) -> int:
return config.database_api_port
@expose
def send_execution_update(self, execution_result: ExecutionResult):
self.event_queue.publish(execution_result)
@staticmethod
def exposed_run_and_wait(
f: Callable[P, Coroutine[None, None, R]]
) -> Callable[Concatenate[object, P], R]:
@expose
@wraps(f)
def wrapper(self, *args: P.args, **kwargs: P.kwargs) -> R:
coroutine = f(*args, **kwargs)
res = self.run_and_wait(coroutine)
return res
# Register serializers for annotations on bare function
register_pydantic_serializers(f)
return wrapper
# Executions
create_graph_execution = exposed_run_and_wait(create_graph_execution)
get_execution_results = exposed_run_and_wait(get_execution_results)
get_incomplete_executions = exposed_run_and_wait(get_incomplete_executions)
get_latest_execution = exposed_run_and_wait(get_latest_execution)
update_execution_status = exposed_run_and_wait(update_execution_status)
update_graph_execution_stats = exposed_run_and_wait(update_graph_execution_stats)
update_node_execution_stats = exposed_run_and_wait(update_node_execution_stats)
upsert_execution_input = exposed_run_and_wait(upsert_execution_input)
upsert_execution_output = exposed_run_and_wait(upsert_execution_output)
# Graphs
get_node = exposed_run_and_wait(get_node)
get_graph = exposed_run_and_wait(get_graph)
# Credits
user_credit_model = get_user_credit_model()
spend_credits = cast(
Callable[[Any, NodeExecutionEntry, float, float], int],
exposed_run_and_wait(user_credit_model.spend_credits),
)
# User + User Metadata + User Integrations
get_user_metadata = exposed_run_and_wait(get_user_metadata)
update_user_metadata = exposed_run_and_wait(update_user_metadata)
get_user_integrations = exposed_run_and_wait(get_user_integrations)
update_user_integrations = exposed_run_and_wait(update_user_integrations)
|
from keras.src import backend
from keras.src.utils.module_utils import tensorflow as tf
def get_tensor_spec(t, dynamic_batch=False, name=None):
"""Returns a `TensorSpec` given a single `Tensor` or `TensorSpec`."""
if isinstance(t, tf.TypeSpec):
spec = t
elif isinstance(t, tf.__internal__.CompositeTensor):
# Check for ExtensionTypes
spec = t._type_spec
elif hasattr(t, "shape") and hasattr(t, "dtype"):
spec = tf.TensorSpec(shape=t.shape, dtype=t.dtype, name=name)
else:
return None # Allow non-Tensors to pass through.
if not dynamic_batch:
return spec
shape = spec.shape
if shape.rank is None or shape.rank == 0:
return spec
shape_list = shape.as_list()
shape_list[0] = None
shape = tf.TensorShape(shape_list)
spec._shape = shape
return spec
def ensure_tensor(inputs, dtype=None):
"""Ensures the input is a Tensor, SparseTensor or RaggedTensor."""
if not isinstance(inputs, (tf.Tensor, tf.SparseTensor, tf.RaggedTensor)):
if backend.backend() == "torch" and backend.is_tensor(inputs):
# Plain `np.asarray()` conversion fails with PyTorch.
inputs = backend.convert_to_numpy(inputs)
inputs = tf.convert_to_tensor(inputs, dtype)
if dtype is not None and inputs.dtype != dtype:
inputs = tf.cast(inputs, dtype)
return inputs
|
from keras.src.utils.module_utils import tensorflow as tf
def get_tensor_spec(t, dynamic_batch=False, name=None):
"""Returns a `TensorSpec` given a single `Tensor` or `TensorSpec`."""
if isinstance(t, tf.TypeSpec):
spec = t
elif isinstance(t, tf.__internal__.CompositeTensor):
# Check for ExtensionTypes
spec = t._type_spec
elif hasattr(t, "shape") and hasattr(t, "dtype"):
spec = tf.TensorSpec(shape=t.shape, dtype=t.dtype, name=name)
else:
return None # Allow non-Tensors to pass through.
if not dynamic_batch:
return spec
shape = spec.shape
if shape.rank is None or shape.rank == 0:
return spec
shape_list = shape.as_list()
shape_list[0] = None
shape = tf.TensorShape(shape_list)
spec._shape = shape
return spec
|
import typing
import pydantic
class LibraryAgent(pydantic.BaseModel):
id: str # Changed from agent_id to match GraphMeta
version: int # Changed from agent_version to match GraphMeta
is_active: bool # Added to match GraphMeta
name: str
description: str
isCreatedByUser: bool
# Made input_schema and output_schema match GraphMeta's type
input_schema: dict[str, typing.Any] # Should be BlockIOObjectSubSchema in frontend
output_schema: dict[str, typing.Any] # Should be BlockIOObjectSubSchema in frontend
|
import datetime
import json
import typing
import prisma.models
import pydantic
import backend.data.block
import backend.data.graph
import backend.server.model
class LibraryAgent(pydantic.BaseModel):
id: str # Changed from agent_id to match GraphMeta
agent_id: str
agent_version: int # Changed from agent_version to match GraphMeta
preset_id: str | None
updated_at: datetime.datetime
name: str
description: str
# Made input_schema and output_schema match GraphMeta's type
input_schema: dict[str, typing.Any] # Should be BlockIOObjectSubSchema in frontend
output_schema: dict[str, typing.Any] # Should be BlockIOObjectSubSchema in frontend
is_favorite: bool
is_created_by_user: bool
is_latest_version: bool
@staticmethod
def from_db(agent: prisma.models.LibraryAgent):
if not agent.Agent:
raise ValueError("AgentGraph is required")
graph = backend.data.graph.GraphModel.from_db(agent.Agent)
agent_updated_at = agent.Agent.updatedAt
lib_agent_updated_at = agent.updatedAt
# Take the latest updated_at timestamp either when the graph was updated or the library agent was updated
updated_at = (
max(agent_updated_at, lib_agent_updated_at)
if agent_updated_at
else lib_agent_updated_at
)
return LibraryAgent(
id=agent.id,
agent_id=agent.agentId,
agent_version=agent.agentVersion,
updated_at=updated_at,
name=graph.name,
description=graph.description,
input_schema=graph.input_schema,
output_schema=graph.output_schema,
is_favorite=agent.isFavorite,
is_created_by_user=agent.isCreatedByUser,
is_latest_version=graph.is_active,
preset_id=agent.AgentPreset.id if agent.AgentPreset else None,
)
class LibraryAgentPreset(pydantic.BaseModel):
id: str
updated_at: datetime.datetime
agent_id: str
agent_version: int
name: str
description: str
is_active: bool
inputs: dict[str, typing.Union[backend.data.block.BlockInput, typing.Any]]
@staticmethod
def from_db(preset: prisma.models.AgentPreset):
input_data = {}
for data in preset.InputPresets or []:
input_data[data.name] = json.loads(data.data)
return LibraryAgentPreset(
id=preset.id,
updated_at=preset.updatedAt,
agent_id=preset.agentId,
agent_version=preset.agentVersion,
name=preset.name,
description=preset.description,
is_active=preset.isActive,
inputs=input_data,
)
class LibraryAgentPresetResponse(pydantic.BaseModel):
presets: list[LibraryAgentPreset]
pagination: backend.server.model.Pagination
class CreateLibraryAgentPresetRequest(pydantic.BaseModel):
name: str
description: str
inputs: dict[str, typing.Union[backend.data.block.BlockInput, typing.Any]]
agent_id: str
agent_version: int
is_active: bool
|
from langchain_core.load import dumpd, dumps, load, loads
from langchain_openai import ChatOpenAI, OpenAI
def test_loads_openai_llm() -> None:
llm = OpenAI(model="davinci", temperature=0.5, openai_api_key="hello", top_p=0.8) # type: ignore[call-arg]
llm_string = dumps(llm)
llm2 = loads(llm_string, secrets_map={"OPENAI_API_KEY": "hello"})
assert llm2.dict() == llm.dict()
llm_string_2 = dumps(llm2)
assert llm_string_2 == llm_string
assert isinstance(llm2, OpenAI)
def test_load_openai_llm() -> None:
llm = OpenAI(model="davinci", temperature=0.5, openai_api_key="hello") # type: ignore[call-arg]
llm_obj = dumpd(llm)
llm2 = load(llm_obj, secrets_map={"OPENAI_API_KEY": "hello"})
assert llm2.dict() == llm.dict()
assert dumpd(llm2) == llm_obj
assert isinstance(llm2, OpenAI)
def test_loads_openai_chat() -> None:
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.5, openai_api_key="hello") # type: ignore[call-arg]
llm_string = dumps(llm)
llm2 = loads(llm_string, secrets_map={"OPENAI_API_KEY": "hello"})
assert llm2.dict() == llm.dict()
llm_string_2 = dumps(llm2)
assert llm_string_2 == llm_string
assert isinstance(llm2, ChatOpenAI)
def test_load_openai_chat() -> None:
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.5, openai_api_key="hello") # type: ignore[call-arg]
llm_obj = dumpd(llm)
llm2 = load(llm_obj, secrets_map={"OPENAI_API_KEY": "hello"})
assert llm2.dict() == llm.dict()
assert dumpd(llm2) == llm_obj
assert isinstance(llm2, ChatOpenAI)
|
from langchain_core.load.dump import dumpd, dumps
from langchain_core.load.load import load, loads
from langchain_openai import ChatOpenAI, OpenAI
def test_loads_openai_llm() -> None:
llm = OpenAI(model="davinci", temperature=0.5, openai_api_key="hello", top_p=0.8) # type: ignore[call-arg]
llm_string = dumps(llm)
llm2 = loads(llm_string, secrets_map={"OPENAI_API_KEY": "hello"})
assert llm2.dict() == llm.dict()
llm_string_2 = dumps(llm2)
assert llm_string_2 == llm_string
assert isinstance(llm2, OpenAI)
def test_load_openai_llm() -> None:
llm = OpenAI(model="davinci", temperature=0.5, openai_api_key="hello") # type: ignore[call-arg]
llm_obj = dumpd(llm)
llm2 = load(llm_obj, secrets_map={"OPENAI_API_KEY": "hello"})
assert llm2.dict() == llm.dict()
assert dumpd(llm2) == llm_obj
assert isinstance(llm2, OpenAI)
def test_loads_openai_chat() -> None:
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.5, openai_api_key="hello") # type: ignore[call-arg]
llm_string = dumps(llm)
llm2 = loads(llm_string, secrets_map={"OPENAI_API_KEY": "hello"})
assert llm2.dict() == llm.dict()
llm_string_2 = dumps(llm2)
assert llm_string_2 == llm_string
assert isinstance(llm2, ChatOpenAI)
def test_load_openai_chat() -> None:
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.5, openai_api_key="hello") # type: ignore[call-arg]
llm_obj = dumpd(llm)
llm2 = load(llm_obj, secrets_map={"OPENAI_API_KEY": "hello"})
assert llm2.dict() == llm.dict()
assert dumpd(llm2) == llm_obj
assert isinstance(llm2, ChatOpenAI)
|
import os
from typing import Any, Callable, List, Optional, Tuple
import torch
import torch.utils.data as data
from ..utils import _log_api_usage_once
class VisionDataset(data.Dataset):
"""
Base Class For making datasets which are compatible with torchvision.
It is necessary to override the ``__getitem__`` and ``__len__`` method.
Args:
root (string): Root directory of dataset.
transforms (callable, optional): A function/transforms that takes in
an image and a label and returns the transformed versions of both.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
.. note::
:attr:`transforms` and the combination of :attr:`transform` and :attr:`target_transform` are mutually exclusive.
"""
_repr_indent = 4
def __init__(
self,
root: str,
transforms: Optional[Callable] = None,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
) -> None:
_log_api_usage_once(self)
if isinstance(root, str):
root = os.path.expanduser(root)
self.root = root
has_transforms = transforms is not None
has_separate_transform = transform is not None or target_transform is not None
if has_transforms and has_separate_transform:
raise ValueError("Only transforms or transform/target_transform can be passed as argument")
# for backwards-compatibility
self.transform = transform
self.target_transform = target_transform
if has_separate_transform:
transforms = StandardTransform(transform, target_transform)
self.transforms = transforms
def __getitem__(self, index: int) -> Any:
"""
Args:
index (int): Index
Returns:
(Any): Sample and meta data, optionally transformed by the respective transforms.
"""
raise NotImplementedError
def __len__(self) -> int:
raise NotImplementedError
def __repr__(self) -> str:
head = "Dataset " + self.__class__.__name__
body = [f"Number of datapoints: {self.__len__()}"]
if self.root is not None:
body.append(f"Root location: {self.root}")
body += self.extra_repr().splitlines()
if hasattr(self, "transforms") and self.transforms is not None:
body += [repr(self.transforms)]
lines = [head] + [" " * self._repr_indent + line for line in body]
return "\n".join(lines)
def _format_transform_repr(self, transform: Callable, head: str) -> List[str]:
lines = transform.__repr__().splitlines()
return [f"{head}{lines[0]}"] + ["{}{}".format(" " * len(head), line) for line in lines[1:]]
def extra_repr(self) -> str:
return ""
class StandardTransform:
def __init__(self, transform: Optional[Callable] = None, target_transform: Optional[Callable] = None) -> None:
self.transform = transform
self.target_transform = target_transform
def __call__(self, input: Any, target: Any) -> Tuple[Any, Any]:
if self.transform is not None:
input = self.transform(input)
if self.target_transform is not None:
target = self.target_transform(target)
return input, target
def _format_transform_repr(self, transform: Callable, head: str) -> List[str]:
lines = transform.__repr__().splitlines()
return [f"{head}{lines[0]}"] + ["{}{}".format(" " * len(head), line) for line in lines[1:]]
def __repr__(self) -> str:
body = [self.__class__.__name__]
if self.transform is not None:
body += self._format_transform_repr(self.transform, "Transform: ")
if self.target_transform is not None:
body += self._format_transform_repr(self.target_transform, "Target transform: ")
return "\n".join(body)
|
import os
from typing import Any, Callable, List, Optional, Tuple
import torch
import torch.utils.data as data
from ..utils import _log_api_usage_once
class VisionDataset(data.Dataset):
"""
Base Class For making datasets which are compatible with torchvision.
It is necessary to override the ``__getitem__`` and ``__len__`` method.
Args:
root (string): Root directory of dataset.
transforms (callable, optional): A function/transforms that takes in
an image and a label and returns the transformed versions of both.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
.. note::
:attr:`transforms` and the combination of :attr:`transform` and :attr:`target_transform` are mutually exclusive.
"""
_repr_indent = 4
def __init__(
self,
root: str,
transforms: Optional[Callable] = None,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
) -> None:
_log_api_usage_once(self)
if isinstance(root, torch._six.string_classes):
root = os.path.expanduser(root)
self.root = root
has_transforms = transforms is not None
has_separate_transform = transform is not None or target_transform is not None
if has_transforms and has_separate_transform:
raise ValueError("Only transforms or transform/target_transform can be passed as argument")
# for backwards-compatibility
self.transform = transform
self.target_transform = target_transform
if has_separate_transform:
transforms = StandardTransform(transform, target_transform)
self.transforms = transforms
def __getitem__(self, index: int) -> Any:
"""
Args:
index (int): Index
Returns:
(Any): Sample and meta data, optionally transformed by the respective transforms.
"""
raise NotImplementedError
def __len__(self) -> int:
raise NotImplementedError
def __repr__(self) -> str:
head = "Dataset " + self.__class__.__name__
body = [f"Number of datapoints: {self.__len__()}"]
if self.root is not None:
body.append(f"Root location: {self.root}")
body += self.extra_repr().splitlines()
if hasattr(self, "transforms") and self.transforms is not None:
body += [repr(self.transforms)]
lines = [head] + [" " * self._repr_indent + line for line in body]
return "\n".join(lines)
def _format_transform_repr(self, transform: Callable, head: str) -> List[str]:
lines = transform.__repr__().splitlines()
return [f"{head}{lines[0]}"] + ["{}{}".format(" " * len(head), line) for line in lines[1:]]
def extra_repr(self) -> str:
return ""
class StandardTransform:
def __init__(self, transform: Optional[Callable] = None, target_transform: Optional[Callable] = None) -> None:
self.transform = transform
self.target_transform = target_transform
def __call__(self, input: Any, target: Any) -> Tuple[Any, Any]:
if self.transform is not None:
input = self.transform(input)
if self.target_transform is not None:
target = self.target_transform(target)
return input, target
def _format_transform_repr(self, transform: Callable, head: str) -> List[str]:
lines = transform.__repr__().splitlines()
return [f"{head}{lines[0]}"] + ["{}{}".format(" " * len(head), line) for line in lines[1:]]
def __repr__(self) -> str:
body = [self.__class__.__name__]
if self.transform is not None:
body += self._format_transform_repr(self.transform, "Transform: ")
if self.target_transform is not None:
body += self._format_transform_repr(self.target_transform, "Target transform: ")
return "\n".join(body)
|
"""
This examples loads a pre-trained model and evaluates it on the STSbenchmark dataset
Usage:
python evaluation_stsbenchmark.py
OR
python evaluation_stsbenchmark.py model_name
"""
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from datasets import load_dataset
import logging
import sys
import torch
import os
from sentence_transformers.similarity_functions import SimilarityFunction
script_folder_path = os.path.dirname(os.path.realpath(__file__))
# Limit torch to 4 threads
torch.set_num_threads(4)
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
model_name = sys.argv[1] if len(sys.argv) > 1 else "stsb-distilroberta-base-v2"
# Load a named sentence model (based on BERT). This will download the model from our server.
# Alternatively, you can also pass a filepath to SentenceTransformer()
model = SentenceTransformer(model_name)
stsb_eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=stsb_eval_dataset["sentence1"],
sentences2=stsb_eval_dataset["sentence2"],
scores=stsb_eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
model.evaluate(dev_evaluator)
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
model.evaluate(test_evaluator)
|
"""
This examples loads a pre-trained model and evaluates it on the STSbenchmark dataset
Usage:
python evaluation_stsbenchmark.py
OR
python evaluation_stsbenchmark.py model_name
"""
from sentence_transformers import SentenceTransformer, util, LoggingHandler, InputExample
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
import logging
import sys
import torch
import gzip
import os
import csv
script_folder_path = os.path.dirname(os.path.realpath(__file__))
# Limit torch to 4 threads
torch.set_num_threads(4)
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
model_name = sys.argv[1] if len(sys.argv) > 1 else "stsb-distilroberta-base-v2"
# Load a named sentence model (based on BERT). This will download the model from our server.
# Alternatively, you can also pass a filepath to SentenceTransformer()
model = SentenceTransformer(model_name)
sts_dataset_path = "data/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "dev":
dev_samples.append(inp_example)
elif row["split"] == "test":
test_samples.append(inp_example)
else:
train_samples.append(inp_example)
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name="sts-dev")
model.evaluate(evaluator)
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
model.evaluate(evaluator)
|
_base_ = '../cascade_rcnn/cascade-mask-rcnn_x101-32x4d_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
plugins=[
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
stages=(False, True, True, True),
position='after_conv3')
]))
|
_base_ = '../cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
plugins=[
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
stages=(False, True, True, True),
position='after_conv3')
]))
|
_base_ = [
'../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py'
]
data_preprocessor = dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True)
# model settings
model = dict(
type='CornerNet',
data_preprocessor=data_preprocessor,
backbone=dict(
type='HourglassNet',
downsample_times=5,
num_stacks=2,
stage_channels=[256, 256, 384, 384, 384, 512],
stage_blocks=[2, 2, 2, 2, 2, 4],
norm_cfg=dict(type='BN', requires_grad=True)),
neck=None,
bbox_head=dict(
type='CornerHead',
num_classes=80,
in_channels=256,
num_feat_levels=2,
corner_emb_channels=1,
loss_heatmap=dict(
type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1),
loss_embedding=dict(
type='AssociativeEmbeddingLoss',
pull_weight=0.10,
push_weight=0.10),
loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1)),
# training and testing settings
train_cfg=None,
test_cfg=dict(
corner_topk=100,
local_maximum_kernel=3,
distance_threshold=0.5,
score_thr=0.05,
max_per_img=100,
nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian')))
# data settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
# The cropped images are padded into squares during training,
# but may be smaller than crop_size.
type='RandomCenterCropPad',
crop_size=(511, 511),
ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
test_mode=False,
test_pad_mode=None,
mean=data_preprocessor['mean'],
std=data_preprocessor['std'],
# Image data is not converted to rgb.
to_rgb=data_preprocessor['bgr_to_rgb']),
# Make sure the output is always crop_size.
dict(type='Resize', scale=(511, 511), keep_ratio=False),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs'),
]
# TODO: mstest is not currently implemented
test_pipeline = [
dict(
type='LoadImageFromFile',
to_float32=True,
file_client_args={{_base_.file_client_args}}),
# don't need Resize
dict(
type='RandomCenterCropPad',
crop_size=None,
ratios=None,
border=None,
test_mode=True,
test_pad_mode=['logical_or', 127],
mean=data_preprocessor['mean'],
std=data_preprocessor['std'],
# Image data is not converted to rgb.
to_rgb=data_preprocessor['bgr_to_rgb']),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'border'))
]
train_dataloader = dict(
batch_size=6,
num_workers=3,
batch_sampler=None,
dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='Adam', lr=0.0005),
clip_grad=dict(max_norm=35, norm_type=2))
max_epochs = 210
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0 / 3,
by_epoch=False,
begin=0,
end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[180],
gamma=0.1)
]
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (6 samples per GPU)
auto_scale_lr = dict(base_batch_size=48)
|
_base_ = [
'../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py'
]
data_preprocessor = dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True)
# model settings
model = dict(
type='CornerNet',
data_preprocessor=data_preprocessor,
backbone=dict(
type='HourglassNet',
downsample_times=5,
num_stacks=2,
stage_channels=[256, 256, 384, 384, 384, 512],
stage_blocks=[2, 2, 2, 2, 2, 4],
norm_cfg=dict(type='BN', requires_grad=True)),
neck=None,
bbox_head=dict(
type='CornerHead',
num_classes=80,
in_channels=256,
num_feat_levels=2,
corner_emb_channels=1,
loss_heatmap=dict(
type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1),
loss_embedding=dict(
type='AssociativeEmbeddingLoss',
pull_weight=0.10,
push_weight=0.10),
loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1)),
# training and testing settings
train_cfg=None,
test_cfg=dict(
corner_topk=100,
local_maximum_kernel=3,
distance_threshold=0.5,
score_thr=0.05,
max_per_img=100,
nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian')))
# data settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
# The cropped images are padded into squares during training,
# but may be smaller than crop_size.
type='RandomCenterCropPad',
crop_size=(511, 511),
ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
test_mode=False,
test_pad_mode=None,
mean=data_preprocessor['mean'],
std=data_preprocessor['std'],
# Image data is not converted to rgb.
to_rgb=data_preprocessor['bgr_to_rgb']),
# Make sure the output is always crop_size.
dict(type='Resize', scale=(511, 511), keep_ratio=False),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs'),
]
# TODO: mstest is not currently implemented
test_pipeline = [
dict(
type='LoadImageFromFile',
to_float32=True,
file_client_args={{_base_.file_client_args}}),
# don't need Resize
dict(
type='RandomCenterCropPad',
crop_size=None,
ratios=None,
border=None,
test_mode=True,
test_pad_mode=['logical_or', 127],
mean=data_preprocessor['mean'],
std=data_preprocessor['std'],
# Image data is not converted to rgb.
to_rgb=data_preprocessor['bgr_to_rgb']),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'border'))
]
train_dataloader = dict(
batch_size=6,
num_workers=3,
batch_sampler=None,
dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='Adam', lr=0.0005),
clip_grad=dict(max_norm=35, norm_type=2))
max_epochs = 210
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0 / 3,
by_epoch=False,
begin=0,
end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[180],
gamma=0.1)
]
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (6 samples per GPU)
auto_scale_lr = dict(base_batch_size=48)
|
import asyncio
import pytest
from llama_index.core.workflow.context import Context
from llama_index.core.workflow.decorators import step
from llama_index.core.workflow.errors import WorkflowRuntimeError, WorkflowTimeoutError
from llama_index.core.workflow.events import Event, StartEvent, StopEvent
from llama_index.core.workflow.workflow import Workflow
from .conftest import OneTestEvent
class StreamingWorkflow(Workflow):
@step
async def chat(self, ctx: Context, ev: StartEvent) -> StopEvent:
async def stream_messages():
resp = "Paul Graham is a British-American computer scientist, entrepreneur, vc, and writer."
for word in resp.split():
yield word
async for w in stream_messages():
ctx.write_event_to_stream(Event(msg=w))
return StopEvent(result=None)
@pytest.mark.asyncio()
async def test_e2e():
wf = StreamingWorkflow()
r = wf.run()
async for ev in r.stream_events():
if not isinstance(ev, StopEvent):
assert "msg" in ev
await r
@pytest.mark.asyncio()
async def test_too_many_runs():
wf = StreamingWorkflow()
r = asyncio.gather(wf.run(), wf.run())
with pytest.raises(
WorkflowRuntimeError,
match="This workflow has multiple concurrent runs in progress and cannot stream events",
):
async for ev in wf.stream_events():
pass
await r
@pytest.mark.asyncio()
async def test_task_raised():
class DummyWorkflow(Workflow):
@step
async def step(self, ctx: Context, ev: StartEvent) -> StopEvent:
ctx.write_event_to_stream(OneTestEvent(test_param="foo"))
raise ValueError("The step raised an error!")
wf = DummyWorkflow()
r = wf.run()
# Make sure we don't block indefinitely here because the step raised
async for ev in r.stream_events():
if not isinstance(ev, StopEvent):
assert ev.test_param == "foo"
# Make sure the await actually caught the exception
with pytest.raises(
WorkflowRuntimeError, match="Error in step 'step': The step raised an error!"
):
await r
@pytest.mark.asyncio()
async def test_task_timeout():
class DummyWorkflow(Workflow):
@step
async def step(self, ctx: Context, ev: StartEvent) -> StopEvent:
ctx.write_event_to_stream(OneTestEvent(test_param="foo"))
await asyncio.sleep(2)
return StopEvent()
wf = DummyWorkflow(timeout=1)
r = wf.run()
# Make sure we don't block indefinitely here because the step raised
async for ev in r.stream_events():
if not isinstance(ev, StopEvent):
assert ev.test_param == "foo"
# Make sure the await actually caught the exception
with pytest.raises(WorkflowTimeoutError, match="Operation timed out"):
await r
@pytest.mark.asyncio()
async def test_multiple_sequential_streams():
wf = StreamingWorkflow()
r = wf.run()
# stream 1
async for _ in r.stream_events():
pass
await r
# stream 2 -- should not raise an error
r = wf.run()
async for _ in r.stream_events():
pass
await r
@pytest.mark.asyncio()
async def test_multiple_ongoing_streams():
wf = StreamingWorkflow()
stream_1 = wf.run()
stream_2 = wf.run()
async for ev in stream_1.stream_events():
if not isinstance(ev, StopEvent):
assert "msg" in ev
async for ev in stream_2.stream_events():
if not isinstance(ev, StopEvent):
assert "msg" in ev
await asyncio.gather(stream_1, stream_2)
@pytest.mark.asyncio()
async def test_resume_streams():
class CounterWorkflow(Workflow):
@step
async def count(self, ctx: Context, ev: StartEvent) -> StopEvent:
ctx.write_event_to_stream(Event(msg="hello!"))
cur_count = await ctx.get("cur_count", default=0)
await ctx.set("cur_count", cur_count + 1)
return StopEvent(result="done")
wf = CounterWorkflow()
handler_1 = wf.run()
async for _ in handler_1.stream_events():
pass
await handler_1
assert handler_1.ctx
handler_2 = wf.run(ctx=handler_1.ctx)
async for _ in handler_2.stream_events():
pass
await handler_2
assert handler_2.ctx
assert await handler_2.ctx.get("cur_count") == 2
|
import asyncio
import pytest
from llama_index.core.workflow.context import Context
from llama_index.core.workflow.decorators import step
from llama_index.core.workflow.errors import WorkflowRuntimeError, WorkflowTimeoutError
from llama_index.core.workflow.events import Event, StartEvent, StopEvent
from llama_index.core.workflow.workflow import Workflow
from .conftest import OneTestEvent
class StreamingWorkflow(Workflow):
@step
async def chat(self, ctx: Context, ev: StartEvent) -> StopEvent:
async def stream_messages():
resp = "Paul Graham is a British-American computer scientist, entrepreneur, vc, and writer."
for word in resp.split():
yield word
async for w in stream_messages():
ctx.write_event_to_stream(Event(msg=w))
return StopEvent(result=None)
@pytest.mark.asyncio()
async def test_e2e():
wf = StreamingWorkflow()
r = wf.run()
async for ev in r.stream_events():
if not isinstance(ev, StopEvent):
assert "msg" in ev
await r
@pytest.mark.asyncio()
async def test_too_many_runs():
wf = StreamingWorkflow()
r = asyncio.gather(wf.run(), wf.run())
with pytest.raises(
WorkflowRuntimeError,
match="This workflow has multiple concurrent runs in progress and cannot stream events",
):
async for ev in wf.stream_events():
pass
await r
@pytest.mark.asyncio()
async def test_task_raised():
class DummyWorkflow(Workflow):
@step
async def step(self, ctx: Context, ev: StartEvent) -> StopEvent:
ctx.write_event_to_stream(OneTestEvent(test_param="foo"))
raise ValueError("The step raised an error!")
wf = DummyWorkflow()
r = wf.run()
# Make sure we don't block indefinitely here because the step raised
async for ev in r.stream_events():
if not isinstance(ev, StopEvent):
assert ev.test_param == "foo"
# Make sure the await actually caught the exception
with pytest.raises(
WorkflowRuntimeError, match="Error in step 'step': The step raised an error!"
):
await r
@pytest.mark.asyncio()
async def test_task_timeout():
class DummyWorkflow(Workflow):
@step
async def step(self, ctx: Context, ev: StartEvent) -> StopEvent:
ctx.write_event_to_stream(OneTestEvent(test_param="foo"))
await asyncio.sleep(2)
return StopEvent()
wf = DummyWorkflow(timeout=1)
r = wf.run()
# Make sure we don't block indefinitely here because the step raised
async for ev in r.stream_events():
if not isinstance(ev, StopEvent):
assert ev.test_param == "foo"
# Make sure the await actually caught the exception
with pytest.raises(WorkflowTimeoutError, match="Operation timed out"):
await r
@pytest.mark.asyncio()
async def test_multiple_sequential_streams():
wf = StreamingWorkflow()
r = wf.run()
# stream 1
async for _ in r.stream_events():
pass
await r
# stream 2 -- should not raise an error
r = wf.run()
async for _ in r.stream_events():
pass
await r
@pytest.mark.asyncio()
async def test_multiple_ongoing_streams():
wf = StreamingWorkflow()
stream_1 = wf.run()
stream_2 = wf.run()
async for ev in stream_1.stream_events():
if not isinstance(ev, StopEvent):
assert "msg" in ev
async for ev in stream_2.stream_events():
if not isinstance(ev, StopEvent):
assert "msg" in ev
await asyncio.gather(stream_1, stream_2)
@pytest.mark.asyncio()
async def test_resume_streams():
class CounterWorkflow(Workflow):
@step
async def count(self, ctx: Context, ev: StartEvent) -> StopEvent:
ctx.write_event_to_stream(Event(msg="hello!"))
cur_count = await ctx.get("cur_count", default=0)
await ctx.set("cur_count", cur_count + 1)
return StopEvent(result="done")
wf = CounterWorkflow()
handler_1 = wf.run()
async for _ in handler_1.stream_events():
pass
await handler_1
handler_2 = wf.run(ctx=handler_1.ctx)
async for _ in handler_2.stream_events():
pass
await handler_2
assert handler_2.ctx
assert await handler_2.ctx.get("cur_count") == 2
|
from __future__ import annotations
from .CSRSparsity import CSRSparsity
from .TopKActivation import TopKActivation
__all__ = ["CSRSparsity", "TopKActivation"]
# TODO : Add in models the possibility to have the MLM head(for splade)
|
from __future__ import annotations
from .CSRSparsity import CSRSparsity
__all__ = ["CSRSparsity"]
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Dict, Union
from torch.utils.data import DataLoader
class BaseLoop(metaclass=ABCMeta):
"""Base loop class.
All subclasses inherited from ``BaseLoop`` should overwrite the
:meth:`run` method.
Args:
runner (Runner): A reference of runner.
dataloader (Dataloader or dict): An iterator to generate one batch of
dataset each iteration.
"""
def __init__(self, runner, dataloader: Union[DataLoader, Dict]) -> None:
self._runner = runner
if isinstance(dataloader, dict):
# Determine whether or not different ranks use different seed.
diff_rank_seed = runner._randomness_cfg.get(
'diff_rank_seed', False)
self.dataloader = runner.build_dataloader(
dataloader, seed=runner.seed, diff_rank_seed=diff_rank_seed)
else:
self.dataloader = dataloader
@property
def runner(self):
return self._runner
@abstractmethod
def run(self) -> None:
"""Execute loop."""
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Dict, Union
from torch.utils.data import DataLoader
class BaseLoop(metaclass=ABCMeta):
"""Base loop class.
All subclasses inherited from ``BaseLoop`` should overwrite the
:meth:`run` method.
Args:
runner (Runner): A reference of runner.
dataloader (Dataloader or dict): An iterator to generate one batch of
dataset each iteration.
"""
def __init__(self, runner, dataloader: Union[DataLoader, Dict]) -> None:
self._runner = runner
if isinstance(dataloader, dict):
self.dataloader = runner.build_dataloader(
dataloader, seed=runner.seed)
else:
self.dataloader = dataloader
@property
def runner(self):
return self._runner
@abstractmethod
def run(self) -> None:
"""Execute loop."""
|
import asyncio
import logging
import os
from jina import __default_host__
from jina.importer import ImportExtensions
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.gateway.http.app import get_fastapi_app
__all__ = ['HTTPGatewayRuntime']
class HTTPGatewayRuntime(GatewayRuntime):
"""Runtime for HTTP interface."""
async def async_setup(self):
"""
The async method setup the runtime.
Setup the uvicorn server.
"""
with ImportExtensions(required=True):
from uvicorn import Config, Server
class UviServer(Server):
"""The uvicorn server."""
async def setup(self, sockets=None):
"""
Setup uvicorn server.
:param sockets: sockets of server.
"""
config = self.config
if not config.loaded:
config.load()
self.lifespan = config.lifespan_class(config)
self.install_signal_handlers()
await self.startup(sockets=sockets)
if self.should_exit:
return
async def serve(self, **kwargs):
"""
Start the server.
:param kwargs: keyword arguments
"""
await self.main_loop()
if 'JINA_DISABLE_HEALTHCHECK_LOGS' in os.environ:
class _EndpointFilter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
# NOTE: space is important after `GET /`, else all logs will be disabled.
return record.getMessage().find("GET / ") == -1
# Filter out healthcheck endpoint `GET /`
logging.getLogger("uvicorn.access").addFilter(_EndpointFilter())
from jina.helper import extend_rest_interface
uvicorn_kwargs = self.args.uvicorn_kwargs or {}
for ssl_file in ['ssl_keyfile', 'ssl_certfile']:
if getattr(self.args, ssl_file):
if ssl_file not in uvicorn_kwargs.keys():
uvicorn_kwargs[ssl_file] = getattr(self.args, ssl_file)
self._set_topology_graph()
self._set_connection_pool()
self._server = UviServer(
config=Config(
app=extend_rest_interface(
get_fastapi_app(
self.args,
topology_graph=self._topology_graph,
connection_pool=self._connection_pool,
logger=self.logger,
metrics_registry=self.metrics_registry,
)
),
host=__default_host__,
port=self.args.port,
log_level=os.getenv('JINA_LOG_LEVEL', 'error').lower(),
**uvicorn_kwargs
)
)
await self._server.setup()
async def async_run_forever(self):
"""Running method of the server."""
await self._server.serve()
async def _wait_for_cancel(self):
"""Do NOT override this method when inheriting from :class:`GatewayPod`"""
# handle terminate signals
while not self.is_cancel.is_set() and not self._server.should_exit:
await asyncio.sleep(0.1)
await self.async_cancel()
async def async_teardown(self):
"""Shutdown the server."""
await self._server.shutdown()
await self._connection_pool.close()
async def async_cancel(self):
"""Stop the server."""
self._server.should_exit = True
|
import asyncio
import logging
import os
from jina import __default_host__
from jina.importer import ImportExtensions
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.gateway.http.app import get_fastapi_app
__all__ = ['HTTPGatewayRuntime']
class HTTPGatewayRuntime(GatewayRuntime):
"""Runtime for HTTP interface."""
async def async_setup(self):
"""
The async method setup the runtime.
Setup the uvicorn server.
"""
with ImportExtensions(required=True):
from uvicorn import Config, Server
class UviServer(Server):
"""The uvicorn server."""
async def setup(self, sockets=None):
"""
Setup uvicorn server.
:param sockets: sockets of server.
"""
config = self.config
if not config.loaded:
config.load()
self.lifespan = config.lifespan_class(config)
self.install_signal_handlers()
await self.startup(sockets=sockets)
if self.should_exit:
return
async def serve(self, **kwargs):
"""
Start the server.
:param kwargs: keyword arguments
"""
await self.main_loop()
if 'JINA_DISABLE_HEALTHCHECK_LOGS' in os.environ:
class _EndpointFilter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
# NOTE: space is important after `GET /`, else all logs will be disabled.
return record.getMessage().find("GET / ") == -1
# Filter out healthcheck endpoint `GET /`
logging.getLogger("uvicorn.access").addFilter(_EndpointFilter())
from jina.helper import extend_rest_interface
uvicorn_kwargs = self.args.uvicorn_kwargs or {}
for ssl_file in ['ssl_keyfile', 'ssl_certfile']:
if getattr(self.args, ssl_file):
if ssl_file not in uvicorn_kwargs.keys():
uvicorn_kwargs[ssl_file] = getattr(self.args, ssl_file)
self._set_topology_graph()
self._set_connection_pool()
self._server = UviServer(
config=Config(
app=extend_rest_interface(
get_fastapi_app(
self.args,
topology_graph=self._topology_graph,
connection_pool=self._connection_pool,
logger=self.logger,
metrics_registry=self.metrics_registry,
)
),
host=__default_host__,
port=self.args.port,
log_level=os.getenv('JINA_LOG_LEVEL', 'error').lower(),
**uvicorn_kwargs
)
)
await self._server.setup()
async def async_run_forever(self):
"""Running method of ther server."""
self._connection_pool.start()
await self._server.serve()
async def _wait_for_cancel(self):
"""Do NOT override this method when inheriting from :class:`GatewayPod`"""
# handle terminate signals
while not self.is_cancel.is_set() and not self._server.should_exit:
await asyncio.sleep(0.1)
await self.async_cancel()
async def async_teardown(self):
"""Shutdown the server."""
await self._server.shutdown()
await self._connection_pool.close()
async def async_cancel(self):
"""Stop the server."""
self._server.should_exit = True
|
import types
from typing import TYPE_CHECKING
from docarray.index.backends.in_memory import InMemoryExactNNIndex
from docarray.utils._internal.misc import (
_get_path_from_docarray_root_level,
import_library,
)
if TYPE_CHECKING:
from docarray.index.backends.elastic import ElasticDocIndex # noqa: F401
from docarray.index.backends.elasticv7 import ElasticV7DocIndex # noqa: F401
from docarray.index.backends.hnswlib import HnswDocumentIndex # noqa: F401
from docarray.index.backends.qdrant import QdrantDocumentIndex # noqa: F401
from docarray.index.backends.weaviate import WeaviateDocumentIndex # noqa: F401
from docarray.index.backends.redis import RedisDocumentIndex # noqa: F401
from docarray.index.backends.milvus import MilvusDocumentIndex # noqa: F401
__all__ = [
'InMemoryExactNNIndex',
'ElasticDocIndex',
'ElasticV7DocIndex',
'QdrantDocumentIndex',
'WeaviateDocumentIndex',
'RedisDocumentIndex',
'MilvusDocumentIndex',
]
def __getattr__(name: str):
lib: types.ModuleType
if name == 'HnswDocumentIndex':
import_library('hnswlib', raise_error=True)
import docarray.index.backends.hnswlib as lib
elif name == 'ElasticDocIndex':
import_library('elasticsearch', raise_error=True)
import docarray.index.backends.elastic as lib
elif name == 'ElasticV7DocIndex':
import_library('elasticsearch', raise_error=True)
import docarray.index.backends.elasticv7 as lib
elif name == 'QdrantDocumentIndex':
import_library('qdrant_client', raise_error=True)
import docarray.index.backends.qdrant as lib
elif name == 'WeaviateDocumentIndex':
import_library('weaviate', raise_error=True)
import docarray.index.backends.weaviate as lib
elif name == 'MilvusDocumentIndex':
import_library('pymilvus', raise_error=True)
import docarray.index.backends.milvus as lib
elif name == 'RedisDocumentIndex':
import_library('redis', raise_error=True)
import docarray.index.backends.redis as lib
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
index_cls = getattr(lib, name)
if name not in __all__:
__all__.append(name)
return index_cls
|
import types
from typing import TYPE_CHECKING
from docarray.index.backends.in_memory import InMemoryExactNNIndex
from docarray.utils._internal.misc import (
_get_path_from_docarray_root_level,
import_library,
)
if TYPE_CHECKING:
from docarray.index.backends.elastic import ElasticDocIndex # noqa: F401
from docarray.index.backends.elasticv7 import ElasticV7DocIndex # noqa: F401
from docarray.index.backends.hnswlib import HnswDocumentIndex # noqa: F401
from docarray.index.backends.qdrant import QdrantDocumentIndex # noqa: F401
from docarray.index.backends.weaviate import WeaviateDocumentIndex # noqa: F401
from docarray.index.backends.redis import RedisDocumentIndex # noqa: F401
__all__ = ['InMemoryExactNNIndex']
def __getattr__(name: str):
lib: types.ModuleType
if name == 'HnswDocumentIndex':
import_library('hnswlib', raise_error=True)
import docarray.index.backends.hnswlib as lib
elif name == 'ElasticDocIndex':
import_library('elasticsearch', raise_error=True)
import docarray.index.backends.elastic as lib
elif name == 'ElasticV7DocIndex':
import_library('elasticsearch', raise_error=True)
import docarray.index.backends.elasticv7 as lib
elif name == 'QdrantDocumentIndex':
import_library('qdrant_client', raise_error=True)
import docarray.index.backends.qdrant as lib
elif name == 'WeaviateDocumentIndex':
import_library('weaviate', raise_error=True)
import docarray.index.backends.weaviate as lib
elif name == 'RedisDocumentIndex':
import_library('redis', raise_error=True)
import docarray.index.backends.redis as lib
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
index_cls = getattr(lib, name)
if name not in __all__:
__all__.append(name)
return index_cls
|
import sys
import numpy as np
import pytest
from hypothesis import given, settings, strategies
import xgboost as xgb
from xgboost import testing as tm
from xgboost.testing import no_cupy
from xgboost.testing.updater import check_extmem_qdm, check_quantile_loss_extmem
sys.path.append("tests/python")
from test_data_iterator import run_data_iterator
from test_data_iterator import test_single_batch as cpu_single_batch
# There are lots of warnings if XGBoost is not running on ATS-enabled systems.
pytestmark = pytest.mark.filterwarnings("ignore")
def test_gpu_single_batch() -> None:
cpu_single_batch("hist", "cuda")
@pytest.mark.skipif(**no_cupy())
@given(
strategies.integers(0, 1024),
strategies.integers(1, 7),
strategies.integers(0, 8),
strategies.booleans(),
strategies.booleans(),
strategies.booleans(),
)
@settings(deadline=None, max_examples=16, print_blob=True)
def test_gpu_data_iterator(
n_samples_per_batch: int,
n_features: int,
n_batches: int,
subsample: bool,
use_cupy: bool,
on_host: bool,
) -> None:
run_data_iterator(
n_samples_per_batch,
n_features,
n_batches,
"hist",
subsample=subsample,
device="cuda",
use_cupy=use_cupy,
on_host=on_host,
)
def test_cpu_data_iterator() -> None:
"""Make sure CPU algorithm can handle GPU inputs"""
run_data_iterator(
1024,
2,
3,
"approx",
device="cuda",
subsample=False,
use_cupy=True,
on_host=False,
)
@given(
strategies.integers(1, 2048),
strategies.integers(1, 8),
strategies.integers(1, 4),
strategies.integers(2, 16),
strategies.booleans(),
)
@settings(deadline=None, max_examples=10, print_blob=True)
def test_extmem_qdm(
n_samples_per_batch: int,
n_features: int,
n_batches: int,
n_bins: int,
on_host: bool,
) -> None:
check_extmem_qdm(
n_samples_per_batch,
n_features,
n_batches=n_batches,
n_bins=n_bins,
device="cuda",
on_host=on_host,
)
def test_invalid_device_extmem_qdm() -> None:
it = tm.IteratorForTest(
*tm.make_batches(16, 4, 2, use_cupy=False), cache="cache", on_host=True
)
Xy = xgb.ExtMemQuantileDMatrix(it)
with pytest.raises(ValueError, match="cannot be used for GPU"):
xgb.train({"device": "cuda"}, Xy)
it = tm.IteratorForTest(
*tm.make_batches(16, 4, 2, use_cupy=True), cache="cache", on_host=True
)
Xy = xgb.ExtMemQuantileDMatrix(it)
with pytest.raises(ValueError, match="cannot be used for CPU"):
xgb.train({"device": "cpu"}, Xy)
def test_concat_pages_invalid() -> None:
it = tm.IteratorForTest(*tm.make_batches(64, 16, 4, use_cupy=True), cache=None)
Xy = xgb.ExtMemQuantileDMatrix(it)
with pytest.raises(ValueError, match="can not be used with concatenated pages"):
xgb.train(
{
"device": "cuda",
"subsample": 0.5,
"sampling_method": "gradient_based",
"extmem_single_page": True,
"objective": "reg:absoluteerror",
},
Xy,
)
def test_concat_pages() -> None:
boosters = []
for min_cache_page_bytes in [0, 256, 386, np.iinfo(np.int64).max]:
it = tm.IteratorForTest(
*tm.make_batches(64, 16, 4, use_cupy=True),
cache=None,
min_cache_page_bytes=min_cache_page_bytes,
on_host=True,
)
Xy = xgb.ExtMemQuantileDMatrix(it)
booster = xgb.train(
{
"device": "cuda",
"objective": "reg:absoluteerror",
},
Xy,
)
boosters.append(booster.save_raw(raw_format="json"))
for model in boosters[1:]:
assert str(model) == str(boosters[0])
@given(
strategies.integers(1, 64),
strategies.integers(1, 8),
strategies.integers(1, 4),
)
@settings(deadline=None, max_examples=10, print_blob=True)
def test_quantile_objective(
n_samples_per_batch: int, n_features: int, n_batches: int
) -> None:
check_quantile_loss_extmem(
n_samples_per_batch,
n_features,
n_batches,
"hist",
"cuda",
)
check_quantile_loss_extmem(
n_samples_per_batch,
n_features,
n_batches,
"approx",
"cuda",
)
|
import sys
import numpy as np
import pytest
from hypothesis import given, settings, strategies
import xgboost as xgb
from xgboost import testing as tm
from xgboost.testing import no_cupy
from xgboost.testing.updater import check_extmem_qdm, check_quantile_loss_extmem
sys.path.append("tests/python")
from test_data_iterator import run_data_iterator
from test_data_iterator import test_single_batch as cpu_single_batch
# There are lots of warnings if XGBoost is not running on ATS-enabled systems.
pytestmark = pytest.mark.filterwarnings("ignore")
def test_gpu_single_batch() -> None:
cpu_single_batch("hist", "cuda")
@pytest.mark.skipif(**no_cupy())
@given(
strategies.integers(0, 1024),
strategies.integers(1, 7),
strategies.integers(0, 8),
strategies.booleans(),
strategies.booleans(),
strategies.booleans(),
)
@settings(deadline=None, max_examples=16, print_blob=True)
def test_gpu_data_iterator(
n_samples_per_batch: int,
n_features: int,
n_batches: int,
subsample: bool,
use_cupy: bool,
on_host: bool,
) -> None:
run_data_iterator(
n_samples_per_batch,
n_features,
n_batches,
"hist",
subsample=subsample,
device="cuda",
use_cupy=use_cupy,
on_host=on_host,
)
def test_cpu_data_iterator() -> None:
"""Make sure CPU algorithm can handle GPU inputs"""
run_data_iterator(
1024,
2,
3,
"approx",
device="cuda",
subsample=False,
use_cupy=True,
on_host=False,
)
@given(
strategies.integers(1, 2048),
strategies.integers(1, 8),
strategies.integers(1, 4),
strategies.integers(2, 16),
strategies.booleans(),
)
@settings(deadline=None, max_examples=10, print_blob=True)
def test_extmem_qdm(
n_samples_per_batch: int,
n_features: int,
n_batches: int,
n_bins: int,
on_host: bool,
) -> None:
check_extmem_qdm(
n_samples_per_batch,
n_features,
n_batches=n_batches,
n_bins=n_bins,
device="cuda",
on_host=on_host,
)
def test_invalid_device_extmem_qdm() -> None:
it = tm.IteratorForTest(
*tm.make_batches(16, 4, 2, use_cupy=False), cache="cache", on_host=True
)
Xy = xgb.ExtMemQuantileDMatrix(it)
with pytest.raises(ValueError, match="cannot be used for GPU"):
xgb.train({"device": "cuda"}, Xy)
it = tm.IteratorForTest(
*tm.make_batches(16, 4, 2, use_cupy=True), cache="cache", on_host=True
)
Xy = xgb.ExtMemQuantileDMatrix(it)
with pytest.raises(ValueError, match="cannot be used for CPU"):
xgb.train({"device": "cpu"}, Xy)
def test_concat_pages_invalid() -> None:
it = tm.IteratorForTest(*tm.make_batches(64, 16, 4, use_cupy=True), cache=None)
Xy = xgb.ExtMemQuantileDMatrix(it)
with pytest.raises(ValueError, match="can not be used with concatenated pages"):
xgb.train(
{
"device": "cuda",
"subsample": 0.5,
"sampling_method": "gradient_based",
"extmem_concat_pages": True,
"objective": "reg:absoluteerror",
},
Xy,
)
def test_concat_pages() -> None:
boosters = []
for min_cache_page_bytes in [0, 256, 386, np.iinfo(np.int64).max]:
it = tm.IteratorForTest(
*tm.make_batches(64, 16, 4, use_cupy=True),
cache=None,
min_cache_page_bytes=min_cache_page_bytes,
on_host=True,
)
Xy = xgb.ExtMemQuantileDMatrix(it)
booster = xgb.train(
{
"device": "cuda",
"objective": "reg:absoluteerror",
},
Xy,
)
boosters.append(booster.save_raw(raw_format="json"))
for model in boosters[1:]:
assert str(model) == str(boosters[0])
@given(
strategies.integers(1, 64),
strategies.integers(1, 8),
strategies.integers(1, 4),
)
@settings(deadline=None, max_examples=10, print_blob=True)
def test_quantile_objective(
n_samples_per_batch: int, n_features: int, n_batches: int
) -> None:
check_quantile_loss_extmem(
n_samples_per_batch,
n_features,
n_batches,
"hist",
"cuda",
)
check_quantile_loss_extmem(
n_samples_per_batch,
n_features,
n_batches,
"approx",
"cuda",
)
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.10.0'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information contains major, minor, micro version.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.9.1'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information contains major, minor, micro version.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
from abc import abstractmethod
from typing import List, Sequence
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.instrumentation import DispatcherSpanMixin
from llama_index.core.prompts.mixin import PromptMixin, PromptMixinType
from llama_index.core.schema import QueryBundle
from llama_index.core.tools.types import ToolMetadata
class SubQuestion(BaseModel):
sub_question: str
tool_name: str
class SubQuestionList(BaseModel):
"""
A pydantic object wrapping a list of sub-questions.
This is mostly used to make getting a json schema easier.
"""
items: List[SubQuestion]
class BaseQuestionGenerator(PromptMixin, DispatcherSpanMixin):
def _get_prompt_modules(self) -> PromptMixinType:
"""Get prompt modules."""
return {}
@abstractmethod
def generate(
self, tools: Sequence[ToolMetadata], query: QueryBundle
) -> List[SubQuestion]:
pass
@abstractmethod
async def agenerate(
self, tools: Sequence[ToolMetadata], query: QueryBundle
) -> List[SubQuestion]:
pass
|
from abc import abstractmethod
from typing import List, Sequence
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.instrumentation import DispatcherSpanMixin
from llama_index.core.prompts.mixin import PromptMixin, PromptMixinType
from llama_index.core.schema import QueryBundle
from llama_index.core.tools.types import ToolMetadata
class SubQuestion(BaseModel):
sub_question: str
tool_name: str
class SubQuestionList(BaseModel):
"""A pydantic object wrapping a list of sub-questions.
This is mostly used to make getting a json schema easier.
"""
items: List[SubQuestion]
class BaseQuestionGenerator(PromptMixin, DispatcherSpanMixin):
def _get_prompt_modules(self) -> PromptMixinType:
"""Get prompt modules."""
return {}
@abstractmethod
def generate(
self, tools: Sequence[ToolMetadata], query: QueryBundle
) -> List[SubQuestion]:
pass
@abstractmethod
async def agenerate(
self, tools: Sequence[ToolMetadata], query: QueryBundle
) -> List[SubQuestion]:
pass
|
import pytest
from jina import Flow
from jina.enums import GatewayProtocolType
from tests import random_docs
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['http', 'websocket', 'grpc'])
@pytest.mark.parametrize('changeto_protocol', ['grpc', 'http', 'websocket'])
def test_change_gateway(protocol, changeto_protocol):
f = Flow(protocol=protocol).add().add().add(needs='executor1').needs_all()
with f:
da = f.post('/', random_docs(10))
assert len(da) == 10
with pytest.raises(RuntimeError):
f.protocol = changeto_protocol
@pytest.mark.parametrize('protocol', ['http', 'websocket', 'grpc'])
def test_get_set_client_gateway_in_flow(protocol):
f = Flow(protocol=protocol, port=12345)
assert f.client_args.protocol == GatewayProtocolType.from_string(protocol)
# gateway_args returns multiple protocols
assert f.gateway_args.protocol[0] == GatewayProtocolType.from_string(protocol)
# flow returns single or multiple protocols
assert f.protocol == GatewayProtocolType.from_string(protocol)
assert f.client_args.port == 12345
# gateway_args returns multiple ports
assert f.gateway_args.port[0] == 12345
# flow returns single or multiple ports
assert f.port == 12345
f._update_network_interface(port=54321)
assert f.client_args.port == 54321
assert f.gateway_args.port[0] == 54321
|
import pytest
from jina import Flow
from jina.enums import GatewayProtocolType
from tests import random_docs
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['http', 'websocket', 'grpc'])
@pytest.mark.parametrize('changeto_protocol', ['grpc', 'http', 'websocket'])
def test_change_gateway(protocol, changeto_protocol):
f = Flow(protocol=protocol).add().add().add(needs='executor1').needs_all()
with f:
da = f.post('/', random_docs(10))
assert len(da) == 10
with pytest.raises(RuntimeError):
f.protocol = changeto_protocol
@pytest.mark.parametrize('protocol', ['http', 'websocket', 'grpc'])
def test_get_set_client_gateway_in_flow(protocol):
f = Flow(protocol=protocol, port=12345)
assert f.client_args.protocol == GatewayProtocolType.from_string(protocol)
assert f.gateway_args.protocol == GatewayProtocolType.from_string(protocol)
assert int(f.client_args.port) == 12345
assert int(f.gateway_args.port) == 12345
f._update_network_interface(port=54321)
assert int(f.client_args.port) == 54321
assert int(f.gateway_args.port) == 54321
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "3.5.1"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .utils import *
from .utils import logging
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "3.5.1.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .utils import *
from .utils import logging
|
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_pooling import BasePooling
@keras_export(["keras.layers.MaxPooling1D", "keras.layers.MaxPool1D"])
class MaxPooling1D(BasePooling):
"""Max pooling operation for 1D temporal data.
Downsamples the input representation by taking the maximum value over a
spatial window of size `pool_size`. The window is shifted by `strides`.
The resulting output when using the `"valid"` padding option has a shape of:
`output_shape = (input_shape - pool_size + 1) / strides)`.
The resulting output shape when using the `"same"` padding option is:
`output_shape = input_shape / strides`
Args:
pool_size: int, size of the max pooling window.
strides: int or None. Specifies how much the pooling window moves
for each pooling step. If None, it will default to `pool_size`.
padding: string, either `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, steps, features)`
while `"channels_first"` corresponds to inputs with shape
`(batch, features, steps)`. It defaults to the `image_data_format`
value found in your Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be `"channels_last"`.
Input shape:
- If `data_format="channels_last"`:
3D tensor with shape `(batch_size, steps, features)`.
- If `data_format="channels_first"`:
3D tensor with shape `(batch_size, features, steps)`.
Output shape:
- If `data_format="channels_last"`:
3D tensor with shape `(batch_size, downsampled_steps, features)`.
- If `data_format="channels_first"`:
3D tensor with shape `(batch_size, features, downsampled_steps)`.
Examples:
`strides=1` and `padding="valid"`:
>>> x = np.array([1., 2., 3., 4., 5.])
>>> x = np.reshape(x, [1, 5, 1])
>>> max_pool_1d = keras.layers.MaxPooling1D(pool_size=2,
... strides=1, padding="valid")
>>> max_pool_1d(x)
`strides=2` and `padding="valid"`:
>>> x = np.array([1., 2., 3., 4., 5.])
>>> x = np.reshape(x, [1, 5, 1])
>>> max_pool_1d = keras.layers.MaxPooling1D(pool_size=2,
... strides=2, padding="valid")
>>> max_pool_1d(x)
`strides=1` and `padding="same"`:
>>> x = np.array([1., 2., 3., 4., 5.])
>>> x = np.reshape(x, [1, 5, 1])
>>> max_pool_1d = keras.layers.MaxPooling1D(pool_size=2,
... strides=1, padding="same")
>>> max_pool_1d(x)
"""
def __init__(
self,
pool_size=2,
strides=None,
padding="valid",
data_format=None,
name=None,
**kwargs
):
super().__init__(
pool_size,
strides,
pool_dimensions=1,
pool_mode="max",
padding=padding,
data_format=data_format,
name=name,
**kwargs,
)
|
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_pooling import BasePooling
@keras_export(["keras.layers.MaxPooling1D", "keras.layers.MaxPool1D"])
class MaxPooling1D(BasePooling):
"""Max pooling operation for 1D temporal data.
Downsamples the input representation by taking the maximum value over a
spatial window of size `pool_size`. The window is shifted by `strides`.
The resulting output when using the `"valid"` padding option has a shape of:
`output_shape = (input_shape - pool_size + 1) / strides)`.
The resulting output shape when using the `"same"` padding option is:
`output_shape = input_shape / strides`
Args:
pool_size: int, size of the max pooling window.
strides: int or None. Specifies how much the pooling window moves
for each pooling step. If None, it will default to `pool_size`.
padding: string, either `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, steps, features)`
while `"channels_first"` corresponds to inputs with shape
`(batch, features, steps)`. It defaults to the `image_data_format`
value found in your Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be `"channels_last"`.
Input shape:
- If `data_format="channels_last"`:
3D tensor with shape `(batch_size, steps, features)`.
- If `data_format="channels_first"`:
3D tensor with shape `(batch_size, features, steps)`.
Output shape:
- If `data_format="channels_last"`:
3D tensor with shape `(batch_size, downsampled_steps, features)`.
- If `data_format="channels_first"`:
3D tensor with shape `(batch_size, features, downsampled_steps)`.
Examples:
`strides=1` and `padding="valid"`:
>>> x = np.array([1., 2., 3., 4., 5.])
>>> x = np.reshape(x, [1, 5, 1])
>>> max_pool_1d = keras.layers.MaxPooling1D(pool_size=2,
... strides=1, padding="valid")
>>> max_pool_1d(x)
`strides=2` and `padding="valid"`:
>>> x = np.array([1., 2., 3., 4., 5.])
>>> x = np.reshape(x, [1, 5, 1])
>>> max_pool_1d = keras.layers.MaxPooling1D(pool_size=2,
... strides=2, padding="valid")
>>> max_pool_1d(x)
`strides=1` and `padding="same"`:
>>> x = np.array([1., 2., 3., 4., 5.])
>>> x = np.reshape(x, [1, 5, 1])
>>> max_pool_1d = keras.layers.MaxPooling1D(pool_size=2,
... strides=1, padding="same")
>>> max_pool_1d(x)
"""
def __init__(
self,
pool_size=2,
strides=None,
padding="valid",
data_format=None,
name=None,
**kwargs
):
super().__init__(
pool_size,
strides,
pool_dimensions=1,
pool_mode="max",
padding=padding,
data_format=data_format,
name=name,
**kwargs,
)
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.10.0"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 6:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=6.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.9.1.dev0"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 6:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=6.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
import asyncio
from typing import Any, AsyncGenerator, List, Optional
from llama_index.core.workflow.context import Context
from llama_index.core.workflow.errors import WorkflowDone
from llama_index.core.workflow.events import Event, StopEvent
from .types import RunResultT
from .utils import BUSY_WAIT_DELAY
class WorkflowHandler(asyncio.Future[RunResultT]):
def __init__(
self,
*args: Any,
ctx: Optional[Context] = None,
run_id: Optional[str] = None,
**kwargs: Any,
) -> None:
super().__init__(*args, **kwargs)
self.run_id = run_id
self._ctx = ctx
@property
def ctx(self) -> Optional[Context]:
return self._ctx
def __str__(self) -> str:
return str(self.result())
def is_done(self) -> bool:
return self.done()
async def stream_events(self) -> AsyncGenerator[Event, None]:
if self.ctx is None:
raise ValueError("Context is not set!")
while True:
ev = await self.ctx.streaming_queue.get()
yield ev
if isinstance(ev, StopEvent):
break
async def run_step(self) -> Optional[List[Event]]:
"""
Runs the next workflow step and returns the output Event.
If return is None, then the workflow is considered done.
Examples:
```python
handler = workflow.run(stepwise=True)
while not handler.is_done():
ev = await handler.run_step()
handler.ctx.send_event(ev)
result = handler.result()
print(result)
```
"""
# since event is sent before calling this method, we need to unblock the event loop
await asyncio.sleep(0)
if self.ctx is None:
raise ValueError("Context must be set to run a workflow step-wise!")
if not self.ctx.stepwise:
raise ValueError(
"Workflow must be created passing stepwise=True to call this method."
)
try:
# Reset the events collected in current step
self.ctx._step_events_holding = None
# Unblock all pending steps
for flag in self.ctx._step_flags.values():
flag.set()
# Yield back control to the event loop to give an unblocked step
# the chance to run (we won't actually sleep here).
await asyncio.sleep(0)
# check if we're done, or if a step raised error
we_done = False
exception_raised = None
retval = None
for t in self.ctx._tasks:
# Check if we're done
if not t.done():
continue
we_done = True
e = t.exception()
if type(e) is not WorkflowDone:
exception_raised = e
if we_done:
await self.ctx.shutdown()
if exception_raised:
raise exception_raised
if not self.done():
self.set_result(self.ctx.get_result())
else:
# Continue with running next step. Make sure we wait for the
# step function to return before proceeding.
in_progress = len(await self.ctx.running_steps())
while in_progress:
await asyncio.sleep(BUSY_WAIT_DELAY)
in_progress = len(await self.ctx.running_steps())
# notify unblocked task that we're ready to accept next event
async with self.ctx._step_condition:
self.ctx._step_condition.notify()
# Wait to be notified that the new_ev has been written
async with self.ctx._step_event_written:
await self.ctx._step_event_written.wait()
retval = self.ctx.get_holding_events()
except Exception as e:
if not self.is_done(): # Avoid InvalidStateError edge case
self.set_exception(e)
raise
return retval
async def cancel_run(self) -> None:
"""Method to cancel a Workflow execution."""
if self.ctx:
self.ctx._cancel_flag.set()
await asyncio.sleep(0)
|
import asyncio
from typing import Any, AsyncGenerator, List, Optional
from llama_index.core.workflow.context import Context
from llama_index.core.workflow.errors import WorkflowDone
from llama_index.core.workflow.events import Event, StopEvent
from .types import RunResultT
from .utils import BUSY_WAIT_DELAY
class WorkflowHandler(asyncio.Future[RunResultT]):
def __init__(
self,
*args: Any,
ctx: Optional[Context] = None,
run_id: Optional[str] = None,
**kwargs: Any,
) -> None:
super().__init__(*args, **kwargs)
self.run_id = run_id
self._ctx = ctx
@property
def ctx(self) -> Optional[Context]:
return self._ctx
def __str__(self) -> str:
return str(self.result())
def is_done(self) -> bool:
return self.done()
async def stream_events(self) -> AsyncGenerator[Event, None]:
if self.ctx is None:
raise ValueError("Context is not set!")
while True:
ev = await self.ctx.streaming_queue.get()
yield ev
if isinstance(ev, StopEvent):
break
async def run_step(self) -> Optional[List[Event]]:
"""Runs the next workflow step and returns the output Event.
If return is None, then the workflow is considered done.
Examples:
```python
handler = workflow.run(stepwise=True)
while not handler.is_done():
ev = await handler.run_step()
handler.ctx.send_event(ev)
result = handler.result()
print(result)
```
"""
# since event is sent before calling this method, we need to unblock the event loop
await asyncio.sleep(0)
if self.ctx is None:
raise ValueError("Context must be set to run a workflow step-wise!")
if not self.ctx.stepwise:
raise ValueError(
"Workflow must be created passing stepwise=True to call this method."
)
try:
# Reset the events collected in current step
self.ctx._step_events_holding = None
# Unblock all pending steps
for flag in self.ctx._step_flags.values():
flag.set()
# Yield back control to the event loop to give an unblocked step
# the chance to run (we won't actually sleep here).
await asyncio.sleep(0)
# check if we're done, or if a step raised error
we_done = False
exception_raised = None
retval = None
for t in self.ctx._tasks:
# Check if we're done
if not t.done():
continue
we_done = True
e = t.exception()
if type(e) is not WorkflowDone:
exception_raised = e
if we_done:
await self.ctx.shutdown()
if exception_raised:
raise exception_raised
if not self.done():
self.set_result(self.ctx.get_result())
else:
# Continue with running next step. Make sure we wait for the
# step function to return before proceeding.
in_progress = len(await self.ctx.running_steps())
while in_progress:
await asyncio.sleep(BUSY_WAIT_DELAY)
in_progress = len(await self.ctx.running_steps())
# notify unblocked task that we're ready to accept next event
async with self.ctx._step_condition:
self.ctx._step_condition.notify()
# Wait to be notified that the new_ev has been written
async with self.ctx._step_event_written:
await self.ctx._step_event_written.wait()
retval = self.ctx.get_holding_events()
except Exception as e:
if not self.is_done(): # Avoid InvalidStateError edge case
self.set_exception(e)
raise
return retval
async def cancel_run(self) -> None:
"""Method to cancel a Workflow execution."""
if self.ctx:
self.ctx._cancel_flag.set()
await asyncio.sleep(0)
|
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from packaging import version
from .. import __version__
from .constants import (
CONFIG_NAME,
DEPRECATED_REVISION_ARGS,
DIFFUSERS_DYNAMIC_MODULE_NAME,
FLAX_WEIGHTS_NAME,
GGUF_FILE_EXTENSION,
HF_MODULES_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MIN_PEFT_VERSION,
ONNX_EXTERNAL_WEIGHTS_NAME,
ONNX_WEIGHTS_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFETENSORS_FILE_EXTENSION,
SAFETENSORS_WEIGHTS_NAME,
USE_PEFT_BACKEND,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .deprecation_utils import deprecate
from .doc_utils import replace_example_docstring
from .dynamic_modules_utils import get_class_from_dynamic_module
from .export_utils import export_to_gif, export_to_obj, export_to_ply, export_to_video
from .hub_utils import (
PushToHubMixin,
_add_variant,
_get_checkpoint_shard_files,
_get_model_file,
extract_commit_hash,
http_user_agent,
)
from .import_utils import (
BACKENDS_MAPPING,
DIFFUSERS_SLOW_IMPORT,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_accelerate_available,
is_accelerate_version,
is_bitsandbytes_available,
is_bitsandbytes_version,
is_bs4_available,
is_flax_available,
is_ftfy_available,
is_gguf_available,
is_gguf_version,
is_google_colab,
is_hf_hub_version,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_matplotlib_available,
is_note_seq_available,
is_onnx_available,
is_peft_available,
is_peft_version,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_tensorboard_available,
is_timm_available,
is_torch_available,
is_torch_npu_available,
is_torch_version,
is_torch_xla_available,
is_torch_xla_version,
is_torchao_available,
is_torchsde_available,
is_torchvision_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
is_wandb_available,
is_xformers_available,
requires_backends,
)
from .loading_utils import get_module_from_name, get_submodule_by_name, load_image, load_video
from .logging import get_logger
from .outputs import BaseOutput
from .peft_utils import (
check_peft_version,
delete_adapter_layers,
get_adapter_name,
get_peft_kwargs,
recurse_remove_peft_layers,
scale_lora_layers,
set_adapter_layers,
set_weights_and_activate_adapters,
unscale_lora_layers,
)
from .pil_utils import PIL_INTERPOLATION, make_image_grid, numpy_to_pil, pt_to_pil
from .state_dict_utils import (
convert_all_state_dict_to_peft,
convert_state_dict_to_diffusers,
convert_state_dict_to_kohya,
convert_state_dict_to_peft,
convert_unet_state_dict_to_peft,
)
from .typing_utils import _get_detailed_type, _is_valid_type
logger = get_logger(__name__)
def check_min_version(min_version):
if version.parse(__version__) < version.parse(min_version):
if "dev" in min_version:
error_message = (
"This example requires a source install from HuggingFace diffusers (see "
"`https://huggingface.co/docs/diffusers/installation#install-from-source`),"
)
else:
error_message = f"This example requires a minimum version of {min_version},"
error_message += f" but the version found is {__version__}.\n"
raise ImportError(error_message)
|
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from packaging import version
from .. import __version__
from .constants import (
CONFIG_NAME,
DEPRECATED_REVISION_ARGS,
DIFFUSERS_DYNAMIC_MODULE_NAME,
FLAX_WEIGHTS_NAME,
GGUF_FILE_EXTENSION,
HF_MODULES_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MIN_PEFT_VERSION,
ONNX_EXTERNAL_WEIGHTS_NAME,
ONNX_WEIGHTS_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFETENSORS_FILE_EXTENSION,
SAFETENSORS_WEIGHTS_NAME,
USE_PEFT_BACKEND,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .deprecation_utils import deprecate
from .doc_utils import replace_example_docstring
from .dynamic_modules_utils import get_class_from_dynamic_module
from .export_utils import export_to_gif, export_to_obj, export_to_ply, export_to_video
from .hub_utils import (
PushToHubMixin,
_add_variant,
_get_checkpoint_shard_files,
_get_model_file,
extract_commit_hash,
http_user_agent,
)
from .import_utils import (
BACKENDS_MAPPING,
DIFFUSERS_SLOW_IMPORT,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_accelerate_available,
is_accelerate_version,
is_bitsandbytes_available,
is_bitsandbytes_version,
is_bs4_available,
is_flax_available,
is_ftfy_available,
is_gguf_available,
is_gguf_version,
is_google_colab,
is_hf_hub_version,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_matplotlib_available,
is_note_seq_available,
is_onnx_available,
is_peft_available,
is_peft_version,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_tensorboard_available,
is_timm_available,
is_torch_available,
is_torch_npu_available,
is_torch_version,
is_torch_xla_available,
is_torch_xla_version,
is_torchao_available,
is_torchsde_available,
is_torchvision_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
is_wandb_available,
is_xformers_available,
requires_backends,
)
from .loading_utils import get_module_from_name, get_submodule_by_name, load_image, load_video
from .logging import get_logger
from .outputs import BaseOutput
from .peft_utils import (
check_peft_version,
delete_adapter_layers,
get_adapter_name,
get_peft_kwargs,
recurse_remove_peft_layers,
scale_lora_layers,
set_adapter_layers,
set_weights_and_activate_adapters,
unscale_lora_layers,
)
from .pil_utils import PIL_INTERPOLATION, make_image_grid, numpy_to_pil, pt_to_pil
from .state_dict_utils import (
convert_all_state_dict_to_peft,
convert_state_dict_to_diffusers,
convert_state_dict_to_kohya,
convert_state_dict_to_peft,
convert_unet_state_dict_to_peft,
)
logger = get_logger(__name__)
def check_min_version(min_version):
if version.parse(__version__) < version.parse(min_version):
if "dev" in min_version:
error_message = (
"This example requires a source install from HuggingFace diffusers (see "
"`https://huggingface.co/docs/diffusers/installation#install-from-source`),"
)
else:
error_message = f"This example requires a minimum version of {min_version},"
error_message += f" but the version found is {__version__}.\n"
raise ImportError(error_message)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_dataset import BaseDataset, Compose, force_full_init
from .dataset_wrapper import ClassBalancedDataset, ConcatDataset, RepeatDataset
from .sampler import DefaultSampler, InfiniteSampler
from .utils import (COLLATE_FUNCTIONS, default_collate, pseudo_collate,
worker_init_fn)
__all__ = [
'BaseDataset', 'Compose', 'force_full_init', 'ClassBalancedDataset',
'ConcatDataset', 'RepeatDataset', 'DefaultSampler', 'InfiniteSampler',
'worker_init_fn', 'pseudo_collate', 'COLLATE_FUNCTIONS', 'default_collate'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_dataset import BaseDataset, Compose, force_full_init
from .dataset_wrapper import ClassBalancedDataset, ConcatDataset, RepeatDataset
from .sampler import DefaultSampler, InfiniteSampler
from .utils import pseudo_collate, worker_init_fn
__all__ = [
'BaseDataset', 'Compose', 'force_full_init', 'ClassBalancedDataset',
'ConcatDataset', 'RepeatDataset', 'DefaultSampler', 'InfiniteSampler',
'worker_init_fn', 'pseudo_collate'
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.utilities.dataforseo_api_search import DataForSeoAPIWrapper
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"DataForSeoAPIWrapper": "langchain_community.utilities.dataforseo_api_search",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"DataForSeoAPIWrapper",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.utilities.dataforseo_api_search import DataForSeoAPIWrapper
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"DataForSeoAPIWrapper": "langchain_community.utilities.dataforseo_api_search"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"DataForSeoAPIWrapper",
]
|
# model settings
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
type='MaskRCNN',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=3,
strides=(1, 2, 2),
dilations=(1, 1, 1),
out_indices=(2, ),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
rpn_head=dict(
type='RPNHead',
in_channels=1024,
feat_channels=1024,
anchor_generator=dict(
type='AnchorGenerator',
scales=[2, 4, 8, 16, 32],
ratios=[0.5, 1.0, 2.0],
strides=[16]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHead',
shared_head=dict(
type='ResLayer',
depth=50,
stage=3,
stride=2,
dilation=1,
style='caffe',
norm_cfg=norm_cfg,
norm_eval=True),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=1024,
featmap_strides=[16]),
bbox_head=dict(
type='BBoxHead',
with_avg_pool=True,
roi_feat_size=7,
in_channels=2048,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
mask_roi_extractor=None,
mask_head=dict(
type='FCNMaskHead',
num_convs=0,
in_channels=2048,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=12000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=14,
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms_pre=6000,
nms=dict(type='nms', iou_threshold=0.7),
max_per_img=1000,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100,
mask_thr_binary=0.5)))
|
# model settings
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
type='MaskRCNN',
preprocess_cfg=preprocess_cfg,
backbone=dict(
type='ResNet',
depth=50,
num_stages=3,
strides=(1, 2, 2),
dilations=(1, 1, 1),
out_indices=(2, ),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
rpn_head=dict(
type='RPNHead',
in_channels=1024,
feat_channels=1024,
anchor_generator=dict(
type='AnchorGenerator',
scales=[2, 4, 8, 16, 32],
ratios=[0.5, 1.0, 2.0],
strides=[16]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHead',
shared_head=dict(
type='ResLayer',
depth=50,
stage=3,
stride=2,
dilation=1,
style='caffe',
norm_cfg=norm_cfg,
norm_eval=True),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=1024,
featmap_strides=[16]),
bbox_head=dict(
type='BBoxHead',
with_avg_pool=True,
roi_feat_size=7,
in_channels=2048,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
mask_roi_extractor=None,
mask_head=dict(
type='FCNMaskHead',
num_convs=0,
in_channels=2048,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=12000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=14,
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms_pre=6000,
nms=dict(type='nms', iou_threshold=0.7),
max_per_img=1000,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100,
mask_thr_binary=0.5)))
|
"""Test ChatYuan2 wrapper."""
import pytest
from langchain_core.messages import (
AIMessage,
HumanMessage,
SystemMessage,
)
from langchain_community.chat_models.yuan2 import (
ChatYuan2,
_convert_dict_to_message,
_convert_message_to_dict,
)
@pytest.mark.requires("openai")
def test_yuan2_model_param() -> None:
chat = ChatYuan2(model="foo")
assert chat.model_name == "foo"
chat = ChatYuan2(model_name="foo") # type: ignore[call-arg]
assert chat.model_name == "foo"
@pytest.mark.requires("openai")
def test_yuan2_timeout_param() -> None:
chat = ChatYuan2(request_timeout=5) # type: ignore[call-arg]
assert chat.request_timeout == 5
chat = ChatYuan2(timeout=10)
assert chat.request_timeout == 10
@pytest.mark.requires("openai")
def test_yuan2_stop_sequences_param() -> None:
chat = ChatYuan2(stop=["<eod>"]) # type: ignore[call-arg]
assert chat.stop == ["<eod>"]
chat = ChatYuan2(stop_sequences=["<eod>"])
assert chat.stop == ["<eod>"]
def test__convert_message_to_dict_human() -> None:
message = HumanMessage(content="foo")
result = _convert_message_to_dict(message)
expected_output = {"role": "user", "content": "foo"}
assert result == expected_output
def test__convert_message_to_dict_ai() -> None:
message = AIMessage(content="foo")
result = _convert_message_to_dict(message)
expected_output = {"role": "assistant", "content": "foo"}
assert result == expected_output
def test__convert_message_to_dict_system() -> None:
message = SystemMessage(content="foo")
result = _convert_message_to_dict(message)
expected_output = {"role": "system", "content": "foo"}
assert result == expected_output
def test__convert_dict_to_message_human() -> None:
message = {"role": "user", "content": "hello"}
result = _convert_dict_to_message(message)
expected_output = HumanMessage(content="hello")
assert result == expected_output
def test__convert_dict_to_message_ai() -> None:
message = {"role": "assistant", "content": "hello"}
result = _convert_dict_to_message(message)
expected_output = AIMessage(content="hello")
assert result == expected_output
def test__convert_dict_to_message_system() -> None:
message = {"role": "system", "content": "hello"}
result = _convert_dict_to_message(message)
expected_output = SystemMessage(content="hello")
assert result == expected_output
|
"""Test ChatYuan2 wrapper."""
import pytest
from langchain_core.messages import (
AIMessage,
HumanMessage,
SystemMessage,
)
from langchain_community.chat_models.yuan2 import (
ChatYuan2,
_convert_dict_to_message,
_convert_message_to_dict,
)
@pytest.mark.requires("openai")
def test_yuan2_model_param() -> None:
chat = ChatYuan2(model="foo") # type: ignore[call-arg]
assert chat.model_name == "foo"
chat = ChatYuan2(model_name="foo") # type: ignore[call-arg]
assert chat.model_name == "foo"
@pytest.mark.requires("openai")
def test_yuan2_timeout_param() -> None:
chat = ChatYuan2(request_timeout=5) # type: ignore[call-arg]
assert chat.request_timeout == 5
chat = ChatYuan2(timeout=10) # type: ignore[call-arg]
assert chat.request_timeout == 10
@pytest.mark.requires("openai")
def test_yuan2_stop_sequences_param() -> None:
chat = ChatYuan2(stop=["<eod>"]) # type: ignore[call-arg]
assert chat.stop == ["<eod>"]
chat = ChatYuan2(stop_sequences=["<eod>"]) # type: ignore[call-arg]
assert chat.stop == ["<eod>"]
def test__convert_message_to_dict_human() -> None:
message = HumanMessage(content="foo")
result = _convert_message_to_dict(message)
expected_output = {"role": "user", "content": "foo"}
assert result == expected_output
def test__convert_message_to_dict_ai() -> None:
message = AIMessage(content="foo")
result = _convert_message_to_dict(message)
expected_output = {"role": "assistant", "content": "foo"}
assert result == expected_output
def test__convert_message_to_dict_system() -> None:
message = SystemMessage(content="foo")
result = _convert_message_to_dict(message)
expected_output = {"role": "system", "content": "foo"}
assert result == expected_output
def test__convert_dict_to_message_human() -> None:
message = {"role": "user", "content": "hello"}
result = _convert_dict_to_message(message)
expected_output = HumanMessage(content="hello")
assert result == expected_output
def test__convert_dict_to_message_ai() -> None:
message = {"role": "assistant", "content": "hello"}
result = _convert_dict_to_message(message)
expected_output = AIMessage(content="hello")
assert result == expected_output
def test__convert_dict_to_message_system() -> None:
message = {"role": "system", "content": "hello"}
result = _convert_dict_to_message(message)
expected_output = SystemMessage(content="hello")
assert result == expected_output
|
import logging
import os
from typing import Optional
from jina import __default_host__
from jina.importer import ImportExtensions
from jina.serve.gateway import BaseGateway
from jina.serve.runtimes.gateway.websocket.app import get_fastapi_app
class WebSocketGateway(BaseGateway):
"""WebSocket Gateway implementation"""
def __init__(
self,
ssl_keyfile: Optional[str] = None,
ssl_certfile: Optional[str] = None,
uvicorn_kwargs: Optional[dict] = None,
proxy: Optional[bool] = None,
**kwargs
):
"""Initialize the gateway
:param ssl_keyfile: the path to the key file
:param ssl_certfile: the path to the certificate file
:param uvicorn_kwargs: Dictionary of kwargs arguments that will be passed to Uvicorn server when starting the server
:param proxy: If set, respect the http_proxy and https_proxy environment variables, otherwise, it will unset
these proxy variables before start. gRPC seems to prefer no proxy
:param kwargs: keyword args
"""
super().__init__(**kwargs)
self._set_single_port_protocol()
self.ssl_keyfile = ssl_keyfile
self.ssl_certfile = ssl_certfile
self.uvicorn_kwargs = uvicorn_kwargs
if not proxy and os.name != 'nt':
os.unsetenv('http_proxy')
os.unsetenv('https_proxy')
async def setup_server(self):
"""
Setup WebSocket Server
"""
from jina.helper import extend_rest_interface
self.app = extend_rest_interface(
get_fastapi_app(
streamer=self.streamer,
logger=self.logger,
tracing=self.tracing,
tracer_provider=self.tracer_provider,
)
)
with ImportExtensions(required=True):
from uvicorn import Config, Server
class UviServer(Server):
"""The uvicorn server."""
async def setup(self, sockets=None):
"""
Setup uvicorn server.
:param sockets: sockets of server.
"""
config = self.config
if not config.loaded:
config.load()
self.lifespan = config.lifespan_class(config)
await self.startup(sockets=sockets)
if self.should_exit:
return
async def serve(self, **kwargs):
"""
Start the server.
:param kwargs: keyword arguments
"""
await self.main_loop()
if 'CICD_JINA_DISABLE_HEALTHCHECK_LOGS' in os.environ:
class _EndpointFilter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
# NOTE: space is important after `GET /`, else all logs will be disabled.
return record.getMessage().find("GET / ") == -1
# Filter out healthcheck endpoint `GET /`
logging.getLogger("uvicorn.access").addFilter(_EndpointFilter())
uvicorn_kwargs = self.uvicorn_kwargs or {}
if self.ssl_keyfile and 'ssl_keyfile' not in uvicorn_kwargs.keys():
uvicorn_kwargs['ssl_keyfile'] = self.ssl_keyfile
if self.ssl_certfile and 'ssl_certfile' not in uvicorn_kwargs.keys():
uvicorn_kwargs['ssl_certfile'] = self.ssl_certfile
self.server = UviServer(
config=Config(
app=self.app,
host=__default_host__,
port=self.port,
ws_max_size=1024 * 1024 * 1024,
log_level=os.getenv('JINA_LOG_LEVEL', 'error').lower(),
**uvicorn_kwargs,
)
)
await self.server.setup()
async def shutdown(self):
"""Free other resources allocated with the server, e.g, gateway object, ..."""
self.server.should_exit = True
await self.server.shutdown()
async def run_server(self):
"""Run WebSocket server forever"""
await self.server.serve()
|
import logging
import os
from typing import Optional
from jina import __default_host__
from jina.importer import ImportExtensions
from jina.serve.gateway import BaseGateway
from jina.serve.runtimes.gateway.websocket.app import get_fastapi_app
class WebSocketGateway(BaseGateway):
"""WebSocket Gateway implementation"""
def __init__(
self,
ssl_keyfile: Optional[str] = None,
ssl_certfile: Optional[str] = None,
uvicorn_kwargs: Optional[dict] = None,
proxy: Optional[bool] = None,
**kwargs
):
"""Initialize the gateway
:param ssl_keyfile: the path to the key file
:param ssl_certfile: the path to the certificate file
:param uvicorn_kwargs: Dictionary of kwargs arguments that will be passed to Uvicorn server when starting the server
:param proxy: If set, respect the http_proxy and https_proxy environment variables, otherwise, it will unset
these proxy variables before start. gRPC seems to prefer no proxy
:param kwargs: keyword args
"""
super().__init__(**kwargs)
self._set_single_port_protocol()
self.ssl_keyfile = ssl_keyfile
self.ssl_certfile = ssl_certfile
self.uvicorn_kwargs = uvicorn_kwargs
if not proxy and os.name != 'nt':
os.unsetenv('http_proxy')
os.unsetenv('https_proxy')
async def setup_server(self):
"""
Setup WebSocket Server
"""
from jina.helper import extend_rest_interface
self.app = extend_rest_interface(
get_fastapi_app(
streamer=self.streamer,
logger=self.logger,
tracing=self.tracing,
tracer_provider=self.tracer_provider,
)
)
with ImportExtensions(required=True):
from uvicorn import Config, Server
class UviServer(Server):
"""The uvicorn server."""
async def setup(self, sockets=None):
"""
Setup uvicorn server.
:param sockets: sockets of server.
"""
config = self.config
if not config.loaded:
config.load()
self.lifespan = config.lifespan_class(config)
await self.startup(sockets=sockets)
if self.should_exit:
return
async def serve(self, **kwargs):
"""
Start the server.
:param kwargs: keyword arguments
"""
await self.main_loop()
if 'CICD_JINA_DISABLE_HEALTHCHECK_LOGS' in os.environ:
class _EndpointFilter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
# NOTE: space is important after `GET /`, else all logs will be disabled.
return record.getMessage().find("GET / ") == -1
# Filter out healthcheck endpoint `GET /`
logging.getLogger("uvicorn.access").addFilter(_EndpointFilter())
uvicorn_kwargs = self.uvicorn_kwargs or {}
if self.ssl_keyfile and 'ssl_keyfile' not in uvicorn_kwargs.keys():
uvicorn_kwargs['ssl_keyfile'] = self.ssl_keyfile
if self.ssl_certfile and 'ssl_certfile' not in uvicorn_kwargs.keys():
uvicorn_kwargs['ssl_certfile'] = self.ssl_certfile
self.server = UviServer(
config=Config(
app=self.app,
host=__default_host__,
port=self.port,
ws_max_size=1024 * 1024 * 1024,
log_level=os.getenv('JINA_LOG_LEVEL', 'error').lower(),
**uvicorn_kwargs,
)
)
await self.server.setup()
async def teardown(self):
"""Free other resources allocated with the server, e.g, gateway object, ..."""
await super().teardown()
await self.server.shutdown()
async def stop_server(self):
"""
Stop WebSocket server
"""
self.server.should_exit = True
async def run_server(self):
"""Run WebSocket server forever"""
await self.server.serve()
|
from .dpr_text import DPRTextEncoder
|
from .dpr_text import DPRTextEncoder
|
from langchain_core.prompt_values import ChatPromptValue, ChatPromptValueConcrete
from langchain_core.prompts.chat import (
AIMessagePromptTemplate,
BaseChatPromptTemplate,
BaseStringMessagePromptTemplate,
ChatMessagePromptTemplate,
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessageLike,
MessageLikeRepresentation,
MessagePromptTemplateT,
MessagesPlaceholder,
SystemMessagePromptTemplate,
_convert_to_message,
_create_template_from_message_type,
)
__all__ = [
"AIMessagePromptTemplate",
"BaseChatPromptTemplate",
"BaseMessagePromptTemplate",
"BaseStringMessagePromptTemplate",
"ChatMessagePromptTemplate",
"ChatPromptTemplate",
"ChatPromptValue",
"ChatPromptValueConcrete",
"HumanMessagePromptTemplate",
"MessageLike",
"MessageLikeRepresentation",
"MessagePromptTemplateT",
"MessagesPlaceholder",
"SystemMessagePromptTemplate",
"_convert_to_message",
"_create_template_from_message_type",
]
from langchain_core.prompts.message import BaseMessagePromptTemplate
|
from langchain_core.prompt_values import ChatPromptValue, ChatPromptValueConcrete
from langchain_core.prompts.chat import (
AIMessagePromptTemplate,
BaseChatPromptTemplate,
BaseStringMessagePromptTemplate,
ChatMessagePromptTemplate,
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessageLike,
MessageLikeRepresentation,
MessagePromptTemplateT,
MessagesPlaceholder,
SystemMessagePromptTemplate,
_convert_to_message,
_create_template_from_message_type,
)
__all__ = [
"BaseMessagePromptTemplate",
"MessagesPlaceholder",
"BaseStringMessagePromptTemplate",
"ChatMessagePromptTemplate",
"HumanMessagePromptTemplate",
"AIMessagePromptTemplate",
"SystemMessagePromptTemplate",
"BaseChatPromptTemplate",
"ChatPromptTemplate",
"ChatPromptValue",
"ChatPromptValueConcrete",
"_convert_to_message",
"_create_template_from_message_type",
"MessagePromptTemplateT",
"MessageLike",
"MessageLikeRepresentation",
]
from langchain_core.prompts.message import BaseMessagePromptTemplate
|
# Copyright (c) OpenMMLab. All rights reserved.
from .augment_wrappers import AutoAugment, RandAugment
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
Contrast, Equalize, Invert, Posterize, Sharpness,
Solarize, SolarizeAdd)
from .formatting import (ImageToTensor, PackDetInputs, PackReIDInputs,
PackTrackInputs, ToTensor, Transpose)
from .frame_sampling import BaseFrameSample, UniformRefFrameSample
from .geometric import (GeomTransform, Rotate, ShearX, ShearY, TranslateX,
TranslateY)
from .instaboost import InstaBoost
from .loading import (FilterAnnotations, InferencerLoader, LoadAnnotations,
LoadEmptyAnnotations, LoadImageFromNDArray,
LoadMultiChannelImageFromFiles, LoadPanopticAnnotations,
LoadProposals, LoadTrackAnnotations)
from .transformers_glip import GTBoxSubOne_GLIP, RandomFlip_GLIP
from .transforms import (Albu, CachedMixUp, CachedMosaic, CopyPaste, CutOut,
Expand, FixScaleResize, FixShapeResize,
MinIoURandomCrop, MixUp, Mosaic, Pad,
PhotoMetricDistortion, RandomAffine,
RandomCenterCropPad, RandomCrop, RandomErasing,
RandomFlip, RandomShift, Resize, ResizeShortestEdge,
SegRescale, YOLOXHSVRandomAug)
from .wrappers import MultiBranch, ProposalBroadcaster, RandomOrder
__all__ = [
'PackDetInputs', 'ToTensor', 'ImageToTensor', 'Transpose',
'LoadImageFromNDArray', 'LoadAnnotations', 'LoadPanopticAnnotations',
'LoadMultiChannelImageFromFiles', 'LoadProposals', 'Resize', 'RandomFlip',
'RandomCrop', 'SegRescale', 'MinIoURandomCrop', 'Expand',
'PhotoMetricDistortion', 'Albu', 'InstaBoost', 'RandomCenterCropPad',
'AutoAugment', 'CutOut', 'ShearX', 'ShearY', 'Rotate', 'Color', 'Equalize',
'Brightness', 'Contrast', 'TranslateX', 'TranslateY', 'RandomShift',
'Mosaic', 'MixUp', 'RandomAffine', 'YOLOXHSVRandomAug', 'CopyPaste',
'FilterAnnotations', 'Pad', 'GeomTransform', 'ColorTransform',
'RandAugment', 'Sharpness', 'Solarize', 'SolarizeAdd', 'Posterize',
'AutoContrast', 'Invert', 'MultiBranch', 'RandomErasing',
'LoadEmptyAnnotations', 'RandomOrder', 'CachedMosaic', 'CachedMixUp',
'FixShapeResize', 'ProposalBroadcaster', 'InferencerLoader',
'LoadTrackAnnotations', 'BaseFrameSample', 'UniformRefFrameSample',
'PackTrackInputs', 'PackReIDInputs', 'FixScaleResize',
'ResizeShortestEdge', 'GTBoxSubOne_GLIP', 'RandomFlip_GLIP'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .augment_wrappers import AutoAugment, RandAugment
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
Contrast, Equalize, Invert, Posterize, Sharpness,
Solarize, SolarizeAdd)
from .formatting import (ImageToTensor, PackDetInputs, PackReIDInputs,
PackTrackInputs, ToTensor, Transpose)
from .frame_sampling import BaseFrameSample, UniformRefFrameSample
from .geometric import (GeomTransform, Rotate, ShearX, ShearY, TranslateX,
TranslateY)
from .instaboost import InstaBoost
from .loading import (FilterAnnotations, InferencerLoader, LoadAnnotations,
LoadEmptyAnnotations, LoadImageFromNDArray,
LoadMultiChannelImageFromFiles, LoadPanopticAnnotations,
LoadProposals, LoadTrackAnnotations)
from .transforms import (Albu, CachedMixUp, CachedMosaic, CopyPaste, CutOut,
Expand, FixScaleResize, FixShapeResize,
MinIoURandomCrop, MixUp, Mosaic, Pad,
PhotoMetricDistortion, RandomAffine,
RandomCenterCropPad, RandomCrop, RandomErasing,
RandomFlip, RandomShift, Resize, ResizeShortestEdge,
SegRescale, YOLOXHSVRandomAug)
from .wrappers import MultiBranch, ProposalBroadcaster, RandomOrder
__all__ = [
'PackDetInputs', 'ToTensor', 'ImageToTensor', 'Transpose',
'LoadImageFromNDArray', 'LoadAnnotations', 'LoadPanopticAnnotations',
'LoadMultiChannelImageFromFiles', 'LoadProposals', 'Resize', 'RandomFlip',
'RandomCrop', 'SegRescale', 'MinIoURandomCrop', 'Expand',
'PhotoMetricDistortion', 'Albu', 'InstaBoost', 'RandomCenterCropPad',
'AutoAugment', 'CutOut', 'ShearX', 'ShearY', 'Rotate', 'Color', 'Equalize',
'Brightness', 'Contrast', 'TranslateX', 'TranslateY', 'RandomShift',
'Mosaic', 'MixUp', 'RandomAffine', 'YOLOXHSVRandomAug', 'CopyPaste',
'FilterAnnotations', 'Pad', 'GeomTransform', 'ColorTransform',
'RandAugment', 'Sharpness', 'Solarize', 'SolarizeAdd', 'Posterize',
'AutoContrast', 'Invert', 'MultiBranch', 'RandomErasing',
'LoadEmptyAnnotations', 'RandomOrder', 'CachedMosaic', 'CachedMixUp',
'FixShapeResize', 'ProposalBroadcaster', 'InferencerLoader',
'LoadTrackAnnotations', 'BaseFrameSample', 'UniformRefFrameSample',
'PackTrackInputs', 'PackReIDInputs', 'FixScaleResize', 'ResizeShortestEdge'
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import CopyFileTool
from langchain_community.tools.file_management.copy import FileCopyInput
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"FileCopyInput": "langchain_community.tools.file_management.copy",
"CopyFileTool": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"CopyFileTool",
"FileCopyInput",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import CopyFileTool
from langchain_community.tools.file_management.copy import FileCopyInput
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"FileCopyInput": "langchain_community.tools.file_management.copy",
"CopyFileTool": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"FileCopyInput",
"CopyFileTool",
]
|
_base_ = '../htc/htc_r50_fpn_20e_coco.py'
model = dict(
backbone=dict(
type='Res2Net',
depth=101,
scales=4,
base_width=26,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://res2net101_v1d_26w_4s')))
|
_base_ = '../htc/htc_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='Res2Net',
depth=101,
scales=4,
base_width=26,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://res2net101_v1d_26w_4s')))
# learning policy
lr_config = dict(step=[16, 19])
runner = dict(type='EpochBasedRunner', max_epochs=20)
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Callable
from sentence_transformers.evaluation import InformationRetrievalEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseInformationRetrievalEvaluator(InformationRetrievalEvaluator):
def __init__(
self,
queries: dict[str, str], # qid => query
corpus: dict[str, str], # cid => doc
relevant_docs: dict[str, set[str]], # qid => Set[cid]
corpus_chunk_size: int = 50000,
mrr_at_k: list[int] = [10],
ndcg_at_k: list[int] = [10],
accuracy_at_k: list[int] = [1, 3, 5, 10],
precision_recall_at_k: list[int] = [1, 3, 5, 10],
map_at_k: list[int] = [100],
show_progress_bar: bool = False,
batch_size: int = 32,
name: str = "",
write_csv: bool = True,
truncate_dim: int | None = None,
score_functions: dict[str, Callable[[Tensor, Tensor], Tensor]] | None = None,
main_score_function: str | SimilarityFunction | None = None,
query_prompt: str | None = None,
query_prompt_name: str | None = None,
corpus_prompt: str | None = None,
corpus_prompt_name: str | None = None,
) -> None:
return super().__init__(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
corpus_chunk_size=corpus_chunk_size,
mrr_at_k=mrr_at_k,
ndcg_at_k=ndcg_at_k,
accuracy_at_k=accuracy_at_k,
precision_recall_at_k=precision_recall_at_k,
map_at_k=map_at_k,
show_progress_bar=show_progress_bar,
batch_size=batch_size,
name=name,
write_csv=write_csv,
truncate_dim=truncate_dim,
score_functions=score_functions,
main_score_function=main_score_function,
query_prompt=query_prompt,
query_prompt_name=query_prompt_name,
corpus_prompt=corpus_prompt,
corpus_prompt_name=corpus_prompt_name,
)
def __call__(
self, model: SparseEncoder, output_path: str = None, epoch: int = -1, steps: int = -1, *args, **kwargs
) -> dict[str, float]:
return super().__call__(model=model, output_path=output_path, epoch=epoch, steps=steps, *args, **kwargs)
def compute_metrices(
self, model: SparseEncoder, corpus_model=None, corpus_embeddings: Tensor | None = None
) -> dict[str, float]:
return super().compute_metrices(model=model, corpus_model=corpus_model, corpus_embeddings=corpus_embeddings)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
prompt_name: str | None = None,
prompt: str | None = None,
**kwargs,
) -> Tensor:
kwargs["truncate_dim"] = self.truncate_dim
embeddings = model.encode(
sentences,
prompt_name=prompt_name,
prompt=prompt,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_sparse_tensor=True,
save_on_cpu=True,
**kwargs,
)
sparsity_infos = model.get_sparsity_stats(embeddings)
if sparsity_infos["num_rows"] == self.queries_info["num_rows"] and "num_cols" not in self.queries_info.keys():
self.queries_info.update(
{
"num_cols": sparsity_infos["num_cols"],
"row_non_zero_mean": sparsity_infos["row_non_zero_mean"],
"row_sparsity_mean": sparsity_infos["row_sparsity_mean"],
}
)
else:
self.corpus_info.update(
{
"num_cols": sparsity_infos["num_cols"],
"row_non_zero_mean": sparsity_infos["row_non_zero_mean"],
"row_sparsity_mean": sparsity_infos["row_sparsity_mean"],
}
)
return embeddings
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch=epoch, step=step)
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Callable
from sentence_transformers.evaluation import InformationRetrievalEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseInformationRetrievalEvaluator(InformationRetrievalEvaluator):
def __init__(
self,
queries: dict[str, str], # qid => query
corpus: dict[str, str], # cid => doc
relevant_docs: dict[str, set[str]], # qid => Set[cid]
corpus_chunk_size: int = 50000,
mrr_at_k: list[int] = [10],
ndcg_at_k: list[int] = [10],
accuracy_at_k: list[int] = [1, 3, 5, 10],
precision_recall_at_k: list[int] = [1, 3, 5, 10],
map_at_k: list[int] = [100],
show_progress_bar: bool = False,
batch_size: int = 32,
name: str = "",
write_csv: bool = True,
truncate_dim: int | None = None,
score_functions: dict[str, Callable[[Tensor, Tensor], Tensor]] | None = None,
main_score_function: str | SimilarityFunction | None = None,
query_prompt: str | None = None,
query_prompt_name: str | None = None,
corpus_prompt: str | None = None,
corpus_prompt_name: str | None = None,
) -> None:
return super().__init__(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
corpus_chunk_size=corpus_chunk_size,
mrr_at_k=mrr_at_k,
ndcg_at_k=ndcg_at_k,
accuracy_at_k=accuracy_at_k,
precision_recall_at_k=precision_recall_at_k,
map_at_k=map_at_k,
show_progress_bar=show_progress_bar,
batch_size=batch_size,
name=name,
write_csv=write_csv,
truncate_dim=truncate_dim,
score_functions=score_functions,
main_score_function=main_score_function,
query_prompt=query_prompt,
query_prompt_name=query_prompt_name,
corpus_prompt=corpus_prompt,
corpus_prompt_name=corpus_prompt_name,
)
def __call__(
self, model: SparseEncoder, output_path: str = None, epoch: int = -1, steps: int = -1, *args, **kwargs
) -> dict[str, float]:
return super().__call__(model=model, output_path=output_path, epoch=epoch, steps=steps, *args, **kwargs)
def compute_metrices(
self, model: SparseEncoder, corpus_model=None, corpus_embeddings: Tensor | None = None
) -> dict[str, float]:
return super().compute_metrices(model=model, corpus_model=corpus_model, corpus_embeddings=corpus_embeddings)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
prompt_name: str | None = None,
prompt: str | None = None,
**kwargs,
) -> Tensor:
kwargs["truncate_dim"] = self.truncate_dim
embeddings = model.encode(
sentences,
prompt_name=prompt_name,
prompt=prompt,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_sparse_tensor=True,
save_on_cpu=True,
**kwargs,
)
sparsity_infos = model.get_sparsity_stats(embeddings)
if (
sparsity_infos["num_rows"] == self.queries_info["length_of_queries"]
and "sparsity_infos" not in self.queries_info.keys()
):
self.queries_info["sparsity_infos"] = model.get_sparsity_stats(embeddings)
else:
self.corpus_info["sparsity_infos"] = model.get_sparsity_stats(embeddings)
return embeddings
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch=epoch, step=step)
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
type='NASFCOS',
preprocess_cfg=preprocess_cfg,
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False, eps=0),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
neck=dict(
type='NASFCOS_FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs=True,
num_outs=5,
norm_cfg=dict(type='BN'),
conv_cfg=dict(type='DCNv2', deform_groups=2)),
bbox_head=dict(
type='NASFCOSHead',
num_classes=80,
in_channels=256,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
norm_cfg=dict(type='GN', num_groups=32),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', loss_weight=1.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# dataset settings
train_dataloader = dict(batch_size=4, num_workers=2)
# optimizer
optim_wrapper = dict(
optimizer=dict(lr=0.01),
paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.))
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
type='NASFCOS',
preprocess_cfg=preprocess_cfg,
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False, eps=0),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
neck=dict(
type='NASFCOS_FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs=True,
num_outs=5,
norm_cfg=dict(type='BN'),
conv_cfg=dict(type='DCNv2', deform_groups=2)),
bbox_head=dict(
type='NASFCOSHead',
num_classes=80,
in_channels=256,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
norm_cfg=dict(type='GN', num_groups=32),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', loss_weight=1.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# dataset settings
train_dataloader = dict(batch_size=4, num_workers=2)
# optimizer
optim_wrapper = dict(
optimizer=dict(
lr=0.01, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.)))
|
import posixpath
from pathlib import Path
from unittest.mock import patch
import fsspec
import pytest
from fsspec.implementations.local import AbstractFileSystem, LocalFileSystem, stringify_path
class MockFileSystem(AbstractFileSystem):
protocol = "mock"
def __init__(self, *args, local_root_dir, **kwargs):
super().__init__()
self._fs = LocalFileSystem(*args, **kwargs)
self.local_root_dir = Path(local_root_dir).resolve().as_posix() + "/"
def mkdir(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.mkdir(path, *args, **kwargs)
def makedirs(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.makedirs(path, *args, **kwargs)
def rmdir(self, path):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.rmdir(path)
def ls(self, path, detail=True, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
out = self._fs.ls(path, detail=detail, *args, **kwargs)
if detail:
return [{**info, "name": info["name"][len(self.local_root_dir) :]} for info in out]
else:
return [name[len(self.local_root_dir) :] for name in out]
def info(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
out = dict(self._fs.info(path, *args, **kwargs))
out["name"] = out["name"][len(self.local_root_dir) :]
return out
def cp_file(self, path1, path2, *args, **kwargs):
path1 = posixpath.join(self.local_root_dir, self._strip_protocol(path1))
path2 = posixpath.join(self.local_root_dir, self._strip_protocol(path2))
return self._fs.cp_file(path1, path2, *args, **kwargs)
def rm_file(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.rm_file(path, *args, **kwargs)
def rm(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.rm(path, *args, **kwargs)
def _open(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs._open(path, *args, **kwargs)
def created(self, path):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.created(path)
def modified(self, path):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.modified(path)
@classmethod
def _strip_protocol(cls, path):
path = stringify_path(path)
if path.startswith("mock://"):
path = path[7:]
return path
class TmpDirFileSystem(MockFileSystem):
protocol = "tmp"
tmp_dir = None
def __init__(self, *args, **kwargs):
assert self.tmp_dir is not None, "TmpDirFileSystem.tmp_dir is not set"
super().__init__(*args, **kwargs, local_root_dir=self.tmp_dir, auto_mkdir=True)
@classmethod
def _strip_protocol(cls, path):
path = stringify_path(path)
if path.startswith("tmp://"):
path = path[6:]
return path
@pytest.fixture
def mock_fsspec():
original_registry = fsspec.registry.copy()
fsspec.register_implementation("mock", MockFileSystem)
fsspec.register_implementation("tmp", TmpDirFileSystem)
yield
fsspec.registry = original_registry
@pytest.fixture
def mockfs(tmp_path_factory, mock_fsspec):
local_fs_dir = tmp_path_factory.mktemp("mockfs")
return MockFileSystem(local_root_dir=local_fs_dir, auto_mkdir=True)
@pytest.fixture
def tmpfs(tmp_path_factory, mock_fsspec):
tmp_fs_dir = tmp_path_factory.mktemp("tmpfs")
with patch.object(TmpDirFileSystem, "tmp_dir", tmp_fs_dir):
yield TmpDirFileSystem()
|
import posixpath
from pathlib import Path
import fsspec
import pytest
from fsspec.implementations.local import AbstractFileSystem, LocalFileSystem, stringify_path
class MockFileSystem(AbstractFileSystem):
protocol = "mock"
def __init__(self, *args, local_root_dir, **kwargs):
super().__init__()
self._fs = LocalFileSystem(*args, **kwargs)
self.local_root_dir = Path(local_root_dir).resolve().as_posix() + "/"
def mkdir(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.mkdir(path, *args, **kwargs)
def makedirs(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.makedirs(path, *args, **kwargs)
def rmdir(self, path):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.rmdir(path)
def ls(self, path, detail=True, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
out = self._fs.ls(path, detail=detail, *args, **kwargs)
if detail:
return [{**info, "name": info["name"][len(self.local_root_dir) :]} for info in out]
else:
return [name[len(self.local_root_dir) :] for name in out]
def info(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
out = dict(self._fs.info(path, *args, **kwargs))
out["name"] = out["name"][len(self.local_root_dir) :]
return out
def cp_file(self, path1, path2, *args, **kwargs):
path1 = posixpath.join(self.local_root_dir, self._strip_protocol(path1))
path2 = posixpath.join(self.local_root_dir, self._strip_protocol(path2))
return self._fs.cp_file(path1, path2, *args, **kwargs)
def rm_file(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.rm_file(path, *args, **kwargs)
def rm(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.rm(path, *args, **kwargs)
def _open(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs._open(path, *args, **kwargs)
def created(self, path):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.created(path)
def modified(self, path):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.modified(path)
@classmethod
def _strip_protocol(cls, path):
path = stringify_path(path)
if path.startswith("mock://"):
path = path[7:]
return path
@pytest.fixture
def mock_fsspec():
original_registry = fsspec.registry.copy()
fsspec.register_implementation("mock", MockFileSystem)
yield
fsspec.registry = original_registry
@pytest.fixture
def mockfs(tmp_path_factory, mock_fsspec):
local_fs_dir = tmp_path_factory.mktemp("mockfs")
return MockFileSystem(local_root_dir=local_fs_dir, auto_mkdir=True)
|
__all__ = ['reduce', 'reduce_all']
from typing import Dict, List, Optional
from docarray import DocList
def reduce(
left: DocList, right: DocList, left_id_map: Optional[Dict] = None
) -> 'DocList':
"""
Reduces left and right DocList into one DocList in-place.
Changes are applied to the left DocList.
Reducing 2 DocLists consists in adding Documents in the second DocList
to the first DocList if they do not exist.
If a Document exists in both DocLists (identified by ID),
the data properties are merged with priority to the left Document.
Nested DocLists are also reduced in the same way.
:param left: First DocList to be reduced. Changes will be applied to it
in-place
:param right: Second DocList to be reduced
:param left_id_map: Optional parameter to be passed in repeated calls
for optimizations, keeping a map of the Document ID to its offset
in the DocList
:return: Reduced DocList
"""
left_id_map = left_id_map or {doc.id: i for i, doc in enumerate(left)}
for doc in right:
if doc.id in left_id_map:
left[left_id_map[doc.id]].update(doc)
else:
left.append(doc)
return left
def reduce_all(docarrays: List[DocList]) -> DocList:
"""
Reduces a list of DocLists into one DocList.
Changes are applied to the first DocList in-place.
The resulting DocList contains Documents of all DocLists.
If a Document exists (identified by their ID) in many DocLists,
data properties are merged with priority to the left-most
DocLists (that is, if a data attribute is set in a Document
belonging to many DocLists, the attribute value of the left-most
DocList is kept).
Nested DocLists belonging to many DocLists
are also reduced in the same way.
.. note::
- Nested DocLists order does not follow any specific rule.
You might want to re-sort them in a later step.
- The final result depends on the order of DocLists
when applying reduction.
:param docarrays: List of DocLists to be reduced
:return: the resulting DocList
"""
if len(docarrays) <= 1:
raise Exception(
'In order to reduce DocLists' ' we should have more than one DocList'
)
left = docarrays[0]
others = docarrays[1:]
left_id_map = {doc.id: i for i, doc in enumerate(left)}
for docs in others:
reduce(left, docs, left_id_map)
return left
|
__all__ = ['reduce', 'reduce_all']
from typing import Dict, List, Optional
from docarray import DocArray
def reduce(
left: DocArray, right: DocArray, left_id_map: Optional[Dict] = None
) -> 'DocArray':
"""
Reduces left and right DocArray into one DocArray in-place.
Changes are applied to the left DocArray.
Reducing 2 DocArrays consists in adding Documents in the second DocArray
to the first DocArray if they do not exist.
If a Document exists in both DocArrays (identified by ID),
the data properties are merged with priority to the left Document.
Nested DocArrays are also reduced in the same way.
:param left: First DocArray to be reduced. Changes will be applied to it
in-place
:param right: Second DocArray to be reduced
:param left_id_map: Optional parameter to be passed in repeated calls
for optimizations, keeping a map of the Document ID to its offset
in the DocArray
:return: Reduced DocArray
"""
left_id_map = left_id_map or {doc.id: i for i, doc in enumerate(left)}
for doc in right:
if doc.id in left_id_map:
left[left_id_map[doc.id]].update(doc)
else:
left.append(doc)
return left
def reduce_all(docarrays: List[DocArray]) -> DocArray:
"""
Reduces a list of DocArrays into one DocArray.
Changes are applied to the first DocArray in-place.
The resulting DocArray contains Documents of all DocArrays.
If a Document exists (identified by their ID) in many DocArrays,
data properties are merged with priority to the left-most
DocArrays (that is, if a data attribute is set in a Document
belonging to many DocArrays, the attribute value of the left-most
DocArray is kept).
Nested DocArrays belonging to many DocArrays
are also reduced in the same way.
.. note::
- Nested DocArrays order does not follow any specific rule.
You might want to re-sort them in a later step.
- The final result depends on the order of DocArrays
when applying reduction.
:param docarrays: List of DocArrays to be reduced
:return: the resulting DocArray
"""
if len(docarrays) <= 1:
raise Exception(
'In order to reduce DocArrays' ' we should have more than one DocArray'
)
left = docarrays[0]
others = docarrays[1:]
left_id_map = {doc.id: i for i, doc in enumerate(left)}
for da in others:
reduce(left, da, left_id_map)
return left
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.10.7'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information contains major, minor, micro version.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.10.6'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information contains major, minor, micro version.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
import warnings
from keras.src import activations
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.LeakyReLU")
class LeakyReLU(Layer):
"""Leaky version of a Rectified Linear Unit activation layer.
This layer allows a small gradient when the unit is not active.
Formula:
``` python
f(x) = alpha * x if x < 0
f(x) = x if x >= 0
```
Example:
``` python
leaky_relu_layer = LeakyReLU(negative_slope=0.5)
input = np.array([-10, -5, 0.0, 5, 10])
result = leaky_relu_layer(input)
# result = [-5. , -2.5, 0. , 5. , 10.]
```
Args:
negative_slope: Float >= 0.0. Negative slope coefficient.
Defaults to `0.3`.
**kwargs: Base layer keyword arguments, such as
`name` and `dtype`.
"""
def __init__(self, negative_slope=0.3, **kwargs):
if "alpha" in kwargs:
negative_slope = kwargs.pop("alpha")
warnings.warn(
"Argument `alpha` is deprecated. Use `negative_slope` instead."
)
super().__init__(**kwargs)
if negative_slope is None or negative_slope < 0:
raise ValueError(
"The negative_slope value of a Leaky ReLU layer "
"cannot be None or negative value. Expected a float."
f" Received: negative_slope={negative_slope}"
)
self.negative_slope = negative_slope
self.supports_masking = True
self._build_at_init()
def call(self, inputs):
return activations.leaky_relu(
inputs, negative_slope=self.negative_slope
)
def get_config(self):
config = super().get_config()
config.update({"negative_slope": self.negative_slope})
return config
def compute_output_shape(self, input_shape):
return input_shape
|
import warnings
from keras.src import activations
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.LeakyReLU")
class LeakyReLU(Layer):
"""Leaky version of a Rectified Linear Unit activation layer.
This layer allows a small gradient when the unit is not active.
Formula:
``` python
f(x) = alpha * x if x < 0
f(x) = x if x >= 0
```
Example:
``` python
leaky_relu_layer = LeakyReLU(negative_slope=0.5)
input = np.array([-10, -5, 0.0, 5, 10])
result = leaky_relu_layer(input)
# result = [-5. , -2.5, 0. , 5. , 10.]
```
Args:
negative_slope: Float >= 0.0. Negative slope coefficient.
Defaults to `0.3`.
**kwargs: Base layer keyword arguments, such as
`name` and `dtype`.
"""
def __init__(self, negative_slope=0.3, **kwargs):
if "alpha" in kwargs:
negative_slope = kwargs.pop("alpha")
warnings.warn(
"Argument `alpha` is deprecated. Use `negative_slope` instead."
)
super().__init__(**kwargs)
if negative_slope is None or negative_slope < 0:
raise ValueError(
"The negative_slope value of a Leaky ReLU layer "
"cannot be None or negative value. Expected a float."
f" Received: negative_slope={negative_slope}"
)
self.negative_slope = negative_slope
self.supports_masking = True
self.built = True
def call(self, inputs):
return activations.leaky_relu(
inputs, negative_slope=self.negative_slope
)
def get_config(self):
config = super().get_config()
config.update({"negative_slope": self.negative_slope})
return config
def compute_output_shape(self, input_shape):
return input_shape
|
"""
This basic example loads a pre-trained model from the web and uses it to
generate sentence embeddings for a given list of sentences.
"""
import logging
import numpy as np
from sentence_transformers import LoggingHandler, SentenceTransformer
#### Just some code to print debug information to stdout
np.set_printoptions(threshold=100)
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Load pre-trained Sentence Transformer Model. It will be downloaded automatically
model = SentenceTransformer("all-MiniLM-L6-v2")
# Embed a list of sentences
sentences = [
"This framework generates embeddings for each input sentence",
"Sentences are passed as a list of string.",
"The quick brown fox jumps over the lazy dog.",
]
sentence_embeddings = model.encode(sentences)
# The result is a list of sentence embeddings as numpy arrays
for sentence, embedding in zip(sentences, sentence_embeddings):
print("Sentence:", sentence)
print("Embedding:", embedding)
print("")
|
"""
This basic example loads a pre-trained model from the web and uses it to
generate sentence embeddings for a given list of sentences.
"""
from sentence_transformers import SentenceTransformer, LoggingHandler
import numpy as np
import logging
#### Just some code to print debug information to stdout
np.set_printoptions(threshold=100)
logging.basicConfig(format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
handlers=[LoggingHandler()])
#### /print debug information to stdout
# Load pre-trained Sentence Transformer Model. It will be downloaded automatically
model = SentenceTransformer('all-MiniLM-L6-v2')
# Embed a list of sentences
sentences = ['This framework generates embeddings for each input sentence',
'Sentences are passed as a list of string.',
'The quick brown fox jumps over the lazy dog.']
sentence_embeddings = model.encode(sentences)
# The result is a list of sentence embeddings as numpy arrays
for sentence, embedding in zip(sentences, sentence_embeddings):
print("Sentence:", sentence)
print("Embedding:", embedding)
print("")
|
_base_ = '../_base_/default_runtime.py'
# model settings
model = dict(
type='YOLOV3',
backbone=dict(
type='Darknet',
depth=53,
out_indices=(3, 4, 5),
init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://darknet53')),
neck=dict(
type='YOLOV3Neck',
num_scales=3,
in_channels=[1024, 512, 256],
out_channels=[512, 256, 128]),
bbox_head=dict(
type='YOLOV3Head',
num_classes=80,
in_channels=[512, 256, 128],
out_channels=[1024, 512, 256],
anchor_generator=dict(
type='YOLOAnchorGenerator',
base_sizes=[[(116, 90), (156, 198), (373, 326)],
[(30, 61), (62, 45), (59, 119)],
[(10, 13), (16, 30), (33, 23)]],
strides=[32, 16, 8]),
bbox_coder=dict(type='YOLOBBoxCoder'),
featmap_strides=[32, 16, 8],
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0,
reduction='sum'),
loss_conf=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0,
reduction='sum'),
loss_xy=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=2.0,
reduction='sum'),
loss_wh=dict(type='MSELoss', loss_weight=2.0, reduction='sum')),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='GridAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0)),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
conf_thr=0.005,
nms=dict(type='nms', iou_threshold=0.45),
max_per_img=100))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(mean=[0, 0, 0], std=[255., 255., 255.], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 2)),
dict(
type='MinIoURandomCrop',
min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=[(320, 320), (608, 608)], keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(608, 608),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=4,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=2000, # same as burn-in in darknet
warmup_ratio=0.1,
step=[218, 246])
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=273)
evaluation = dict(interval=1, metric=['bbox'])
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = '../_base_/default_runtime.py'
# model settings
model = dict(
type='YOLOV3',
backbone=dict(
type='Darknet',
depth=53,
out_indices=(3, 4, 5),
init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://darknet53')),
neck=dict(
type='YOLOV3Neck',
num_scales=3,
in_channels=[1024, 512, 256],
out_channels=[512, 256, 128]),
bbox_head=dict(
type='YOLOV3Head',
num_classes=80,
in_channels=[512, 256, 128],
out_channels=[1024, 512, 256],
anchor_generator=dict(
type='YOLOAnchorGenerator',
base_sizes=[[(116, 90), (156, 198), (373, 326)],
[(30, 61), (62, 45), (59, 119)],
[(10, 13), (16, 30), (33, 23)]],
strides=[32, 16, 8]),
bbox_coder=dict(type='YOLOBBoxCoder'),
featmap_strides=[32, 16, 8],
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0,
reduction='sum'),
loss_conf=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0,
reduction='sum'),
loss_xy=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=2.0,
reduction='sum'),
loss_wh=dict(type='MSELoss', loss_weight=2.0, reduction='sum')),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='GridAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0)),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
conf_thr=0.005,
nms=dict(type='nms', iou_threshold=0.45),
max_per_img=100))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(mean=[0, 0, 0], std=[255., 255., 255.], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 2)),
dict(
type='MinIoURandomCrop',
min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=[(320, 320), (608, 608)], keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(608, 608),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=4,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=2000, # same as burn-in in darknet
warmup_ratio=0.1,
step=[218, 246])
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=273)
evaluation = dict(interval=1, metric=['bbox'])
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .two_stage import TwoStageDetector
@MODELS.register_module()
class SparseRCNN(TwoStageDetector):
r"""Implementation of `Sparse R-CNN: End-to-End Object Detection with
Learnable Proposals <https://arxiv.org/abs/2011.12450>`_"""
def __init__(self,
backbone: ConfigType,
neck: OptConfigType = None,
rpn_head: OptConfigType = None,
roi_head: OptConfigType = None,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
assert self.with_rpn, 'Sparse R-CNN and QueryInst ' \
'do not support external proposals'
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .two_stage import TwoStageDetector
@MODELS.register_module()
class SparseRCNN(TwoStageDetector):
r"""Implementation of `Sparse R-CNN: End-to-End Object Detection with
Learnable Proposals <https://arxiv.org/abs/2011.12450>`_"""
def __init__(self, *args, **kwargs):
super(SparseRCNN, self).__init__(*args, **kwargs)
assert self.with_rpn, 'Sparse R-CNN and QueryInst ' \
'do not support external proposals'
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None,
proposals=None,
**kwargs):
"""Forward function of SparseR-CNN and QueryInst in train stage.
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
img_metas (list[dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
:class:`mmdet.datasets.pipelines.Collect`.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor): specify which bounding
boxes can be ignored when computing the loss.
gt_masks (List[Tensor], optional) : Segmentation masks for
each box. This is required to train QueryInst.
proposals (List[Tensor], optional): override rpn proposals with
custom proposals. Use when `with_rpn` is False.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
assert proposals is None, 'Sparse R-CNN and QueryInst ' \
'do not support external proposals'
x = self.extract_feat(img)
proposal_boxes, proposal_features, imgs_whwh = \
self.rpn_head.forward_train(x, img_metas)
roi_losses = self.roi_head.forward_train(
x,
proposal_boxes,
proposal_features,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=gt_bboxes_ignore,
gt_masks=gt_masks,
imgs_whwh=imgs_whwh)
return roi_losses
def simple_test(self, img, img_metas, rescale=False):
"""Test function without test time augmentation.
Args:
imgs (list[torch.Tensor]): List of multiple images
img_metas (list[dict]): List of image information.
rescale (bool): Whether to rescale the results.
Defaults to False.
Returns:
list[list[np.ndarray]]: BBox results of each image and classes.
The outer list corresponds to each image. The inner list
corresponds to each class.
"""
x = self.extract_feat(img)
proposal_boxes, proposal_features, imgs_whwh = \
self.rpn_head.simple_test_rpn(x, img_metas)
results = self.roi_head.simple_test(
x,
proposal_boxes,
proposal_features,
img_metas,
imgs_whwh=imgs_whwh,
rescale=rescale)
return results
def forward_dummy(self, img):
"""Used for computing network flops.
See `mmdetection/tools/analysis_tools/get_flops.py`
"""
# backbone
x = self.extract_feat(img)
# rpn
num_imgs = len(img)
dummy_img_metas = [
dict(img_shape=(800, 1333, 3)) for _ in range(num_imgs)
]
proposal_boxes, proposal_features, imgs_whwh = \
self.rpn_head.simple_test_rpn(x, dummy_img_metas)
# roi_head
roi_outs = self.roi_head.forward_dummy(x, proposal_boxes,
proposal_features,
dummy_img_metas)
return roi_outs
|
__version__ = '0.19.0'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
from docarray.helper import login, logout
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.18.2'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
from docarray.helper import login, logout
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import pycocotools.mask as mask_util
import torch
def split_combined_polys(polys, poly_lens, polys_per_mask):
"""Split the combined 1-D polys into masks.
A mask is represented as a list of polys, and a poly is represented as
a 1-D array. In dataset, all masks are concatenated into a single 1-D
tensor. Here we need to split the tensor into original representations.
Args:
polys (list): a list (length = image num) of 1-D tensors
poly_lens (list): a list (length = image num) of poly length
polys_per_mask (list): a list (length = image num) of poly number
of each mask
Returns:
list: a list (length = image num) of list (length = mask num) of \
list (length = poly num) of numpy array.
"""
mask_polys_list = []
for img_id in range(len(polys)):
polys_single = polys[img_id]
polys_lens_single = poly_lens[img_id].tolist()
polys_per_mask_single = polys_per_mask[img_id].tolist()
split_polys = mmcv.slice_list(polys_single, polys_lens_single)
mask_polys = mmcv.slice_list(split_polys, polys_per_mask_single)
mask_polys_list.append(mask_polys)
return mask_polys_list
# TODO: move this function to more proper place
def encode_mask_results(mask_results):
"""Encode bitmap mask to RLE code.
Args:
mask_results (list | tuple[list]): bitmap mask results.
In mask scoring rcnn, mask_results is a tuple of (segm_results,
segm_cls_score).
Returns:
list | tuple: RLE encoded mask.
"""
if isinstance(mask_results, tuple): # mask scoring
cls_segms, cls_mask_scores = mask_results
else:
cls_segms = mask_results
num_classes = len(cls_segms)
encoded_mask_results = [[] for _ in range(num_classes)]
for i in range(len(cls_segms)):
for cls_segm in cls_segms[i]:
encoded_mask_results[i].append(
mask_util.encode(
np.array(
cls_segm[:, :, np.newaxis], order='F',
dtype='uint8'))[0]) # encoded with RLE
if isinstance(mask_results, tuple):
return encoded_mask_results, cls_mask_scores
else:
return encoded_mask_results
def mask2bbox(masks):
"""Obtain tight bounding boxes of binary masks.
Args:
masks (Tensor): Binary mask of shape (n, h, w).
Returns:
Tensor: Bboxe with shape (n, 4) of \
positive region in binary mask.
"""
N = masks.shape[0]
bboxes = masks.new_zeros((N, 4), dtype=torch.float32)
x_any = torch.any(masks, dim=1)
y_any = torch.any(masks, dim=2)
for i in range(N):
x = torch.where(x_any[i, :])[0]
y = torch.where(y_any[i, :])[0]
if len(x) > 0 and len(y) > 0:
bboxes[i, :] = bboxes.new_tensor(
[x[0], y[0], x[-1] + 1, y[-1] + 1])
return bboxes
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import pycocotools.mask as mask_util
def split_combined_polys(polys, poly_lens, polys_per_mask):
"""Split the combined 1-D polys into masks.
A mask is represented as a list of polys, and a poly is represented as
a 1-D array. In dataset, all masks are concatenated into a single 1-D
tensor. Here we need to split the tensor into original representations.
Args:
polys (list): a list (length = image num) of 1-D tensors
poly_lens (list): a list (length = image num) of poly length
polys_per_mask (list): a list (length = image num) of poly number
of each mask
Returns:
list: a list (length = image num) of list (length = mask num) of \
list (length = poly num) of numpy array.
"""
mask_polys_list = []
for img_id in range(len(polys)):
polys_single = polys[img_id]
polys_lens_single = poly_lens[img_id].tolist()
polys_per_mask_single = polys_per_mask[img_id].tolist()
split_polys = mmcv.slice_list(polys_single, polys_lens_single)
mask_polys = mmcv.slice_list(split_polys, polys_per_mask_single)
mask_polys_list.append(mask_polys)
return mask_polys_list
# TODO: move this function to more proper place
def encode_mask_results(mask_results):
"""Encode bitmap mask to RLE code.
Args:
mask_results (list | tuple[list]): bitmap mask results.
In mask scoring rcnn, mask_results is a tuple of (segm_results,
segm_cls_score).
Returns:
list | tuple: RLE encoded mask.
"""
if isinstance(mask_results, tuple): # mask scoring
cls_segms, cls_mask_scores = mask_results
else:
cls_segms = mask_results
num_classes = len(cls_segms)
encoded_mask_results = [[] for _ in range(num_classes)]
for i in range(len(cls_segms)):
for cls_segm in cls_segms[i]:
encoded_mask_results[i].append(
mask_util.encode(
np.array(
cls_segm[:, :, np.newaxis], order='F',
dtype='uint8'))[0]) # encoded with RLE
if isinstance(mask_results, tuple):
return encoded_mask_results, cls_mask_scores
else:
return encoded_mask_results
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers.image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torchvision_available, is_vision_available
from ...test_video_processing_common import VideoProcessingTestMixin, prepare_video_inputs
if is_vision_available():
if is_torchvision_available():
from transformers import LlavaNextVideoVideoProcessor
class LlavaNextVideoProcessingTester:
def __init__(
self,
parent,
batch_size=5,
num_frames=8,
num_channels=3,
min_resolution=30,
max_resolution=80,
do_resize=True,
size=None,
do_center_crop=True,
crop_size=None,
do_normalize=True,
image_mean=OPENAI_CLIP_MEAN,
image_std=OPENAI_CLIP_STD,
do_convert_rgb=True,
):
size = size if size is not None else {"height": 20, "width": 20}
crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
self.parent = parent
self.batch_size = batch_size
self.num_frames = num_frames
self.num_channels = num_channels
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.do_convert_rgb = do_convert_rgb
def prepare_video_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def expected_output_video_shape(self, images):
return self.num_frames, self.num_channels, self.crop_size["height"], self.crop_size["width"]
def prepare_video_inputs(self, equal_resolution=False, return_tensors="pil"):
videos = prepare_video_inputs(
batch_size=self.batch_size,
num_frames=self.num_frames,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
return_tensors=return_tensors,
)
return videos
@require_torch
@require_vision
class LlavaNextVideoProcessingTest(VideoProcessingTestMixin, unittest.TestCase):
fast_video_processing_class = LlavaNextVideoVideoProcessor if is_torchvision_available() else None
def setUp(self):
super().setUp()
self.video_processor_tester = LlavaNextVideoProcessingTester(self)
@property
def video_processor_dict(self):
return self.video_processor_tester.prepare_video_processor_dict()
def test_video_processor_properties(self):
video_processing = self.fast_video_processing_class(**self.video_processor_dict)
self.assertTrue(hasattr(video_processing, "do_resize"))
self.assertTrue(hasattr(video_processing, "size"))
self.assertTrue(hasattr(video_processing, "do_center_crop"))
self.assertTrue(hasattr(video_processing, "center_crop"))
self.assertTrue(hasattr(video_processing, "do_normalize"))
self.assertTrue(hasattr(video_processing, "image_mean"))
self.assertTrue(hasattr(video_processing, "image_std"))
self.assertTrue(hasattr(video_processing, "do_convert_rgb"))
def test_video_processor_from_dict_with_kwargs(self):
video_processor = self.fast_video_processing_class.from_dict(self.video_processor_dict)
self.assertEqual(video_processor.size, {"height": 20, "width": 20})
self.assertEqual(video_processor.crop_size, {"height": 18, "width": 18})
video_processor = self.fast_video_processing_class.from_dict(self.video_processor_dict, size=42, crop_size=84)
self.assertEqual(video_processor.size, {"shortest_edge": 42})
self.assertEqual(video_processor.crop_size, {"height": 84, "width": 84})
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers.image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available
from ...test_video_processing_common import VideoProcessingTestMixin, prepare_video_inputs
if is_torch_available():
pass
if is_vision_available():
if is_torchvision_available():
from transformers import LlavaNextVideoVideoProcessor
class LlavaNextVideoProcessingTester:
def __init__(
self,
parent,
batch_size=5,
num_frames=8,
num_channels=3,
min_resolution=30,
max_resolution=80,
do_resize=True,
size=None,
do_center_crop=True,
crop_size=None,
do_normalize=True,
image_mean=OPENAI_CLIP_MEAN,
image_std=OPENAI_CLIP_STD,
do_convert_rgb=True,
):
size = size if size is not None else {"height": 20, "width": 20}
crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
self.parent = parent
self.batch_size = batch_size
self.num_frames = num_frames
self.num_channels = num_channels
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.do_convert_rgb = do_convert_rgb
def prepare_video_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def expected_output_video_shape(self, images):
return self.num_frames, self.num_channels, self.crop_size["height"], self.crop_size["width"]
def prepare_video_inputs(self, equal_resolution=False, return_tensors="pil"):
videos = prepare_video_inputs(
batch_size=self.batch_size,
num_frames=self.num_frames,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
return_tensors=return_tensors,
)
return videos
@require_torch
@require_vision
class LlavaNextVideoProcessingTest(VideoProcessingTestMixin, unittest.TestCase):
fast_video_processing_class = LlavaNextVideoVideoProcessor if is_torchvision_available() else None
def setUp(self):
super().setUp()
self.video_processor_tester = LlavaNextVideoProcessingTester(self)
@property
def video_processor_dict(self):
return self.video_processor_tester.prepare_video_processor_dict()
def test_video_processor_properties(self):
video_processing = self.fast_video_processing_class(**self.video_processor_dict)
self.assertTrue(hasattr(video_processing, "do_resize"))
self.assertTrue(hasattr(video_processing, "size"))
self.assertTrue(hasattr(video_processing, "do_center_crop"))
self.assertTrue(hasattr(video_processing, "center_crop"))
self.assertTrue(hasattr(video_processing, "do_normalize"))
self.assertTrue(hasattr(video_processing, "image_mean"))
self.assertTrue(hasattr(video_processing, "image_std"))
self.assertTrue(hasattr(video_processing, "do_convert_rgb"))
def test_video_processor_from_dict_with_kwargs(self):
video_processor = self.fast_video_processing_class.from_dict(self.video_processor_dict)
self.assertEqual(video_processor.size, {"height": 20, "width": 20})
self.assertEqual(video_processor.crop_size, {"height": 18, "width": 18})
video_processor = self.fast_video_processing_class.from_dict(self.video_processor_dict, size=42, crop_size=84)
self.assertEqual(video_processor.size, {"shortest_edge": 42})
self.assertEqual(video_processor.crop_size, {"height": 84, "width": 84})
|
# Copyright (c) OpenMMLab. All rights reserved.
"""Collecting some commonly used type hint in mmdetection."""
from typing import List, Optional, Union
from mmengine.config import ConfigDict
from mmengine.data import InstanceData
from ..data_structures import DetDataSample
# Type hint of config data
ConfigType = Union[ConfigDict, dict]
OptConfigType = Optional[ConfigType]
# Type hint of one or more config data
MultiConfig = Union[ConfigType, List[ConfigType]]
OptMultiConfig = Optional[MultiConfig]
# List of InstanceData
InstanceList = List[InstanceData]
OptInstanceList = Optional[InstanceList]
# List of DetDataSample
SampleList = List[DetDataSample]
OptSampleList = Optional[SampleList]
|
# Copyright (c) OpenMMLab. All rights reserved.
"""Collecting some commonly used type hint in mmdetection."""
from typing import List, Optional, Union
from mmengine.config import ConfigDict
from mmengine.data import InstanceData
from mmdet.core import DetDataSample
# Type hint of config data
ConfigType = Union[ConfigDict, dict]
OptConfigType = Optional[ConfigType]
# Type hint of one or more config data
MultiConfig = Union[ConfigType, List[ConfigType]]
OptMultiConfig = Optional[MultiConfig]
# List of InstanceData
InstanceList = List[InstanceData]
OptInstanceList = Optional[InstanceList]
# List of DetDataSample
SampleList = List[DetDataSample]
OptSampleList = Optional[SampleList]
|
class MediaUploadError(Exception):
"""Base exception for media upload errors"""
pass
class InvalidFileTypeError(MediaUploadError):
"""Raised when file type is not supported"""
pass
class FileSizeTooLargeError(MediaUploadError):
"""Raised when file size exceeds maximum limit"""
pass
class FileReadError(MediaUploadError):
"""Raised when there's an error reading the file"""
pass
class StorageConfigError(MediaUploadError):
"""Raised when storage configuration is invalid"""
pass
class StorageUploadError(MediaUploadError):
"""Raised when upload to storage fails"""
pass
class StoreError(Exception):
"""Base exception for store-related errors"""
pass
class AgentNotFoundError(StoreError):
"""Raised when an agent is not found"""
pass
class CreatorNotFoundError(StoreError):
"""Raised when a creator is not found"""
pass
class ListingExistsError(StoreError):
"""Raised when trying to create a listing that already exists"""
pass
class DatabaseError(StoreError):
"""Raised when there is an error interacting with the database"""
pass
class ProfileNotFoundError(StoreError):
"""Raised when a profile is not found"""
pass
class ListingNotFoundError(StoreError):
"""Raised when a store listing is not found"""
pass
class SubmissionNotFoundError(StoreError):
"""Raised when a submission is not found"""
pass
|
class MediaUploadError(Exception):
"""Base exception for media upload errors"""
pass
class InvalidFileTypeError(MediaUploadError):
"""Raised when file type is not supported"""
pass
class FileSizeTooLargeError(MediaUploadError):
"""Raised when file size exceeds maximum limit"""
pass
class FileReadError(MediaUploadError):
"""Raised when there's an error reading the file"""
pass
class StorageConfigError(MediaUploadError):
"""Raised when storage configuration is invalid"""
pass
class StorageUploadError(MediaUploadError):
"""Raised when upload to storage fails"""
pass
class StoreError(Exception):
"""Base exception for store-related errors"""
pass
class AgentNotFoundError(StoreError):
"""Raised when an agent is not found"""
pass
class CreatorNotFoundError(StoreError):
"""Raised when a creator is not found"""
pass
class ListingExistsError(StoreError):
"""Raised when trying to create a listing that already exists"""
pass
class DatabaseError(StoreError):
"""Raised when there is an error interacting with the database"""
pass
class ProfileNotFoundError(StoreError):
"""Raised when a profile is not found"""
pass
class SubmissionNotFoundError(StoreError):
"""Raised when a submission is not found"""
pass
|
# Copyright (c) OpenMMLab. All rights reserved.
import logging
import random
from typing import List, Optional, Tuple
import numpy as np
import torch
from mmengine.dist import get_rank, sync_random_seed
from mmengine.logging import print_log
from mmengine.utils import digit_version, is_list_of
from mmengine.utils.dl_utils import TORCH_VERSION
def calc_dynamic_intervals(
start_interval: int,
dynamic_interval_list: Optional[List[Tuple[int, int]]] = None
) -> Tuple[List[int], List[int]]:
"""Calculate dynamic intervals.
Args:
start_interval (int): The interval used in the beginning.
dynamic_interval_list (List[Tuple[int, int]], optional): The
first element in the tuple is a milestone and the second
element is a interval. The interval is used after the
corresponding milestone. Defaults to None.
Returns:
Tuple[List[int], List[int]]: a list of milestone and its corresponding
intervals.
"""
if dynamic_interval_list is None:
return [0], [start_interval]
assert is_list_of(dynamic_interval_list, tuple)
dynamic_milestones = [0]
dynamic_milestones.extend(
[dynamic_interval[0] for dynamic_interval in dynamic_interval_list])
dynamic_intervals = [start_interval]
dynamic_intervals.extend(
[dynamic_interval[1] for dynamic_interval in dynamic_interval_list])
return dynamic_milestones, dynamic_intervals
def set_random_seed(seed: Optional[int] = None,
deterministic: bool = False,
diff_rank_seed: bool = False) -> int:
"""Set random seed.
Args:
seed (int, optional): Seed to be used.
deterministic (bool): Whether to set the deterministic option for
CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
to True and `torch.backends.cudnn.benchmark` to False.
Defaults to False.
diff_rank_seed (bool): Whether to add rank number to the random seed to
have different random seed in different threads. Defaults to False.
"""
if seed is None:
seed = sync_random_seed()
if diff_rank_seed:
rank = get_rank()
seed += rank
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
# torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# os.environ['PYTHONHASHSEED'] = str(seed)
if deterministic:
if torch.backends.cudnn.benchmark:
print_log(
'torch.backends.cudnn.benchmark is going to be set as '
'`False` to cause cuDNN to deterministically select an '
'algorithm',
logger='current',
level=logging.WARNING)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if digit_version(TORCH_VERSION) >= digit_version('1.10.0'):
torch.use_deterministic_algorithms(True)
return seed
|
# Copyright (c) OpenMMLab. All rights reserved.
import logging
import random
from typing import List, Optional, Tuple
import numpy as np
import torch
from mmengine.dist import get_rank, sync_random_seed
from mmengine.logging import print_log
from mmengine.utils import digit_version, is_list_of
from mmengine.utils.dl_utils import TORCH_VERSION
def calc_dynamic_intervals(
start_interval: int,
dynamic_interval_list: Optional[List[Tuple[int, int]]] = None
) -> Tuple[List[int], List[int]]:
"""Calculate dynamic intervals.
Args:
start_interval (int): The interval used in the beginning.
dynamic_interval_list (List[Tuple[int, int]], optional): The
first element in the tuple is a milestone and the second
element is a interval. The interval is used after the
corresponding milestone. Defaults to None.
Returns:
Tuple[List[int], List[int]]: a list of milestone and its corresponding
intervals.
"""
if dynamic_interval_list is None:
return [0], [start_interval]
assert is_list_of(dynamic_interval_list, tuple)
dynamic_milestones = [0]
dynamic_milestones.extend(
[dynamic_interval[0] for dynamic_interval in dynamic_interval_list])
dynamic_intervals = [start_interval]
dynamic_intervals.extend(
[dynamic_interval[1] for dynamic_interval in dynamic_interval_list])
return dynamic_milestones, dynamic_intervals
def set_random_seed(seed: Optional[int] = None,
deterministic: bool = False,
diff_rank_seed: bool = False) -> int:
"""Set random seed.
Args:
seed (int, optional): Seed to be used.
deterministic (bool): Whether to set the deterministic option for
CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
to True and `torch.backends.cudnn.benchmark` to False.
Defaults to False.
diff_rank_seed (bool): Whether to add rank number to the random seed to
have different random seed in different threads. Defaults to False.
"""
if seed is None:
seed = sync_random_seed()
if diff_rank_seed:
rank = get_rank()
seed += rank
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
# torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# os.environ['PYTHONHASHSEED'] = str(seed)
if deterministic:
if torch.backends.cudnn.benchmark:
print_log(
'torch.backends.cudnn.benchmark is going to be set as '
'`False` to cause cuDNN to deterministically select an '
'algorithm',
logger='current',
level=logging.WARNING)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if digit_version(TORCH_VERSION) >= digit_version('1.10.0'):
torch.use_deterministic_algorithms(True)
return seed
|
_base_ = [
'./yolox_x_8xb4-80e_crowdhuman-mot17halftrain_test-mot17halfval.py', # noqa: E501
]
dataset_type = 'MOTChallengeDataset'
detector = _base_.model
detector.pop('data_preprocessor')
del _base_.model
model = dict(
type='StrongSORT',
data_preprocessor=dict(
type='TrackDataPreprocessor',
pad_size_divisor=32,
batch_augments=[
dict(
type='BatchSyncRandomResize',
random_size_range=(576, 1024),
size_divisor=32,
interval=10)
]),
detector=detector,
reid=dict(
type='BaseReID',
data_preprocessor=None,
backbone=dict(
type='mmcls.ResNet',
depth=50,
num_stages=4,
out_indices=(3, ),
style='pytorch'),
neck=dict(type='GlobalAveragePooling', kernel_size=(8, 4), stride=1),
head=dict(
type='LinearReIDHead',
num_fcs=1,
in_channels=2048,
fc_channels=1024,
out_channels=128,
num_classes=380,
loss_cls=dict(type='mmcls.CrossEntropyLoss', loss_weight=1.0),
loss_triplet=dict(type='TripletLoss', margin=0.3, loss_weight=1.0),
norm_cfg=dict(type='BN1d'),
act_cfg=dict(type='ReLU'))),
cmc=dict(
type='CameraMotionCompensation',
warp_mode='cv2.MOTION_EUCLIDEAN',
num_iters=100,
stop_eps=0.00001),
tracker=dict(
type='StrongSORTTracker',
motion=dict(type='KalmanFilter', center_only=False, use_nsa=True),
obj_score_thr=0.6,
reid=dict(
num_samples=None,
img_scale=(256, 128),
img_norm_cfg=dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
match_score_thr=0.3,
motion_weight=0.02,
),
match_iou_thr=0.7,
momentums=dict(embeds=0.1, ),
num_tentatives=2,
num_frames_retain=100),
postprocess_model=dict(
type='AppearanceFreeLink',
checkpoint= # noqa: E251
'https://download.openmmlab.com/mmtracking/mot/strongsort/mot_dataset/aflink_motchallenge_20220812_190310-a7578ad3.pth', # noqa: E501
temporal_threshold=(0, 30),
spatial_threshold=50,
confidence_threshold=0.95,
))
train_pipeline = None
test_pipeline = [
dict(
type='TransformBroadcaster',
transforms=[
dict(type='LoadImageFromFile', backend_args=_base_.backend_args),
dict(type='Resize', scale=_base_.img_scale, keep_ratio=True),
dict(
type='Pad',
size_divisor=32,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='LoadTrackAnnotations'),
]),
dict(type='PackTrackInputs')
]
train_dataloader = None
val_dataloader = dict(
# Now StrongSORT only support video_based sampling
sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),
dataset=dict(
_delete_=True,
type=dataset_type,
data_root=_base_.data_root,
ann_file='annotations/half-val_cocoformat.json',
data_prefix=dict(img_path='train'),
# when you evaluate track performance, you need to remove metainfo
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
train_cfg = None
optim_wrapper = None
# evaluator
val_evaluator = dict(
_delete_=True,
type='MOTChallengeMetric',
metric=['HOTA', 'CLEAR', 'Identity'],
# use_postprocess to support AppearanceFreeLink in val_evaluator
use_postprocess=True,
postprocess_tracklet_cfg=[
dict(
type='InterpolateTracklets',
min_num_frames=5,
max_num_frames=20,
use_gsi=True,
smooth_tau=10)
])
test_evaluator = val_evaluator
default_hooks = dict(logger=dict(type='LoggerHook', interval=1))
del _base_.param_scheduler
del _base_.custom_hooks
|
_base_ = [
'./yolox_x_8xb4-80e_crowdhuman-mot17halftrain_test-mot17halfval.py', # noqa: E501
]
dataset_type = 'MOTChallengeDataset'
detector = _base_.model
detector.pop('data_preprocessor')
del _base_.model
model = dict(
type='StrongSORT',
data_preprocessor=dict(
type='TrackDataPreprocessor',
pad_size_divisor=32,
batch_augments=[
dict(
type='BatchSyncRandomResize',
random_size_range=(576, 1024),
size_divisor=32,
interval=10)
]),
detector=detector,
reid=dict(
type='BaseReID',
data_preprocessor=None,
backbone=dict(
type='mmcls.ResNet',
depth=50,
num_stages=4,
out_indices=(3, ),
style='pytorch'),
neck=dict(type='GlobalAveragePooling', kernel_size=(8, 4), stride=1),
head=dict(
type='LinearReIDHead',
num_fcs=1,
in_channels=2048,
fc_channels=1024,
out_channels=128,
num_classes=380,
loss_cls=dict(type='mmcls.CrossEntropyLoss', loss_weight=1.0),
loss_triplet=dict(type='TripletLoss', margin=0.3, loss_weight=1.0),
norm_cfg=dict(type='BN1d'),
act_cfg=dict(type='ReLU'))),
cmc=dict(
type='CameraMotionCompensation',
warp_mode='cv2.MOTION_EUCLIDEAN',
num_iters=100,
stop_eps=0.00001),
tracker=dict(
type='StrongSORTTracker',
motion=dict(type='KalmanFilter', center_only=False, use_nsa=True),
obj_score_thr=0.6,
reid=dict(
num_samples=None,
img_scale=(256, 128),
img_norm_cfg=dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
match_score_thr=0.3,
motion_weight=0.02,
),
match_iou_thr=0.7,
momentums=dict(embeds=0.1, ),
num_tentatives=2,
num_frames_retain=100),
postprocess_model=dict(
type='AppearanceFreeLink',
checkpoint= # noqa: E251
'https://download.openmmlab.com/mmtracking/mot/strongsort/mot_dataset/aflink_motchallenge_20220812_190310-a7578ad3.pth', # noqa: E501
temporal_threshold=(0, 30),
spatial_threshold=50,
confidence_threshold=0.95,
))
train_pipeline = None
test_pipeline = [
dict(
type='TransformBroadcaster',
transforms=[
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=_base_.img_scale, keep_ratio=True),
dict(
type='Pad',
size_divisor=32,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='LoadTrackAnnotations'),
]),
dict(type='PackTrackInputs')
]
train_dataloader = None
val_dataloader = dict(
# Now StrongSORT only support video_based sampling
sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),
dataset=dict(
_delete_=True,
type=dataset_type,
data_root=_base_.data_root,
ann_file='annotations/half-val_cocoformat.json',
data_prefix=dict(img_path='train'),
# when you evaluate track performance, you need to remove metainfo
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
train_cfg = None
optim_wrapper = None
# evaluator
val_evaluator = dict(
_delete_=True,
type='MOTChallengeMetric',
metric=['HOTA', 'CLEAR', 'Identity'],
# use_postprocess to support AppearanceFreeLink in val_evaluator
use_postprocess=True,
postprocess_tracklet_cfg=[
dict(
type='InterpolateTracklets',
min_num_frames=5,
max_num_frames=20,
use_gsi=True,
smooth_tau=10)
])
test_evaluator = val_evaluator
default_hooks = dict(logger=dict(type='LoggerHook', interval=1))
del _base_.param_scheduler
del _base_.custom_hooks
|
# Copyright (c) OpenMMLab. All rights reserved.
from .bbox_overlaps import bbox_overlaps
from .cityscapes_utils import evaluateImgLists
from .class_names import (cityscapes_classes, coco_classes,
coco_panoptic_classes, dataset_aliases, get_classes,
imagenet_det_classes, imagenet_vid_classes,
objects365v1_classes, objects365v2_classes,
oid_challenge_classes, oid_v6_classes, voc_classes)
from .mean_ap import average_precision, eval_map, print_map_summary
from .panoptic_utils import (INSTANCE_OFFSET, pq_compute_multi_core,
pq_compute_single_core)
from .recall import (eval_recalls, plot_iou_recall, plot_num_recall,
print_recall_summary)
from .ytvis import YTVIS
from .ytviseval import YTVISeval
__all__ = [
'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes',
'coco_classes', 'cityscapes_classes', 'dataset_aliases', 'get_classes',
'average_precision', 'eval_map', 'print_map_summary', 'eval_recalls',
'print_recall_summary', 'plot_num_recall', 'plot_iou_recall',
'oid_v6_classes', 'oid_challenge_classes', 'INSTANCE_OFFSET',
'pq_compute_single_core', 'pq_compute_multi_core', 'bbox_overlaps',
'objects365v1_classes', 'objects365v2_classes', 'coco_panoptic_classes',
'evaluateImgLists', 'YTVIS', 'YTVISeval'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .bbox_overlaps import bbox_overlaps
from .cityscapes_utils import evaluateImgLists
from .class_names import (cityscapes_classes, coco_classes,
coco_panoptic_classes, dataset_aliases, get_classes,
imagenet_det_classes, imagenet_vid_classes,
objects365v1_classes, objects365v2_classes,
oid_challenge_classes, oid_v6_classes, voc_classes)
from .mean_ap import average_precision, eval_map, print_map_summary
from .panoptic_utils import (INSTANCE_OFFSET, pq_compute_multi_core,
pq_compute_single_core)
from .recall import (eval_recalls, plot_iou_recall, plot_num_recall,
print_recall_summary)
__all__ = [
'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes',
'coco_classes', 'cityscapes_classes', 'dataset_aliases', 'get_classes',
'average_precision', 'eval_map', 'print_map_summary', 'eval_recalls',
'print_recall_summary', 'plot_num_recall', 'plot_iou_recall',
'oid_v6_classes', 'oid_challenge_classes', 'INSTANCE_OFFSET',
'pq_compute_single_core', 'pq_compute_multi_core', 'bbox_overlaps',
'objects365v1_classes', 'objects365v2_classes', 'coco_panoptic_classes',
'evaluateImgLists'
]
|
_base_ = './atss_r50_fpn_8xb8-amp-lsj-200e_coco.py'
model = dict(
backbone=dict(
depth=18,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
neck=dict(in_channels=[64, 128, 256, 512]))
|
_base_ = './atss_r50_fpn_lsj_200e_8x8_fp16_coco.py'
model = dict(
backbone=dict(
depth=18,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
neck=dict(in_channels=[64, 128, 256, 512]))
|
from .gateway import HTTPGateway
|
import asyncio
import os
from jina import __default_host__
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.gateway.http.app import get_fastapi_app
__all__ = ['HTTPGatewayRuntime']
from jina.serve.runtimes.gateway.http.gateway import HTTPGateway
class HTTPGatewayRuntime(GatewayRuntime):
"""Runtime for HTTP interface."""
async def async_setup(self):
"""
The async method setup the runtime.
Setup the uvicorn server.
"""
self.gateway = HTTPGateway(
name=self.name,
port=self.args.port,
title=self.args.title,
description=self.args.description,
no_debug_endpoints=self.args.no_debug_endpoints,
no_crud_endpoints=self.args.no_crud_endpoints,
expose_endpoints=self.args.expose_endpoints,
expose_graphql_endpoint=self.args.expose_graphql_endpoint,
cors=self.args.cors,
ssl_keyfile=self.args.ssl_keyfile,
ssl_certfile=self.args.ssl_certfile,
uvicorn_kwargs=self.args.uvicorn_kwargs,
)
self.gateway.set_streamer(
args=self.args,
timeout_send=self.timeout_send,
metrics_registry=self.metrics_registry,
runtime_name=self.args.name,
)
await self.gateway.setup_server()
async def _wait_for_cancel(self):
"""Do NOT override this method when inheriting from :class:`GatewayPod`"""
# handle terminate signals
while not self.is_cancel.is_set() and not self.gateway.should_exit:
await asyncio.sleep(0.1)
await self.async_cancel()
async def async_teardown(self):
"""Shutdown the server."""
await self.gateway.teardown()
async def async_cancel(self):
"""Stop the server."""
await self.gateway.stop_server()
async def async_run_forever(self):
"""Running method of the server."""
await self.gateway.run_server()
|
# Copyright (c) OpenMMLab. All rights reserved.
from .averaged_model import (ExponentialMovingAverage, MomentumAnnealingEMA,
StochasticWeightAverage)
from .wrappers import (MMDataParallel, MMDistributedDataParallel,
is_model_wrapper)
__all__ = [
'MMDistributedDataParallel', 'MMDataParallel', 'is_model_wrapper',
'StochasticWeightAverage', 'ExponentialMovingAverage',
'MomentumAnnealingEMA'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .wrappers import (MMDataParallel, MMDistributedDataParallel,
is_model_wrapper)
__all__ = ['MMDistributedDataParallel', 'MMDataParallel', 'is_model_wrapper']
|
from langchain_core.embeddings import DeterministicFakeEmbedding, Embeddings
from langchain_tests.integration_tests import EmbeddingsIntegrationTests
from langchain_tests.unit_tests import EmbeddingsUnitTests
class TestFakeEmbeddingsUnit(EmbeddingsUnitTests):
@property
def embeddings_class(self) -> type[Embeddings]:
return DeterministicFakeEmbedding
@property
def embedding_model_params(self) -> dict:
return {"size": 6} # embedding dimension
class TestFakeEmbeddingsIntegration(EmbeddingsIntegrationTests):
@property
def embeddings_class(self) -> type[Embeddings]:
return DeterministicFakeEmbedding
@property
def embedding_model_params(self) -> dict:
return {"size": 6}
|
from typing import Type
from langchain_core.embeddings import DeterministicFakeEmbedding, Embeddings
from langchain_tests.integration_tests import EmbeddingsIntegrationTests
from langchain_tests.unit_tests import EmbeddingsUnitTests
class TestFakeEmbeddingsUnit(EmbeddingsUnitTests):
@property
def embeddings_class(self) -> Type[Embeddings]:
return DeterministicFakeEmbedding
@property
def embedding_model_params(self) -> dict:
return {"size": 6} # embedding dimension
class TestFakeEmbeddingsIntegration(EmbeddingsIntegrationTests):
@property
def embeddings_class(self) -> Type[Embeddings]:
return DeterministicFakeEmbedding
@property
def embedding_model_params(self) -> dict:
return {"size": 6}
|
"""Hive data reader."""
from typing import List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class HiveReader(BaseReader):
"""
Read documents from a Hive.
These documents can then be used in a downstream Llama Index data structure.
Args:
host : What host HiveServer2 runs on
port : The port Hive Server runs on. Defaults to 10000.
auth : The value of hive.server2.authentication used by HiveServer2.
Defaults to ``NONE``
database: the database name
password: Use with auth='LDAP' or auth='CUSTOM' only
"""
def __init__(
self,
host: str,
port: Optional[int] = None,
database: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
auth: Optional[str] = None,
):
"""Initialize with parameters."""
try:
from pyhive import hive
except ImportError:
raise ImportError(
"`hive` package not found, please run `pip install pyhive`"
)
self.con = hive.Connection(
host=host,
port=port,
username=username,
database=database,
auth=auth,
password=password,
)
def load_data(self, query: str) -> List[Document]:
"""
Read data from the Hive.
Args:
query (str): The query used to query data from Hive
Returns:
List[Document]: A list of documents.
"""
try:
cursor = self.con.cursor().execute(query)
cursor.execute(query)
rows = cursor.fetchall()
except Exception:
raise Exception(
"Throws Exception in execution, please check your connection params and query "
)
documents = []
for row in rows:
documents = Document(text=row)
return documents
|
"""Hive data reader."""
from typing import List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class HiveReader(BaseReader):
"""
Read documents from a Hive.
These documents can then be used in a downstream Llama Index data structure.
Args:
host : What host HiveServer2 runs on
port : The port Hive Server runs on. Defaults to 10000.
auth : The value of hive.server2.authentication used by HiveServer2.
Defaults to ``NONE``
database: the database name
password: Use with auth='LDAP' or auth='CUSTOM' only
"""
def __init__(
self,
host: str,
port: Optional[int] = None,
database: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
auth: Optional[str] = None,
):
"""Initialize with parameters."""
try:
from pyhive import hive
except ImportError:
raise ImportError(
"`hive` package not found, please run `pip install pyhive`"
)
self.con = hive.Connection(
host=host,
port=port,
username=username,
database=database,
auth=auth,
password=password,
)
def load_data(self, query: str) -> List[Document]:
"""Read data from the Hive.
Args:
query (str): The query used to query data from Hive
Returns:
List[Document]: A list of documents.
"""
try:
cursor = self.con.cursor().execute(query)
cursor.execute(query)
rows = cursor.fetchall()
except Exception:
raise Exception(
"Throws Exception in execution, please check your connection params and query "
)
documents = []
for row in rows:
documents = Document(text=row)
return documents
|
from __future__ import annotations
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
class CrossEncoderTrainingArguments(SentenceTransformerTrainingArguments):
r"""
CrossEncoderTrainingArguments extends :class:`~transformers.TrainingArguments` with additional arguments
specific to Sentence Transformers. See :class:`~transformers.TrainingArguments` for the complete list of
available arguments.
Args:
output_dir (`str`):
The output directory where the model checkpoints will be written.
prompts (`Union[Dict[str, Dict[str, str]], Dict[str, str], str]`, *optional*):
The prompts to use for each column in the training, evaluation and test datasets. Four formats are accepted:
1. `str`: A single prompt to use for all columns in the datasets, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
2. `Dict[str, str]`: A dictionary mapping column names to prompts, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
3. `Dict[str, str]`: A dictionary mapping dataset names to prompts. This should only be used if your training/evaluation/test
datasets are a :class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
4. `Dict[str, Dict[str, str]]`: A dictionary mapping dataset names to dictionaries mapping column names to
prompts. This should only be used if your training/evaluation/test datasets are a
:class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
batch_sampler (Union[:class:`~sentence_transformers.training_args.BatchSamplers`, `str`], *optional*):
The batch sampler to use. See :class:`~sentence_transformers.training_args.BatchSamplers` for valid options.
Defaults to ``BatchSamplers.BATCH_SAMPLER``.
multi_dataset_batch_sampler (Union[:class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`, `str`], *optional*):
The multi-dataset batch sampler to use. See :class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`
for valid options. Defaults to ``MultiDatasetBatchSamplers.PROPORTIONAL``.
learning_rate_mapping (`Optional[Dict[str, float]]`, *optional*):
A mapping of parameter name regular expressions to learning rates. This allows you to set different
learning rates for different parts of the model, e.g., `{'IDF\.*': 1e-3}` for the IDF module. This is
useful when you want to fine-tune specific parts of the model with different learning rates.
"""
|
from __future__ import annotations
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
class CrossEncoderTrainingArguments(SentenceTransformerTrainingArguments):
"""
CrossEncoderTrainingArguments extends :class:`~transformers.TrainingArguments` with additional arguments
specific to Sentence Transformers. See :class:`~transformers.TrainingArguments` for the complete list of
available arguments.
Args:
output_dir (`str`):
The output directory where the model checkpoints will be written.
prompts (`Union[Dict[str, Dict[str, str]], Dict[str, str], str]`, *optional*):
The prompts to use for each column in the training, evaluation and test datasets. Four formats are accepted:
1. `str`: A single prompt to use for all columns in the datasets, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
2. `Dict[str, str]`: A dictionary mapping column names to prompts, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
3. `Dict[str, str]`: A dictionary mapping dataset names to prompts. This should only be used if your training/evaluation/test
datasets are a :class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
4. `Dict[str, Dict[str, str]]`: A dictionary mapping dataset names to dictionaries mapping column names to
prompts. This should only be used if your training/evaluation/test datasets are a
:class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
batch_sampler (Union[:class:`~sentence_transformers.training_args.BatchSamplers`, `str`], *optional*):
The batch sampler to use. See :class:`~sentence_transformers.training_args.BatchSamplers` for valid options.
Defaults to ``BatchSamplers.BATCH_SAMPLER``.
multi_dataset_batch_sampler (Union[:class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`, `str`], *optional*):
The multi-dataset batch sampler to use. See :class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`
for valid options. Defaults to ``MultiDatasetBatchSamplers.PROPORTIONAL``.
"""
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/lvis_v1_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
roi_head=dict(
bbox_head=dict(
num_classes=1203,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0)),
mask_head=dict(num_classes=1203)),
test_cfg=dict(
rcnn=dict(
score_thr=0.0001,
# LVIS allows up to 300
max_per_img=300)))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))
train_cfg = dict(val_interval=24)
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/lvis_v1_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
roi_head=dict(
bbox_head=dict(
num_classes=1203,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0)),
mask_head=dict(num_classes=1203)),
test_cfg=dict(
rcnn=dict(
score_thr=0.0001,
# LVIS allows up to 300
max_per_img=300)))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))
train_cfg = dict(val_interval=24)
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from diffusers import (
MotionAdapter,
)
from diffusers.utils.testing_utils import (
enable_full_determinism,
)
enable_full_determinism()
class MotionAdapterSingleFileTests(unittest.TestCase):
model_class = MotionAdapter
def test_single_file_components_version_v1_5(self):
ckpt_path = "https://huggingface.co/guoyww/animatediff/blob/main/mm_sd_v15.ckpt"
repo_id = "guoyww/animatediff-motion-adapter-v1-5"
model = self.model_class.from_pretrained(repo_id)
model_single_file = self.model_class.from_single_file(ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between pretrained loading and single file loading"
)
def test_single_file_components_version_v1_5_2(self):
ckpt_path = "https://huggingface.co/guoyww/animatediff/blob/main/mm_sd_v15_v2.ckpt"
repo_id = "guoyww/animatediff-motion-adapter-v1-5-2"
model = self.model_class.from_pretrained(repo_id)
model_single_file = self.model_class.from_single_file(ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between pretrained loading and single file loading"
)
def test_single_file_components_version_v1_5_3(self):
ckpt_path = "https://huggingface.co/guoyww/animatediff/blob/main/v3_sd15_mm.ckpt"
repo_id = "guoyww/animatediff-motion-adapter-v1-5-3"
model = self.model_class.from_pretrained(repo_id)
model_single_file = self.model_class.from_single_file(ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between pretrained loading and single file loading"
)
def test_single_file_components_version_sdxl_beta(self):
ckpt_path = "https://huggingface.co/guoyww/animatediff/blob/main/mm_sdxl_v10_beta.ckpt"
repo_id = "guoyww/animatediff-motion-adapter-sdxl-beta"
model = self.model_class.from_pretrained(repo_id)
model_single_file = self.model_class.from_single_file(ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between pretrained loading and single file loading"
)
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from diffusers import (
MotionAdapter,
)
from diffusers.utils.testing_utils import (
enable_full_determinism,
)
enable_full_determinism()
class MotionAdapterSingleFileTests(unittest.TestCase):
model_class = MotionAdapter
def test_single_file_components_version_v1_5(self):
ckpt_path = "https://huggingface.co/guoyww/animatediff/blob/main/mm_sd_v15.ckpt"
repo_id = "guoyww/animatediff-motion-adapter-v1-5"
model = self.model_class.from_pretrained(repo_id)
model_single_file = self.model_class.from_single_file(ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert (
model.config[param_name] == param_value
), f"{param_name} differs between pretrained loading and single file loading"
def test_single_file_components_version_v1_5_2(self):
ckpt_path = "https://huggingface.co/guoyww/animatediff/blob/main/mm_sd_v15_v2.ckpt"
repo_id = "guoyww/animatediff-motion-adapter-v1-5-2"
model = self.model_class.from_pretrained(repo_id)
model_single_file = self.model_class.from_single_file(ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert (
model.config[param_name] == param_value
), f"{param_name} differs between pretrained loading and single file loading"
def test_single_file_components_version_v1_5_3(self):
ckpt_path = "https://huggingface.co/guoyww/animatediff/blob/main/v3_sd15_mm.ckpt"
repo_id = "guoyww/animatediff-motion-adapter-v1-5-3"
model = self.model_class.from_pretrained(repo_id)
model_single_file = self.model_class.from_single_file(ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert (
model.config[param_name] == param_value
), f"{param_name} differs between pretrained loading and single file loading"
def test_single_file_components_version_sdxl_beta(self):
ckpt_path = "https://huggingface.co/guoyww/animatediff/blob/main/mm_sdxl_v10_beta.ckpt"
repo_id = "guoyww/animatediff-motion-adapter-sdxl-beta"
model = self.model_class.from_pretrained(repo_id)
model_single_file = self.model_class.from_single_file(ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert (
model.config[param_name] == param_value
), f"{param_name} differs between pretrained loading and single file loading"
|
from typing import Optional
import pytest
import torch
from docarray import BaseDocument, DocumentArray
from docarray.array.abstract_array import AnyDocumentArray
from docarray.documents import Text
from docarray.typing import TorchTensor
num_docs = 5
num_sub_docs = 2
num_sub_sub_docs = 3
@pytest.fixture
def multi_model_docs():
class SubSubDoc(BaseDocument):
sub_sub_text: Text
sub_sub_tensor: TorchTensor[2]
class SubDoc(BaseDocument):
sub_text: Text
sub_da: DocumentArray[SubSubDoc]
class MultiModalDoc(BaseDocument):
mm_text: Text
mm_tensor: Optional[TorchTensor[3, 2, 2]]
mm_da: DocumentArray[SubDoc]
docs = DocumentArray[MultiModalDoc](
[
MultiModalDoc(
mm_text=Text(text=f'hello{i}'),
mm_da=[
SubDoc(
sub_text=Text(text=f'sub_{i}_1'),
sub_da=DocumentArray[SubSubDoc](
[
SubSubDoc(
sub_sub_text=Text(text='subsub'),
sub_sub_tensor=torch.zeros(2),
)
for _ in range(num_sub_sub_docs)
]
),
)
for _ in range(num_sub_docs)
],
)
for i in range(num_docs)
]
)
return docs
@pytest.mark.parametrize(
'access_path,len_result',
[
('mm_text', num_docs), # List of 5 Text objs
('mm_text.text', num_docs), # List of 5 strings
('mm_da', num_docs * num_sub_docs), # List of 5 * 2 SubDoc objs
('mm_da.sub_text', num_docs * num_sub_docs), # List of 5 * 2 Text objs
(
'mm_da.sub_da',
num_docs * num_sub_docs * num_sub_sub_docs,
), # List of 5 * 2 * 3 SubSubDoc objs
(
'mm_da.sub_da.sub_sub_text',
num_docs * num_sub_docs * num_sub_sub_docs,
), # List of 5 * 2 * 3 Text objs
],
)
def test_traverse_flat(multi_model_docs, access_path, len_result):
traversed = multi_model_docs.traverse_flat(access_path)
assert len(traversed) == len_result
def test_traverse_stacked_da():
class Image(BaseDocument):
tensor: TorchTensor[3, 224, 224]
batch = DocumentArray[Image](
[
Image(
tensor=torch.zeros(3, 224, 224),
)
for _ in range(2)
]
)
batch_stacked = batch.stack()
tensors = batch_stacked.traverse_flat(access_path='tensor')
assert tensors.shape == (2, 3, 224, 224)
assert isinstance(tensors, torch.Tensor)
@pytest.mark.parametrize(
'input_list,output_list',
[
([1, 2, 3], [1, 2, 3]),
([[1], [2], [3]], [1, 2, 3]),
([[[1]], [[2]], [[3]]], [[1], [2], [3]]),
],
)
def test_flatten_one_level(input_list, output_list):
flattened = AnyDocumentArray._flatten_one_level(sequence=input_list)
assert flattened == output_list
def test_flatten_one_level_list_of_da():
doc = BaseDocument()
input_list = [DocumentArray([doc, doc, doc])]
flattened = AnyDocumentArray._flatten_one_level(sequence=input_list)
assert flattened == [doc, doc, doc]
|
from typing import Optional
import pytest
import torch
from docarray import BaseDocument, DocumentArray, Text
from docarray.array.abstract_array import AnyDocumentArray
from docarray.typing import TorchTensor
num_docs = 5
num_sub_docs = 2
num_sub_sub_docs = 3
@pytest.fixture
def multi_model_docs():
class SubSubDoc(BaseDocument):
sub_sub_text: Text
sub_sub_tensor: TorchTensor[2]
class SubDoc(BaseDocument):
sub_text: Text
sub_da: DocumentArray[SubSubDoc]
class MultiModalDoc(BaseDocument):
mm_text: Text
mm_tensor: Optional[TorchTensor[3, 2, 2]]
mm_da: DocumentArray[SubDoc]
docs = DocumentArray[MultiModalDoc](
[
MultiModalDoc(
mm_text=Text(text=f'hello{i}'),
mm_da=[
SubDoc(
sub_text=Text(text=f'sub_{i}_1'),
sub_da=DocumentArray[SubSubDoc](
[
SubSubDoc(
sub_sub_text=Text(text='subsub'),
sub_sub_tensor=torch.zeros(2),
)
for _ in range(num_sub_sub_docs)
]
),
)
for _ in range(num_sub_docs)
],
)
for i in range(num_docs)
]
)
return docs
@pytest.mark.parametrize(
'access_path,len_result',
[
('mm_text', num_docs), # List of 5 Text objs
('mm_text.text', num_docs), # List of 5 strings
('mm_da', num_docs * num_sub_docs), # List of 5 * 2 SubDoc objs
('mm_da.sub_text', num_docs * num_sub_docs), # List of 5 * 2 Text objs
(
'mm_da.sub_da',
num_docs * num_sub_docs * num_sub_sub_docs,
), # List of 5 * 2 * 3 SubSubDoc objs
(
'mm_da.sub_da.sub_sub_text',
num_docs * num_sub_docs * num_sub_sub_docs,
), # List of 5 * 2 * 3 Text objs
],
)
def test_traverse_flat(multi_model_docs, access_path, len_result):
traversed = multi_model_docs.traverse_flat(access_path)
assert len(traversed) == len_result
def test_traverse_stacked_da():
class Image(BaseDocument):
tensor: TorchTensor[3, 224, 224]
batch = DocumentArray[Image](
[
Image(
tensor=torch.zeros(3, 224, 224),
)
for _ in range(2)
]
)
batch_stacked = batch.stack()
tensors = batch_stacked.traverse_flat(access_path='tensor')
assert tensors.shape == (2, 3, 224, 224)
assert isinstance(tensors, torch.Tensor)
@pytest.mark.parametrize(
'input_list,output_list',
[
([1, 2, 3], [1, 2, 3]),
([[1], [2], [3]], [1, 2, 3]),
([[[1]], [[2]], [[3]]], [[1], [2], [3]]),
],
)
def test_flatten_one_level(input_list, output_list):
flattened = AnyDocumentArray._flatten_one_level(sequence=input_list)
assert flattened == output_list
def test_flatten_one_level_list_of_da():
doc = BaseDocument()
input_list = [DocumentArray([doc, doc, doc])]
flattened = AnyDocumentArray._flatten_one_level(sequence=input_list)
assert flattened == [doc, doc, doc]
|
import os
import torch
import torchaudio.prototype.transforms as T
import torchaudio.transforms as transforms
from torchaudio_unittest.common_utils import nested_params, TorchaudioTestCase
class BatchConsistencyTest(TorchaudioTestCase):
def assert_batch_consistency(self, transform, batch, *args, atol=1e-8, rtol=1e-5, seed=42, **kwargs):
n = batch.size(0)
# Compute items separately, then batch the result
torch.random.manual_seed(seed)
items_input = batch.clone()
items_result = torch.stack([transform(items_input[i], *args, **kwargs) for i in range(n)])
# Batch the input and run
torch.random.manual_seed(seed)
batch_input = batch.clone()
batch_result = transform(batch_input, *args, **kwargs)
self.assertEqual(items_input, batch_input, rtol=rtol, atol=atol)
self.assertEqual(items_result, batch_result, rtol=rtol, atol=atol)
@nested_params(
[T.Convolve, T.FFTConvolve],
["full", "valid", "same"],
)
def test_Convolve(self, cls, mode):
leading_dims = (2, 3)
L_x, L_y = 89, 43
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
convolve = cls(mode=mode)
actual = convolve(x, y)
expected = torch.stack(
[
torch.stack(
[convolve(x[i, j].unsqueeze(0), y[i, j].unsqueeze(0)).squeeze(0) for j in range(leading_dims[1])]
)
for i in range(leading_dims[0])
]
)
self.assertEqual(expected, actual)
def test_batch_BarkScale(self):
specgram = torch.randn(3, 2, 201, 256)
atol = 1e-6 if os.name == "nt" else 1e-8
transform = T.BarkScale()
self.assert_batch_consistency(transform, specgram, atol=atol)
def test_batch_InverseBarkScale(self):
n_barks = 32
n_stft = 5
bark_spec = torch.randn(3, 2, n_barks, 32) ** 2
transform = transforms.InverseMelScale(n_stft, n_barks)
# Because InverseBarkScale runs SGD on randomly initialized values so they do not yield
# exactly same result. For this reason, tolerance is very relaxed here.
self.assert_batch_consistency(transform, bark_spec, atol=1.0, rtol=1e-5)
def test_Speed(self):
B = 5
orig_freq = 100
factor = 0.8
input_lengths = torch.randint(1, 1000, (B,), dtype=torch.int32)
speed = T.Speed(orig_freq, factor)
unbatched_input = [torch.ones((int(length),)) * 1.0 for length in input_lengths]
batched_input = torch.nn.utils.rnn.pad_sequence(unbatched_input, batch_first=True)
output, output_lengths = speed(batched_input, input_lengths)
unbatched_output = []
unbatched_output_lengths = []
for idx in range(len(unbatched_input)):
w, l = speed(unbatched_input[idx], input_lengths[idx])
unbatched_output.append(w)
unbatched_output_lengths.append(l)
self.assertEqual(output_lengths, torch.stack(unbatched_output_lengths))
for idx in range(len(unbatched_output)):
w, l = output[idx], output_lengths[idx]
self.assertEqual(unbatched_output[idx], w[:l])
def test_SpeedPerturbation(self):
B = 5
orig_freq = 100
factor = 0.8
input_lengths = torch.randint(1, 1000, (B,), dtype=torch.int32)
speed = T.SpeedPerturbation(orig_freq, [factor])
unbatched_input = [torch.ones((int(length),)) * 1.0 for length in input_lengths]
batched_input = torch.nn.utils.rnn.pad_sequence(unbatched_input, batch_first=True)
output, output_lengths = speed(batched_input, input_lengths)
unbatched_output = []
unbatched_output_lengths = []
for idx in range(len(unbatched_input)):
w, l = speed(unbatched_input[idx], input_lengths[idx])
unbatched_output.append(w)
unbatched_output_lengths.append(l)
self.assertEqual(output_lengths, torch.stack(unbatched_output_lengths))
for idx in range(len(unbatched_output)):
w, l = output[idx], output_lengths[idx]
self.assertEqual(unbatched_output[idx], w[:l])
|
import os
import torch
import torchaudio.prototype.transforms as T
import torchaudio.transforms as transforms
from torchaudio_unittest.common_utils import nested_params, TorchaudioTestCase
class BatchConsistencyTest(TorchaudioTestCase):
def assert_batch_consistency(self, transform, batch, *args, atol=1e-8, rtol=1e-5, seed=42, **kwargs):
n = batch.size(0)
# Compute items separately, then batch the result
torch.random.manual_seed(seed)
items_input = batch.clone()
items_result = torch.stack([transform(items_input[i], *args, **kwargs) for i in range(n)])
# Batch the input and run
torch.random.manual_seed(seed)
batch_input = batch.clone()
batch_result = transform(batch_input, *args, **kwargs)
self.assertEqual(items_input, batch_input, rtol=rtol, atol=atol)
self.assertEqual(items_result, batch_result, rtol=rtol, atol=atol)
@nested_params(
[T.Convolve, T.FFTConvolve],
["full", "valid", "same"],
)
def test_Convolve(self, cls, mode):
leading_dims = (2, 3)
L_x, L_y = 89, 43
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
convolve = cls(mode=mode)
actual = convolve(x, y)
expected = torch.stack(
[
torch.stack(
[convolve(x[i, j].unsqueeze(0), y[i, j].unsqueeze(0)).squeeze(0) for j in range(leading_dims[1])]
)
for i in range(leading_dims[0])
]
)
self.assertEqual(expected, actual)
def test_batch_BarkScale(self):
specgram = torch.randn(3, 2, 201, 256)
atol = 1e-6 if os.name == "nt" else 1e-8
transform = T.BarkScale()
self.assert_batch_consistency(transform, specgram, atol=atol)
def test_batch_InverseBarkScale(self):
n_barks = 32
n_stft = 5
bark_spec = torch.randn(3, 2, n_barks, 32) ** 2
transform = transforms.InverseMelScale(n_stft, n_barks)
# Because InverseBarkScale runs SGD on randomly initialized values so they do not yield
# exactly same result. For this reason, tolerance is very relaxed here.
self.assert_batch_consistency(transform, bark_spec, atol=1.0, rtol=1e-5)
|
from __future__ import annotations
import sys
from .BoW import BoW
from .CLIPModel import CLIPModel
from .CNN import CNN
from .Dense import Dense
from .Dropout import Dropout
from .InputModule import InputModule
from .LayerNorm import LayerNorm
from .LSTM import LSTM
from .Module import Module
from .Normalize import Normalize
from .Pooling import Pooling
from .Router import Asym, Router
from .StaticEmbedding import StaticEmbedding
from .Transformer import Transformer
from .WeightedLayerPooling import WeightedLayerPooling
from .WordEmbeddings import WordEmbeddings
from .WordWeights import WordWeights
sys.modules["sentence_transformers.models.Asym"] = sys.modules["sentence_transformers.models.Router"]
__all__ = [
"Transformer",
"StaticEmbedding",
"Asym",
"BoW",
"CNN",
"Dense",
"Dropout",
"LayerNorm",
"LSTM",
"Normalize",
"Pooling",
"WeightedLayerPooling",
"WordEmbeddings",
"WordWeights",
"CLIPModel",
"Module",
"InputModule",
"Router",
]
|
from __future__ import annotations
from .Asym import Asym
from .BoW import BoW
from .CLIPModel import CLIPModel
from .CNN import CNN
from .Dense import Dense
from .Dropout import Dropout
from .InputModule import InputModule
from .LayerNorm import LayerNorm
from .LSTM import LSTM
from .Module import Module
from .Normalize import Normalize
from .Pooling import Pooling
from .StaticEmbedding import StaticEmbedding
from .Transformer import Transformer
from .WeightedLayerPooling import WeightedLayerPooling
from .WordEmbeddings import WordEmbeddings
from .WordWeights import WordWeights
__all__ = [
"Transformer",
"StaticEmbedding",
"Asym",
"BoW",
"CNN",
"Dense",
"Dropout",
"LayerNorm",
"LSTM",
"Normalize",
"Pooling",
"WeightedLayerPooling",
"WordEmbeddings",
"WordWeights",
"CLIPModel",
"Module",
"InputModule",
]
|
import logging
import random
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseInformationRetrievalEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load the NFcorpus IR dataset (https://huggingface.co/datasets/BeIR/nfcorpus, https://huggingface.co/datasets/BeIR/nfcorpus-qrels)
corpus = load_dataset("BeIR/nfcorpus", "corpus", split="corpus")
queries = load_dataset("BeIR/nfcorpus", "queries", split="queries")
relevant_docs_data = load_dataset("BeIR/nfcorpus-qrels", split="test")
# For this dataset, we want to concatenate the title and texts for the corpus
corpus = corpus.map(lambda x: {"text": x["title"] + " " + x["text"]}, remove_columns=["title"])
# Shrink the corpus size heavily to only the relevant documents + 1,000 random documents
required_corpus_ids = set(map(str, relevant_docs_data["corpus-id"]))
required_corpus_ids |= set(random.sample(corpus["_id"], k=1000))
corpus = corpus.filter(lambda x: x["_id"] in required_corpus_ids)
# Convert the datasets to dictionaries
corpus = dict(zip(corpus["_id"], corpus["text"])) # Our corpus (cid => document)
queries = dict(zip(queries["_id"], queries["text"])) # Our queries (qid => question)
relevant_docs = {} # Query ID to relevant documents (qid => set([relevant_cids])
for qid, corpus_ids in zip(relevant_docs_data["query-id"], relevant_docs_data["corpus-id"]):
qid = str(qid)
corpus_ids = str(corpus_ids)
if qid not in relevant_docs:
relevant_docs[qid] = set()
relevant_docs[qid].add(corpus_ids)
# Given queries, a corpus and a mapping with relevant documents, the SparseInformationRetrievalEvaluator computes different IR metrics.
ir_evaluator = SparseInformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="BeIR-nfcorpus-subset-test",
show_progress_bar=True,
batch_size=16,
)
# Run evaluation
results = ir_evaluator(model)
"""
Queries: 323
Corpus: 3269
Score-Function: dot
Accuracy@1: 50.77%
Accuracy@3: 64.40%
Accuracy@5: 66.87%
Accuracy@10: 71.83%
Precision@1: 50.77%
Precision@3: 40.45%
Precision@5: 34.06%
Precision@10: 25.98%
Recall@1: 6.27%
Recall@3: 11.69%
Recall@5: 13.74%
Recall@10: 17.23%
MRR@10: 0.5814
NDCG@10: 0.3621
MAP@100: 0.1838
Model Query Sparsity: Active Dimensions: 40.0, Sparsity Ratio: 0.9987
Model Corpus Sparsity: Active Dimensions: 206.2, Sparsity Ratio: 0.9932
"""
# Print the results
print(f"Primary metric: {ir_evaluator.primary_metric}")
# => Primary metric: BeIR-nfcorpus-subset-test_dot_ndcg@10
print(f"Primary metric value: {results[ir_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.Primary metric value: 0.3621
|
import logging
import random
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseInformationRetrievalEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load the NFcorpus IR dataset (https://huggingface.co/datasets/BeIR/nfcorpus, https://huggingface.co/datasets/BeIR/nfcorpus-qrels)
corpus = load_dataset("BeIR/nfcorpus", "corpus", split="corpus")
queries = load_dataset("BeIR/nfcorpus", "queries", split="queries")
relevant_docs_data = load_dataset("BeIR/nfcorpus-qrels", split="test")
# For this dataset, we want to concatenate the title and texts for the corpus
corpus = corpus.map(lambda x: {"text": x["title"] + " " + x["text"]}, remove_columns=["title"])
# Shrink the corpus size heavily to only the relevant documents + 1,000 random documents
required_corpus_ids = set(map(str, relevant_docs_data["corpus-id"]))
required_corpus_ids |= set(random.sample(corpus["_id"], k=1000))
corpus = corpus.filter(lambda x: x["_id"] in required_corpus_ids)
# Convert the datasets to dictionaries
corpus = dict(zip(corpus["_id"], corpus["text"])) # Our corpus (cid => document)
queries = dict(zip(queries["_id"], queries["text"])) # Our queries (qid => question)
relevant_docs = {} # Query ID to relevant documents (qid => set([relevant_cids])
for qid, corpus_ids in zip(relevant_docs_data["query-id"], relevant_docs_data["corpus-id"]):
qid = str(qid)
corpus_ids = str(corpus_ids)
if qid not in relevant_docs:
relevant_docs[qid] = set()
relevant_docs[qid].add(corpus_ids)
# Given queries, a corpus and a mapping with relevant documents, the SparseInformationRetrievalEvaluator computes different IR metrics.
ir_evaluator = SparseInformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="BeIR-nfcorpus-subset-test",
show_progress_bar=True,
batch_size=16,
)
# Run evaluation
results = ir_evaluator(model)
"""
Queries: 323
Corpus: 3269
Score-Function: dot
Accuracy@1: 50.46%
Accuracy@3: 64.40%
Accuracy@5: 67.49%
Accuracy@10: 72.14%
Precision@1: 50.46%
Precision@3: 40.87%
Precision@5: 34.12%
Precision@10: 26.10%
Recall@1: 6.11%
Recall@3: 11.73%
Recall@5: 13.64%
Recall@10: 17.24%
MRR@10: 0.5801
NDCG@10: 0.3626
MAP@100: 0.1832
Model Query Sparsity: Active Dimensions: 43.1, Sparsity Ratio: 0.9986
Model Corpus Sparsity: Active Dimensions: 207.0, Sparsity Ratio: 0.9932
"""
# Print the results
print(f"Primary metric: {ir_evaluator.primary_metric}")
# => Primary metric: BeIR-nfcorpus-subset-test_dot_ndcg@10
print(f"Primary metric value: {results[ir_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.3626
|
from abc import abstractmethod
from typing import TYPE_CHECKING, Generic, List, Sequence, Type, TypeVar, Union
from docarray.document import BaseDocument
from docarray.typing.abstract_type import AbstractType
if TYPE_CHECKING:
from docarray.proto import DocumentArrayProto, NodeProto
from docarray.typing import NdArray, TorchTensor
T = TypeVar('T', bound='AnyDocumentArray')
T_doc = TypeVar('T_doc', bound=BaseDocument)
class AnyDocumentArray(Sequence[BaseDocument], Generic[T_doc], AbstractType):
document_type: Type[BaseDocument]
def __class_getitem__(cls, item: Type[BaseDocument]):
if not issubclass(item, BaseDocument):
raise ValueError(
f'{cls.__name__}[item] item should be a Document not a {item} '
)
class _DocumentArrayTyped(cls): # type: ignore
document_type: Type[BaseDocument] = item
for field in _DocumentArrayTyped.document_type.__fields__.keys():
def _property_generator(val: str):
def _getter(self):
return self._get_array_attribute(val)
def _setter(self, value):
self._set_array_attribute(val, value)
# need docstring for the property
return property(fget=_getter, fset=_setter)
setattr(_DocumentArrayTyped, field, _property_generator(field))
# this generates property on the fly based on the schema of the item
_DocumentArrayTyped.__name__ = f'DocumentArray[{item.__name__}]'
_DocumentArrayTyped.__qualname__ = f'DocumentArray[{item.__name__}]'
return _DocumentArrayTyped
@abstractmethod
def _get_array_attribute(
self: T,
field: str,
) -> Union[List, T, 'TorchTensor', 'NdArray']:
"""Return all values of the fields from all docs this array contains
:param field: name of the fields to extract
:return: Returns a list of the field value for each document
in the array like container
"""
...
@abstractmethod
def _set_array_attribute(
self: T,
field: str,
values: Union[List, T, 'TorchTensor', 'NdArray'],
):
"""Set all Documents in this DocumentArray using the passed values
:param field: name of the fields to extract
:values: the values to set at the DocumentArray level
"""
...
@classmethod
@abstractmethod
def from_protobuf(cls: Type[T], pb_msg: 'DocumentArrayProto') -> T:
"""create a Document from a protobuf message"""
...
@abstractmethod
def to_protobuf(self) -> 'DocumentArrayProto':
"""Convert DocumentArray into a Protobuf message"""
...
def _to_node_protobuf(self) -> 'NodeProto':
"""Convert a DocumentArray into a NodeProto protobuf message.
This function should be called when a DocumentArray
is nested into another Document that need to be converted into a protobuf
:return: the nested item protobuf message
"""
from docarray.proto import NodeProto
return NodeProto(chunks=self.to_protobuf())
|
from abc import abstractmethod
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Sequence, Type, Union
from docarray.document import BaseDocument
if TYPE_CHECKING:
from docarray.typing import NdArray, TorchTensor
class AbstractDocumentArray(Sequence):
document_type: Type[BaseDocument]
_columns: Optional[
Dict[str, Union['TorchTensor', 'AbstractDocumentArray', 'NdArray', None]]
] # here columns are the holder of the data in tensor modes
@abstractmethod
def __init__(self, docs: Iterable[BaseDocument]):
...
@abstractmethod
def __class_getitem__(
cls, item: Type[BaseDocument]
) -> Type['AbstractDocumentArray']:
...
@abstractmethod
def is_stacked(self) -> bool:
...
@abstractmethod
def _column_fields(self) -> List[str]:
...
@abstractmethod
def __iter_over_stacked_documents__(self) -> Iterable[BaseDocument]:
...
|
_base_ = 'faster-rcnn_r50-caffe_fpn_ms-1x_coco.py'
max_iter = 90000
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_iter,
by_epoch=False,
milestones=[60000, 80000],
gamma=0.1)
]
train_cfg = dict(
_delete_=True,
type='IterBasedTrainLoop',
max_iters=max_iter,
val_interval=10000)
default_hooks = dict(checkpoint=dict(by_epoch=False, interval=10000))
log_processor = dict(by_epoch=False)
|
_base_ = 'faster-rcnn_r50-caffe_fpn_ms-1x_coco.py'
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[60000, 80000])
# Runner type
runner = dict(_delete_=True, type='IterBasedRunner', max_iters=90000)
checkpoint_config = dict(interval=10000)
evaluation = dict(interval=10000, metric='bbox')
|
from ...utils import is_torch_available
if is_torch_available():
from .auraflow_transformer_2d import AuraFlowTransformer2DModel
from .cogvideox_transformer_3d import CogVideoXTransformer3DModel
from .dit_transformer_2d import DiTTransformer2DModel
from .dual_transformer_2d import DualTransformer2DModel
from .hunyuan_transformer_2d import HunyuanDiT2DModel
from .latte_transformer_3d import LatteTransformer3DModel
from .lumina_nextdit2d import LuminaNextDiT2DModel
from .pixart_transformer_2d import PixArtTransformer2DModel
from .prior_transformer import PriorTransformer
from .sana_transformer import SanaTransformer2DModel
from .stable_audio_transformer import StableAudioDiTModel
from .t5_film_transformer import T5FilmDecoder
from .transformer_2d import Transformer2DModel
from .transformer_allegro import AllegroTransformer3DModel
from .transformer_cogview3plus import CogView3PlusTransformer2DModel
from .transformer_flux import FluxTransformer2DModel
from .transformer_hunyuan_video import HunyuanVideoTransformer3DModel
from .transformer_ltx import LTXVideoTransformer3DModel
from .transformer_mochi import MochiTransformer3DModel
from .transformer_sd3 import SD3Transformer2DModel
from .transformer_temporal import TransformerTemporalModel
|
from ...utils import is_torch_available
if is_torch_available():
from .auraflow_transformer_2d import AuraFlowTransformer2DModel
from .cogvideox_transformer_3d import CogVideoXTransformer3DModel
from .dit_transformer_2d import DiTTransformer2DModel
from .dual_transformer_2d import DualTransformer2DModel
from .hunyuan_transformer_2d import HunyuanDiT2DModel
from .latte_transformer_3d import LatteTransformer3DModel
from .lumina_nextdit2d import LuminaNextDiT2DModel
from .pixart_transformer_2d import PixArtTransformer2DModel
from .prior_transformer import PriorTransformer
from .sana_transformer import SanaTransformer2DModel
from .stable_audio_transformer import StableAudioDiTModel
from .t5_film_transformer import T5FilmDecoder
from .transformer_2d import Transformer2DModel
from .transformer_allegro import AllegroTransformer3DModel
from .transformer_cogview3plus import CogView3PlusTransformer2DModel
from .transformer_flux import FluxTransformer2DModel
from .transformer_ltx import LTXVideoTransformer3DModel
from .transformer_mochi import MochiTransformer3DModel
from .transformer_sd3 import SD3Transformer2DModel
from .transformer_temporal import TransformerTemporalModel
|
from backend.integrations.providers import ProviderName
from backend.util.settings import Config
app_config = Config()
# TODO: add test to assert this matches the actual API route
def webhook_ingress_url(provider_name: ProviderName, webhook_id: str) -> str:
return (
f"{app_config.platform_base_url}/api/integrations/{provider_name.value}"
f"/webhooks/{webhook_id}/ingress"
)
|
from backend.integrations.providers import ProviderName
from backend.util.settings import Config
app_config = Config()
# TODO: add test to assert this matches the actual API route
def webhook_ingress_url(provider_name: ProviderName, webhook_id: str) -> str:
return (
f"{app_config.platform_base_url}/api/integrations/{provider_name}"
f"/webhooks/{webhook_id}/ingress"
)
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmengine.data import InstanceData
from parameterized import parameterized
from mmdet.data_elements.mask import mask_target
from mmdet.models.roi_heads.mask_heads import MaskIoUHead
from mmdet.models.utils import unpack_gt_instances
from mmdet.testing import (demo_mm_inputs, demo_mm_proposals,
demo_mm_sampling_results)
class TestMaskIoUHead(TestCase):
@parameterized.expand(['cpu', 'cuda'])
def test_mask_iou_head_loss_and_target(self, device):
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
mask_iou_head = MaskIoUHead(num_classes=4)
mask_iou_head.to(device=device)
s = 256
image_shapes = [(3, s, s)]
packed_inputs = demo_mm_inputs(
batch_size=1,
image_shapes=image_shapes,
num_items=[1],
num_classes=4,
with_mask=True)
proposals_list = demo_mm_proposals(
image_shapes=image_shapes, num_proposals=100, device=device)
batch_data_samples = []
for i in range(len(packed_inputs)):
batch_data_samples.append(
packed_inputs[i]['data_sample'].to(device=device))
proposals_list[i] = proposals_list[i].to(device=device)
train_cfg = ConfigDict(dict(mask_size=28, mask_thr_binary=0.5))
# prepare ground truth
data_samples = [
inputs['data_sample'].to(device=device) for inputs in packed_inputs
]
(batch_gt_instances, batch_gt_instances_ignore,
_) = unpack_gt_instances(data_samples)
sampling_results = demo_mm_sampling_results(
proposals_list=proposals_list,
batch_gt_instances=batch_gt_instances,
batch_gt_instances_ignore=batch_gt_instances_ignore)
# prepare mask feats, pred and target
pos_proposals = [res.pos_priors for res in sampling_results]
pos_assigned_gt_inds = [
res.pos_assigned_gt_inds for res in sampling_results
]
gt_masks = [res.masks for res in batch_gt_instances]
mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds,
gt_masks, train_cfg)
mask_feats = torch.rand((mask_targets.size(0), 256, 14, 14)).to(device)
mask_preds = torch.rand((mask_targets.size(0), 4, 28, 28)).to(device)
pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
pos_mask_pred = mask_preds[range(mask_preds.size(0)), pos_labels]
mask_iou_pred = mask_iou_head(mask_feats, pos_mask_pred)
pos_mask_iou_pred = mask_iou_pred[range(mask_iou_pred.size(0)),
pos_labels]
mask_iou_head.loss_and_target(pos_mask_iou_pred, pos_mask_pred,
mask_targets, sampling_results,
batch_gt_instances, train_cfg)
@parameterized.expand(['cpu', 'cuda'])
def test_mask_iou_head_predict_by_feat(self, device):
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
mask_iou_head = MaskIoUHead(num_classes=4)
mask_iou_head.to(device=device)
s = 128
num_samples = 2
num_classes = 4
img_metas = {
'img_shape': (s, s, 3),
'scale_factor': (1, 1),
'ori_shape': (s, s, 3)
}
results = InstanceData(metainfo=img_metas)
results.bboxes = torch.rand((num_samples, 4)).to(device)
results.scores = torch.rand((num_samples, )).to(device)
results.labels = torch.randint(
num_classes, (num_samples, ), dtype=torch.long).to(device)
mask_feats = torch.rand((num_samples, 256, 14, 14)).to(device)
mask_preds = torch.rand((num_samples, num_classes, 28, 28)).to(device)
mask_iou_preds = mask_iou_head(
mask_feats, mask_preds[range(results.labels.size(0)),
results.labels])
mask_iou_head.predict_by_feat(
mask_iou_preds=[mask_iou_preds], results_list=[results])
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmengine.data import InstanceData
from parameterized import parameterized
from mmdet.core import mask_target
from mmdet.models.roi_heads.mask_heads import MaskIoUHead
from mmdet.models.utils import unpack_gt_instances
from mmdet.testing import (demo_mm_inputs, demo_mm_proposals,
demo_mm_sampling_results)
class TestMaskIoUHead(TestCase):
@parameterized.expand(['cpu', 'cuda'])
def test_mask_iou_head_loss_and_target(self, device):
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
mask_iou_head = MaskIoUHead(num_classes=4)
mask_iou_head.to(device=device)
s = 256
image_shapes = [(3, s, s)]
packed_inputs = demo_mm_inputs(
batch_size=1,
image_shapes=image_shapes,
num_items=[1],
num_classes=4,
with_mask=True)
proposals_list = demo_mm_proposals(
image_shapes=image_shapes, num_proposals=100, device=device)
batch_data_samples = []
for i in range(len(packed_inputs)):
batch_data_samples.append(
packed_inputs[i]['data_sample'].to(device=device))
proposals_list[i] = proposals_list[i].to(device=device)
train_cfg = ConfigDict(dict(mask_size=28, mask_thr_binary=0.5))
# prepare ground truth
data_samples = [
inputs['data_sample'].to(device=device) for inputs in packed_inputs
]
(batch_gt_instances, batch_gt_instances_ignore,
_) = unpack_gt_instances(data_samples)
sampling_results = demo_mm_sampling_results(
proposals_list=proposals_list,
batch_gt_instances=batch_gt_instances,
batch_gt_instances_ignore=batch_gt_instances_ignore)
# prepare mask feats, pred and target
pos_proposals = [res.pos_priors for res in sampling_results]
pos_assigned_gt_inds = [
res.pos_assigned_gt_inds for res in sampling_results
]
gt_masks = [res.masks for res in batch_gt_instances]
mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds,
gt_masks, train_cfg)
mask_feats = torch.rand((mask_targets.size(0), 256, 14, 14)).to(device)
mask_preds = torch.rand((mask_targets.size(0), 4, 28, 28)).to(device)
pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
pos_mask_pred = mask_preds[range(mask_preds.size(0)), pos_labels]
mask_iou_pred = mask_iou_head(mask_feats, pos_mask_pred)
pos_mask_iou_pred = mask_iou_pred[range(mask_iou_pred.size(0)),
pos_labels]
mask_iou_head.loss_and_target(pos_mask_iou_pred, pos_mask_pred,
mask_targets, sampling_results,
batch_gt_instances, train_cfg)
@parameterized.expand(['cpu', 'cuda'])
def test_mask_iou_head_predict_by_feat(self, device):
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
mask_iou_head = MaskIoUHead(num_classes=4)
mask_iou_head.to(device=device)
s = 128
num_samples = 2
num_classes = 4
img_metas = {
'img_shape': (s, s, 3),
'scale_factor': (1, 1),
'ori_shape': (s, s, 3)
}
results = InstanceData(metainfo=img_metas)
results.bboxes = torch.rand((num_samples, 4)).to(device)
results.scores = torch.rand((num_samples, )).to(device)
results.labels = torch.randint(
num_classes, (num_samples, ), dtype=torch.long).to(device)
mask_feats = torch.rand((num_samples, 256, 14, 14)).to(device)
mask_preds = torch.rand((num_samples, num_classes, 28, 28)).to(device)
mask_iou_preds = mask_iou_head(
mask_feats, mask_preds[range(results.labels.size(0)),
results.labels])
mask_iou_head.predict_by_feat(
mask_iou_preds=[mask_iou_preds], results_list=[results])
|
"""LLM Compiler Output Parser."""
import re
from typing import Any, Dict, List, Sequence
from llama_index.core.tools import BaseTool
from llama_index.core.types import BaseOutputParser
from .schema import JoinerOutput, LLMCompilerParseResult
from .utils import get_graph_dict
THOUGHT_PATTERN = r"Thought: ([^\n]*)"
ACTION_PATTERN = r"\n*(\d+)\. (\w+)\((.*)\)(\s*#\w+\n)?"
# $1 or ${1} -> 1
ID_PATTERN = r"\$\{?(\d+)\}?"
END_OF_PLAN = "<END_OF_PLAN>"
JOINER_REPLAN = "Replan"
def default_dependency_rule(idx: int, args: str) -> bool:
"""Default dependency rule."""
matches = re.findall(ID_PATTERN, args)
numbers = [int(match) for match in matches]
return idx in numbers
class LLMCompilerPlanParser(BaseOutputParser):
"""
LLM Compiler plan output parser.
Directly adapted from source code: https://github.com/SqueezeAILab/LLMCompiler/blob/main/src/llm_compiler/output_parser.py.
"""
def __init__(self, tools: Sequence[BaseTool]):
"""Init params."""
self.tools = tools
def parse(self, text: str) -> Dict[int, Any]:
# 1. search("Ronaldo number of kids") -> 1, "search", '"Ronaldo number of kids"'
# pattern = r"(\d+)\. (\w+)\(([^)]+)\)"
pattern = rf"(?:{THOUGHT_PATTERN}\n)?{ACTION_PATTERN}"
matches = re.findall(pattern, text)
# convert matches to a list of LLMCompilerParseResult
results: List[LLMCompilerParseResult] = []
for match in matches:
thought, idx, tool_name, args, _ = match
idx = int(idx)
results.append(
LLMCompilerParseResult(
thought=thought, idx=idx, tool_name=tool_name, args=args
)
)
# get graph dict
return get_graph_dict(results, self.tools)
### Helper functions
class LLMCompilerJoinerParser(BaseOutputParser):
"""
LLM Compiler output parser for the join step.
Adapted from _parse_joiner_output in
https://github.com/SqueezeAILab/LLMCompiler/blob/main/src/llm_compiler/llm_compiler.py
"""
def parse(self, text: str) -> JoinerOutput:
"""Parse."""
thought, answer, is_replan = "", "", False # default values
raw_answers = text.split("\n")
for answer in raw_answers:
if answer.startswith("Action:"):
answer = answer[answer.find("(") + 1 : answer.find(")")]
is_replan = JOINER_REPLAN in answer
elif answer.startswith("Thought:"):
thought = answer.split("Thought:")[1].strip()
return JoinerOutput(thought=thought, answer=answer, is_replan=is_replan)
|
"""LLM Compiler Output Parser."""
import re
from typing import Any, Dict, List, Sequence
from llama_index.core.tools import BaseTool
from llama_index.core.types import BaseOutputParser
from .schema import JoinerOutput, LLMCompilerParseResult
from .utils import get_graph_dict
THOUGHT_PATTERN = r"Thought: ([^\n]*)"
ACTION_PATTERN = r"\n*(\d+)\. (\w+)\((.*)\)(\s*#\w+\n)?"
# $1 or ${1} -> 1
ID_PATTERN = r"\$\{?(\d+)\}?"
END_OF_PLAN = "<END_OF_PLAN>"
JOINER_REPLAN = "Replan"
def default_dependency_rule(idx: int, args: str) -> bool:
"""Default dependency rule."""
matches = re.findall(ID_PATTERN, args)
numbers = [int(match) for match in matches]
return idx in numbers
class LLMCompilerPlanParser(BaseOutputParser):
"""LLM Compiler plan output parser.
Directly adapted from source code: https://github.com/SqueezeAILab/LLMCompiler/blob/main/src/llm_compiler/output_parser.py.
"""
def __init__(self, tools: Sequence[BaseTool]):
"""Init params."""
self.tools = tools
def parse(self, text: str) -> Dict[int, Any]:
# 1. search("Ronaldo number of kids") -> 1, "search", '"Ronaldo number of kids"'
# pattern = r"(\d+)\. (\w+)\(([^)]+)\)"
pattern = rf"(?:{THOUGHT_PATTERN}\n)?{ACTION_PATTERN}"
matches = re.findall(pattern, text)
# convert matches to a list of LLMCompilerParseResult
results: List[LLMCompilerParseResult] = []
for match in matches:
thought, idx, tool_name, args, _ = match
idx = int(idx)
results.append(
LLMCompilerParseResult(
thought=thought, idx=idx, tool_name=tool_name, args=args
)
)
# get graph dict
return get_graph_dict(results, self.tools)
### Helper functions
class LLMCompilerJoinerParser(BaseOutputParser):
"""LLM Compiler output parser for the join step.
Adapted from _parse_joiner_output in
https://github.com/SqueezeAILab/LLMCompiler/blob/main/src/llm_compiler/llm_compiler.py
"""
def parse(self, text: str) -> JoinerOutput:
"""Parse."""
thought, answer, is_replan = "", "", False # default values
raw_answers = text.split("\n")
for answer in raw_answers:
if answer.startswith("Action:"):
answer = answer[answer.find("(") + 1 : answer.find(")")]
is_replan = JOINER_REPLAN in answer
elif answer.startswith("Thought:"):
thought = answer.split("Thought:")[1].strip()
return JoinerOutput(thought=thought, answer=answer, is_replan=is_replan)
|
from typing import Union, Iterable
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray.array.memory import DocumentArrayInMemory
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods"""
def _extend(self, values: Iterable['Document']) -> None:
docs = DocumentArrayInMemory(values)
if len(docs) == 0:
return
for doc in docs:
doc.embedding = self._map_embedding(doc.embedding)
self._annlite.index(docs)
self._offset2ids.extend([doc.id for doc in docs])
def _append(self, value: 'Document'):
self.extend([value])
def __eq__(self, other):
"""In annlite backend, data are considered as identical if configs point to the same database source"""
return (
type(self) is type(other)
and type(self._config) is type(other._config)
and self._config == other._config
)
def __repr__(self):
return f'<DocumentArray[AnnLite] (length={len(self)}) at {id(self)}>'
def __contains__(self, x: Union[str, 'Document']):
if isinstance(x, str):
return self._annlite.get_doc_by_id(x) is not None
elif isinstance(x, Document):
return self._annlite.get_doc_by_id(x.id) is not None
else:
return False
|
from typing import Union, Iterable
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray.array.memory import DocumentArrayInMemory
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods"""
def extend(self, values: Iterable['Document']) -> None:
docs = DocumentArrayInMemory(values)
if len(docs) == 0:
return
for doc in docs:
doc.embedding = self._map_embedding(doc.embedding)
self._annlite.index(docs)
self._offset2ids.extend([doc.id for doc in docs])
self._update_subindices_append_extend(docs)
def append(self, value: 'Document'):
self.extend([value])
def __eq__(self, other):
"""In annlite backend, data are considered as identical if configs point to the same database source"""
return (
type(self) is type(other)
and type(self._config) is type(other._config)
and self._config == other._config
)
def __repr__(self):
return f'<DocumentArray[AnnLite] (length={len(self)}) at {id(self)}>'
def __contains__(self, x: Union[str, 'Document']):
if isinstance(x, str):
return self._annlite.get_doc_by_id(x) is not None
elif isinstance(x, Document):
return self._annlite.get_doc_by_id(x.id) is not None
else:
return False
|
from prisma.models import User
from backend.blocks.basic import AgentInputBlock, PrintToConsoleBlock
from backend.blocks.text import FillTextTemplateBlock
from backend.data import graph
from backend.data.graph import create_graph
from backend.data.user import get_or_create_user
from backend.util.test import SpinTestServer, wait_execution
async def create_test_user() -> User:
test_user_data = {
"sub": "ef3b97d7-1161-4eb4-92b2-10c24fb154c1",
"email": "testuser#example.com",
"name": "Test User",
}
user = await get_or_create_user(test_user_data)
return user
def create_test_graph() -> graph.Graph:
"""
InputBlock
\
---- FillTextTemplateBlock ---- PrintToConsoleBlock
/
InputBlock
"""
nodes = [
graph.Node(
block_id=AgentInputBlock().id,
input_default={"name": "input_1"},
),
graph.Node(
block_id=AgentInputBlock().id,
input_default={"name": "input_2"},
),
graph.Node(
block_id=FillTextTemplateBlock().id,
input_default={
"format": "{a}, {b}{c}",
"values_#_c": "!!!",
},
),
graph.Node(block_id=PrintToConsoleBlock().id),
]
links = [
graph.Link(
source_id=nodes[0].id,
sink_id=nodes[2].id,
source_name="result",
sink_name="values_#_a",
),
graph.Link(
source_id=nodes[1].id,
sink_id=nodes[2].id,
source_name="result",
sink_name="values_#_b",
),
graph.Link(
source_id=nodes[2].id,
sink_id=nodes[3].id,
source_name="output",
sink_name="text",
),
]
return graph.Graph(
name="TestGraph",
description="Test graph",
nodes=nodes,
links=links,
)
async def sample_agent():
async with SpinTestServer() as server:
test_user = await create_test_user()
test_graph = await create_graph(create_test_graph(), test_user.id)
input_data = {"input_1": "Hello", "input_2": "World"}
response = await server.agent_server.test_execute_graph(
test_graph.id, input_data, test_user.id
)
print(response)
result = await wait_execution(test_user.id, test_graph.id, response["id"], 10)
print(result)
if __name__ == "__main__":
import asyncio
asyncio.run(sample_agent())
|
from prisma.models import User
from backend.blocks.basic import AgentInputBlock, PrintToConsoleBlock
from backend.blocks.text import FillTextTemplateBlock
from backend.data import graph
from backend.data.graph import create_graph
from backend.data.user import get_or_create_user
from backend.util.test import SpinTestServer, wait_execution
async def create_test_user() -> User:
test_user_data = {
"sub": "ef3b97d7-1161-4eb4-92b2-10c24fb154c1",
"email": "testuser#example.com",
"name": "Test User",
}
user = await get_or_create_user(test_user_data)
return user
def create_test_graph() -> graph.Graph:
"""
InputBlock
\
---- FillTextTemplateBlock ---- PrintToConsoleBlock
/
InputBlock
"""
nodes = [
graph.Node(
block_id=AgentInputBlock().id,
input_default={"name": "input_1"},
),
graph.Node(
block_id=AgentInputBlock().id,
input_default={"name": "input_2"},
),
graph.Node(
block_id=FillTextTemplateBlock().id,
input_default={
"format": "{a}, {b}{c}",
"values_#_c": "!!!",
},
),
graph.Node(block_id=PrintToConsoleBlock().id),
]
links = [
graph.Link(
source_id=nodes[0].id,
sink_id=nodes[2].id,
source_name="result",
sink_name="values_#_a",
),
graph.Link(
source_id=nodes[1].id,
sink_id=nodes[2].id,
source_name="result",
sink_name="values_#_b",
),
graph.Link(
source_id=nodes[2].id,
sink_id=nodes[3].id,
source_name="output",
sink_name="text",
),
]
return graph.Graph(
name="TestGraph",
description="Test graph",
nodes=nodes,
links=links,
)
async def sample_agent():
async with SpinTestServer() as server:
test_user = await create_test_user()
test_graph = await create_graph(create_test_graph(), test_user.id)
input_data = {"input_1": "Hello", "input_2": "World"}
response = server.agent_server.execute_graph(
test_graph.id, input_data, test_user.id
)
print(response)
result = await wait_execution(test_user.id, test_graph.id, response["id"], 10)
print(result)
if __name__ == "__main__":
import asyncio
asyncio.run(sample_agent())
|
__version__ = '0.15.2'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.15.1'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
from docarray.proto.pb2.docarray_pb2 import DocumentArrayProto, DocumentProto, NdArrayProto, NodeProto
|
from .pb2.docarray_pb2 import DocumentArrayProto, DocumentProto, NdArrayProto, NodeProto
|
_base_ = './ga-rpn_r50-caffe_fpn_1x_coco.py'
# model settings
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
|
_base_ = './ga_rpn_r50_caffe_fpn_1x_coco.py'
# model settings
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
|
from urllib.parse import quote
from backend.blocks.jina._auth import (
JinaCredentials,
JinaCredentialsField,
JinaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
class FactCheckerBlock(Block):
class Input(BlockSchema):
statement: str = SchemaField(
description="The statement to check for factuality"
)
credentials: JinaCredentialsInput = JinaCredentialsField()
class Output(BlockSchema):
factuality: float = SchemaField(
description="The factuality score of the statement"
)
result: bool = SchemaField(description="The result of the factuality check")
reason: str = SchemaField(description="The reason for the factuality result")
error: str = SchemaField(description="Error message if the check fails")
def __init__(self):
super().__init__(
id="d38b6c5e-9968-4271-8423-6cfe60d6e7e6",
description="This block checks the factuality of a given statement using Jina AI's Grounding API.",
categories={BlockCategory.SEARCH},
input_schema=FactCheckerBlock.Input,
output_schema=FactCheckerBlock.Output,
)
def run(
self, input_data: Input, *, credentials: JinaCredentials, **kwargs
) -> BlockOutput:
encoded_statement = quote(input_data.statement)
url = f"https://g.jina.ai/{encoded_statement}"
headers = {
"Accept": "application/json",
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
}
response = requests.get(url, headers=headers)
response.raise_for_status()
data = response.json()
if "data" in data:
data = data["data"]
yield "factuality", data["factuality"]
yield "result", data["result"]
yield "reason", data["reason"]
else:
raise RuntimeError(f"Expected 'data' key not found in response: {data}")
|
from urllib.parse import quote
import requests
from backend.blocks.jina._auth import (
JinaCredentials,
JinaCredentialsField,
JinaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class FactCheckerBlock(Block):
class Input(BlockSchema):
statement: str = SchemaField(
description="The statement to check for factuality"
)
credentials: JinaCredentialsInput = JinaCredentialsField()
class Output(BlockSchema):
factuality: float = SchemaField(
description="The factuality score of the statement"
)
result: bool = SchemaField(description="The result of the factuality check")
reason: str = SchemaField(description="The reason for the factuality result")
error: str = SchemaField(description="Error message if the check fails")
def __init__(self):
super().__init__(
id="d38b6c5e-9968-4271-8423-6cfe60d6e7e6",
description="This block checks the factuality of a given statement using Jina AI's Grounding API.",
categories={BlockCategory.SEARCH},
input_schema=FactCheckerBlock.Input,
output_schema=FactCheckerBlock.Output,
)
def run(
self, input_data: Input, *, credentials: JinaCredentials, **kwargs
) -> BlockOutput:
encoded_statement = quote(input_data.statement)
url = f"https://g.jina.ai/{encoded_statement}"
headers = {
"Accept": "application/json",
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
}
response = requests.get(url, headers=headers)
response.raise_for_status()
data = response.json()
if "data" in data:
data = data["data"]
yield "factuality", data["factuality"]
yield "result", data["result"]
yield "reason", data["reason"]
else:
raise RuntimeError(f"Expected 'data' key not found in response: {data}")
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import mmcv
import numpy as np
import torch
import torch.nn as nn
from mmcv.transforms import Compose
from mmengine.utils import track_iter_progress
from mmdet.apis import init_detector
from mmdet.registry import VISUALIZERS
from mmdet.structures import DetDataSample
from mmdet.utils import register_all_modules
from mmdet.utils.typing import Tuple
try:
import ffmpegcv
except ImportError:
raise ImportError(
'Please install ffmpegcv with:\n\n pip install ffmpegcv')
def parse_args():
parser = argparse.ArgumentParser(
description='MMDetection video demo with GPU acceleration')
parser.add_argument('video', help='Video file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='Bbox score threshold')
parser.add_argument('--out', type=str, help='Output video file')
parser.add_argument('--show', action='store_true', help='Show video')
parser.add_argument(
'--nvdecode', action='store_true', help='Use NVIDIA decoder')
parser.add_argument(
'--wait-time',
type=float,
default=1,
help='The interval of show (s), 0 is block')
args = parser.parse_args()
return args
def prefetch_batch_input_shape(model: nn.Module, ori_wh: Tuple[int,
int]) -> dict:
cfg = model.cfg
w, h = ori_wh
cfg.test_dataloader.dataset.pipeline[0].type = 'LoadImageFromNDArray'
test_pipeline = Compose(cfg.test_dataloader.dataset.pipeline)
data = {'img': np.zeros((h, w, 3), dtype=np.uint8), 'img_id': 0}
data = test_pipeline(data)
_, data_sample = model.data_preprocessor([data], False)
batch_input_shape = data_sample[0].batch_input_shape
return batch_input_shape
def pack_data(frame_resize: np.ndarray, batch_input_shape: Tuple[int, int],
ori_shape: Tuple[int, int]) -> dict:
assert frame_resize.shape[:2] == batch_input_shape
data_sample = DetDataSample()
data_sample.set_metainfo({
'img_shape':
batch_input_shape,
'ori_shape':
ori_shape,
'scale_factor': (batch_input_shape[0] / ori_shape[0],
batch_input_shape[1] / ori_shape[1])
})
frame_resize = torch.from_numpy(frame_resize).permute((2, 0, 1))
data = {'inputs': frame_resize, 'data_sample': data_sample}
return data
def main():
args = parse_args()
assert args.out or args.show, \
('Please specify at least one operation (save/show the '
'video) with the argument "--out" or "--show"')
# register all modules in mmdet into the registries
register_all_modules()
model = init_detector(args.config, args.checkpoint, device=args.device)
# init visualizer
visualizer = VISUALIZERS.build(model.cfg.visualizer)
# the dataset_meta is loaded from the checkpoint and
# then pass to the model in init_detector
visualizer.dataset_meta = model.dataset_meta
if args.nvdecode:
VideoCapture = ffmpegcv.VideoCaptureNV
else:
VideoCapture = ffmpegcv.VideoCapture
video_origin = VideoCapture(args.video)
batch_input_shape = prefetch_batch_input_shape(
model, (video_origin.width, video_origin.height))
ori_shape = (video_origin.height, video_origin.width)
resize_wh = batch_input_shape[::-1]
video_resize = VideoCapture(
args.video,
resize=resize_wh,
resize_keepratio=True,
resize_keepratioalign='topleft')
video_writer = None
if args.out:
video_writer = ffmpegcv.VideoWriter(args.out, fps=video_origin.fps)
with torch.no_grad():
for i, (frame_resize, frame_origin) in enumerate(
zip(track_iter_progress(video_resize), video_origin)):
data = pack_data(frame_resize, batch_input_shape, ori_shape)
result = model.test_step([data])[0]
visualizer.add_datasample(
name='video',
image=frame_origin,
pred_sample=result,
show=False,
pred_score_thr=args.score_thr)
frame_mask = visualizer.get_image()
if args.show:
cv2.namedWindow('video', 0)
mmcv.imshow(frame_mask, 'video', args.wait_time)
if args.out:
video_writer.write(frame_mask)
if video_writer:
video_writer.release()
video_origin.release()
video_resize.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import mmcv
import numpy as np
import torch
import torch.nn as nn
from mmcv.transforms import Compose
from mmdet.apis import init_detector
from mmdet.registry import VISUALIZERS
from mmdet.structures import DetDataSample
from mmdet.utils import register_all_modules
from mmdet.utils.typing import Tuple
try:
import ffmpegcv
except ImportError:
raise ImportError(
'Please install ffmpegcv with:\n\n pip install ffmpegcv')
def parse_args():
parser = argparse.ArgumentParser(
description='MMDetection video demo with GPU acceleration')
parser.add_argument('video', help='Video file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='Bbox score threshold')
parser.add_argument('--out', type=str, help='Output video file')
parser.add_argument('--show', action='store_true', help='Show video')
parser.add_argument(
'--nvdecode', action='store_true', help='Use NVIDIA decoder')
parser.add_argument(
'--wait-time',
type=float,
default=1,
help='The interval of show (s), 0 is block')
args = parser.parse_args()
return args
def prefetch_batch_input_shape(model: nn.Module, ori_wh: Tuple[int,
int]) -> dict:
cfg = model.cfg
w, h = ori_wh
cfg.test_dataloader.dataset.pipeline[0].type = 'LoadImageFromNDArray'
test_pipeline = Compose(cfg.test_dataloader.dataset.pipeline)
data = {'img': np.zeros((h, w, 3), dtype=np.uint8), 'img_id': 0}
data = test_pipeline(data)
_, data_sample = model.data_preprocessor([data], False)
batch_input_shape = data_sample[0].batch_input_shape
return batch_input_shape
def pack_data(frame_resize: np.ndarray, batch_input_shape: Tuple[int, int],
ori_shape: Tuple[int, int]) -> dict:
assert frame_resize.shape[:2] == batch_input_shape
data_sample = DetDataSample()
data_sample.set_metainfo({
'img_shape':
batch_input_shape,
'ori_shape':
ori_shape,
'scale_factor': (batch_input_shape[0] / ori_shape[0],
batch_input_shape[1] / ori_shape[1])
})
frame_resize = torch.from_numpy(frame_resize).permute((2, 0, 1))
data = {'inputs': frame_resize, 'data_sample': data_sample}
return data
def main():
args = parse_args()
assert args.out or args.show, \
('Please specify at least one operation (save/show the '
'video) with the argument "--out" or "--show"')
# register all modules in mmdet into the registries
register_all_modules()
model = init_detector(args.config, args.checkpoint, device=args.device)
# init visualizer
visualizer = VISUALIZERS.build(model.cfg.visualizer)
# the dataset_meta is loaded from the checkpoint and
# then pass to the model in init_detector
visualizer.dataset_meta = model.dataset_meta
if args.nvdecode:
VideoCapture = ffmpegcv.VideoCaptureNV
else:
VideoCapture = ffmpegcv.VideoCapture
video_origin = VideoCapture(args.video)
batch_input_shape = prefetch_batch_input_shape(
model, (video_origin.width, video_origin.height))
ori_shape = (video_origin.height, video_origin.width)
resize_wh = batch_input_shape[::-1]
video_resize = VideoCapture(
args.video,
resize=resize_wh,
resize_keepratio=True,
resize_keepratioalign='topleft')
video_writer = None
if args.out:
video_writer = ffmpegcv.VideoWriter(args.out, fps=video_origin.fps)
with torch.no_grad():
for i, (frame_resize, frame_origin) in enumerate(
zip(mmcv.track_iter_progress(video_resize), video_origin)):
data = pack_data(frame_resize, batch_input_shape, ori_shape)
result = model.test_step([data])[0]
visualizer.add_datasample(
name='video',
image=frame_origin,
pred_sample=result,
show=False,
pred_score_thr=args.score_thr)
frame_mask = visualizer.get_image()
if args.show:
cv2.namedWindow('video', 0)
mmcv.imshow(frame_mask, 'video', args.wait_time)
if args.out:
video_writer.write(frame_mask)
if video_writer:
video_writer.release()
video_origin.release()
video_resize.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
|
from keras.src import activations
from keras.src import applications
from keras.src import backend
from keras.src import constraints
from keras.src import datasets
from keras.src import initializers
from keras.src import layers
from keras.src import models
from keras.src import ops
from keras.src import optimizers
from keras.src import regularizers
from keras.src import utils
from keras.src import visualization
from keras.src.backend import KerasTensor
from keras.src.layers import Input
from keras.src.layers import Layer
from keras.src.models import Functional
from keras.src.models import Model
from keras.src.models import Sequential
from keras.src.version import __version__
|
from keras.src import activations
from keras.src import applications
from keras.src import backend
from keras.src import constraints
from keras.src import datasets
from keras.src import initializers
from keras.src import layers
from keras.src import models
from keras.src import ops
from keras.src import optimizers
from keras.src import regularizers
from keras.src import utils
from keras.src.backend import KerasTensor
from keras.src.layers import Input
from keras.src.layers import Layer
from keras.src.models import Functional
from keras.src.models import Model
from keras.src.models import Sequential
from keras.src.version import __version__
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
import torch
import torch.nn as nn
from mmcv import ops
from mmcv.runner import BaseModule
class BaseRoIExtractor(BaseModule, metaclass=ABCMeta):
"""Base class for RoI extractor.
Args:
roi_layer (dict): Specify RoI layer type and arguments.
out_channels (int): Output channels of RoI layers.
featmap_strides (int): Strides of input feature maps.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
roi_layer,
out_channels,
featmap_strides,
init_cfg=None):
super(BaseRoIExtractor, self).__init__(init_cfg)
self.roi_layers = self.build_roi_layers(roi_layer, featmap_strides)
self.out_channels = out_channels
self.featmap_strides = featmap_strides
self.fp16_enabled = False
@property
def num_inputs(self):
"""int: Number of input feature maps."""
return len(self.featmap_strides)
def build_roi_layers(self, layer_cfg, featmap_strides):
"""Build RoI operator to extract feature from each level feature map.
Args:
layer_cfg (dict): Dictionary to construct and config RoI layer
operation. Options are modules under ``mmcv/ops`` such as
``RoIAlign``.
featmap_strides (List[int]): The stride of input feature map w.r.t
to the original image size, which would be used to scale RoI
coordinate (original image coordinate system) to feature
coordinate system.
Returns:
nn.ModuleList: The RoI extractor modules for each level feature
map.
"""
cfg = layer_cfg.copy()
layer_type = cfg.pop('type')
assert hasattr(ops, layer_type)
layer_cls = getattr(ops, layer_type)
roi_layers = nn.ModuleList(
[layer_cls(spatial_scale=1 / s, **cfg) for s in featmap_strides])
return roi_layers
def roi_rescale(self, rois, scale_factor):
"""Scale RoI coordinates by scale factor.
Args:
rois (torch.Tensor): RoI (Region of Interest), shape (n, 5)
scale_factor (float): Scale factor that RoI will be multiplied by.
Returns:
torch.Tensor: Scaled RoI.
"""
cx = (rois[:, 1] + rois[:, 3]) * 0.5
cy = (rois[:, 2] + rois[:, 4]) * 0.5
w = rois[:, 3] - rois[:, 1]
h = rois[:, 4] - rois[:, 2]
new_w = w * scale_factor
new_h = h * scale_factor
x1 = cx - new_w * 0.5
x2 = cx + new_w * 0.5
y1 = cy - new_h * 0.5
y2 = cy + new_h * 0.5
new_rois = torch.stack((rois[:, 0], x1, y1, x2, y2), dim=-1)
return new_rois
@abstractmethod
def forward(self, feats, rois, roi_scale_factor=None):
pass
|
from abc import ABCMeta, abstractmethod
import torch
import torch.nn as nn
from mmcv import ops
from mmcv.runner import BaseModule
class BaseRoIExtractor(BaseModule, metaclass=ABCMeta):
"""Base class for RoI extractor.
Args:
roi_layer (dict): Specify RoI layer type and arguments.
out_channels (int): Output channels of RoI layers.
featmap_strides (int): Strides of input feature maps.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
roi_layer,
out_channels,
featmap_strides,
init_cfg=None):
super(BaseRoIExtractor, self).__init__(init_cfg)
self.roi_layers = self.build_roi_layers(roi_layer, featmap_strides)
self.out_channels = out_channels
self.featmap_strides = featmap_strides
self.fp16_enabled = False
@property
def num_inputs(self):
"""int: Number of input feature maps."""
return len(self.featmap_strides)
def build_roi_layers(self, layer_cfg, featmap_strides):
"""Build RoI operator to extract feature from each level feature map.
Args:
layer_cfg (dict): Dictionary to construct and config RoI layer
operation. Options are modules under ``mmcv/ops`` such as
``RoIAlign``.
featmap_strides (List[int]): The stride of input feature map w.r.t
to the original image size, which would be used to scale RoI
coordinate (original image coordinate system) to feature
coordinate system.
Returns:
nn.ModuleList: The RoI extractor modules for each level feature
map.
"""
cfg = layer_cfg.copy()
layer_type = cfg.pop('type')
assert hasattr(ops, layer_type)
layer_cls = getattr(ops, layer_type)
roi_layers = nn.ModuleList(
[layer_cls(spatial_scale=1 / s, **cfg) for s in featmap_strides])
return roi_layers
def roi_rescale(self, rois, scale_factor):
"""Scale RoI coordinates by scale factor.
Args:
rois (torch.Tensor): RoI (Region of Interest), shape (n, 5)
scale_factor (float): Scale factor that RoI will be multiplied by.
Returns:
torch.Tensor: Scaled RoI.
"""
cx = (rois[:, 1] + rois[:, 3]) * 0.5
cy = (rois[:, 2] + rois[:, 4]) * 0.5
w = rois[:, 3] - rois[:, 1]
h = rois[:, 4] - rois[:, 2]
new_w = w * scale_factor
new_h = h * scale_factor
x1 = cx - new_w * 0.5
x2 = cx + new_w * 0.5
y1 = cy - new_h * 0.5
y2 = cy + new_h * 0.5
new_rois = torch.stack((rois[:, 0], x1, y1, x2, y2), dim=-1)
return new_rois
@abstractmethod
def forward(self, feats, rois, roi_scale_factor=None):
pass
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, Optional, Sequence
from ..registry import HOOKS
from ..utils import get_git_hash
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_module()
class RuntimeInfoHook(Hook):
"""A hook that updates runtime information into message hub.
E.g. ``epoch``, ``iter``, ``max_epochs``, and ``max_iters`` for the
training state. Components that cannot access the runner can get runtime
information through the message hub.
"""
priority = 'VERY_HIGH'
def before_run(self, runner) -> None:
import mmengine
metainfo = dict(
cfg=runner.cfg.pretty_text,
seed=runner.seed,
experiment_name=runner.experiment_name,
mmengine_version=mmengine.__version__ + get_git_hash())
runner.message_hub.update_info_dict(metainfo)
def before_train(self, runner) -> None:
"""Update resumed training state."""
runner.message_hub.update_info('epoch', runner.epoch)
runner.message_hub.update_info('iter', runner.iter)
runner.message_hub.update_info('max_epochs', runner.max_epochs)
runner.message_hub.update_info('max_iters', runner.max_iters)
if hasattr(runner.train_dataloader.dataset, 'dataset_meta'):
runner.message_hub.update_info(
'dataset_meta', runner.train_dataloader.dataset.metainfo)
def before_train_epoch(self, runner) -> None:
"""Update current epoch information before every epoch."""
runner.message_hub.update_info('epoch', runner.epoch)
def before_train_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None) -> None:
"""Update current iter and learning rate information before every
iteration."""
runner.message_hub.update_info('iter', runner.iter)
lr_dict = runner.optim_wrapper.get_lr()
assert isinstance(lr_dict, dict), (
'`runner.optim_wrapper.get_lr()` should return a dict '
'of learning rate when training with OptimWrapper(single '
'optimizer) or OptimWrapperDict(multiple optimizer), '
f'but got {type(lr_dict)} please check your optimizer '
'constructor return an `OptimWrapper` or `OptimWrapperDict` '
'instance')
for name, lr in lr_dict.items():
runner.message_hub.update_scalar(f'train/{name}', lr[0])
def after_train_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[dict] = None) -> None:
"""Update ``log_vars`` in model outputs every iteration."""
if outputs is not None:
for key, value in outputs.items():
runner.message_hub.update_scalar(f'train/{key}', value)
def after_val_epoch(self,
runner,
metrics: Optional[Dict[str, float]] = None) -> None:
"""All subclasses should override this method, if they need any
operations after each validation epoch.
Args:
runner (Runner): The runner of the validation process.
metrics (Dict[str, float], optional): Evaluation results of all
metrics on validation dataset. The keys are the names of the
metrics, and the values are corresponding results.
"""
if metrics is not None:
for key, value in metrics.items():
runner.message_hub.update_scalar(f'val/{key}', value)
def after_test_epoch(self,
runner,
metrics: Optional[Dict[str, float]] = None) -> None:
"""All subclasses should override this method, if they need any
operations after each test epoch.
Args:
runner (Runner): The runner of the testing process.
metrics (Dict[str, float], optional): Evaluation results of all
metrics on test dataset. The keys are the names of the
metrics, and the values are corresponding results.
"""
if metrics is not None:
for key, value in metrics.items():
runner.message_hub.update_scalar(f'test/{key}', value)
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, Optional, Sequence
from ..registry import HOOKS
from ..utils import get_git_hash
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_module()
class RuntimeInfoHook(Hook):
"""A hook that updates runtime information into message hub.
E.g. ``epoch``, ``iter``, ``max_epochs``, and ``max_iters`` for the
training state. Components that cannot access the runner can get runtime
information through the message hub.
"""
priority = 'VERY_HIGH'
def before_run(self, runner) -> None:
import mmengine
metainfo = dict(
cfg=runner.cfg.pretty_text,
seed=runner.seed,
experiment_name=runner.experiment_name,
mmengine_version=mmengine.__version__ + get_git_hash())
runner.message_hub.update_info_dict(metainfo)
def before_train(self, runner) -> None:
"""Update resumed training state."""
runner.message_hub.update_info('epoch', runner.epoch)
runner.message_hub.update_info('iter', runner.iter)
runner.message_hub.update_info('max_epochs', runner.max_epochs)
runner.message_hub.update_info('max_iters', runner.max_iters)
runner.message_hub.update_info(
'dataset_meta', runner.train_dataloader.dataset.metainfo)
def before_train_epoch(self, runner) -> None:
"""Update current epoch information before every epoch."""
runner.message_hub.update_info('epoch', runner.epoch)
def before_train_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None) -> None:
"""Update current iter and learning rate information before every
iteration."""
runner.message_hub.update_info('iter', runner.iter)
lr_dict = runner.optim_wrapper.get_lr()
assert isinstance(lr_dict, dict), (
'`runner.optim_wrapper.get_lr()` should return a dict '
'of learning rate when training with OptimWrapper(single '
'optimizer) or OptimWrapperDict(multiple optimizer), '
f'but got {type(lr_dict)} please check your optimizer '
'constructor return an `OptimWrapper` or `OptimWrapperDict` '
'instance')
for name, lr in lr_dict.items():
runner.message_hub.update_scalar(f'train/{name}', lr[0])
def after_train_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[dict] = None) -> None:
"""Update ``log_vars`` in model outputs every iteration."""
if outputs is not None:
for key, value in outputs.items():
runner.message_hub.update_scalar(f'train/{key}', value)
def after_val_epoch(self,
runner,
metrics: Optional[Dict[str, float]] = None) -> None:
"""All subclasses should override this method, if they need any
operations after each validation epoch.
Args:
runner (Runner): The runner of the validation process.
metrics (Dict[str, float], optional): Evaluation results of all
metrics on validation dataset. The keys are the names of the
metrics, and the values are corresponding results.
"""
if metrics is not None:
for key, value in metrics.items():
runner.message_hub.update_scalar(f'val/{key}', value)
def after_test_epoch(self,
runner,
metrics: Optional[Dict[str, float]] = None) -> None:
"""All subclasses should override this method, if they need any
operations after each test epoch.
Args:
runner (Runner): The runner of the testing process.
metrics (Dict[str, float], optional): Evaluation results of all
metrics on test dataset. The keys are the names of the
metrics, and the values are corresponding results.
"""
if metrics is not None:
for key, value in metrics.items():
runner.message_hub.update_scalar(f'test/{key}', value)
|
from ._bounding_box import BoundingBox, BoundingBoxFormat
from ._datapoint import FillType, FillTypeJIT, InputType, InputTypeJIT
from ._image import Image, ImageType, ImageTypeJIT, TensorImageType, TensorImageTypeJIT
from ._label import Label, OneHotLabel
from ._mask import Mask
from ._video import TensorVideoType, TensorVideoTypeJIT, Video, VideoType, VideoTypeJIT
|
from ._bounding_box import BoundingBox, BoundingBoxFormat
from ._datapoint import FillType, FillTypeJIT, InputType, InputTypeJIT
from ._image import ColorSpace, Image, ImageType, ImageTypeJIT, TensorImageType, TensorImageTypeJIT
from ._label import Label, OneHotLabel
from ._mask import Mask
from ._video import TensorVideoType, TensorVideoTypeJIT, Video, VideoType, VideoTypeJIT
|
# Copyright (c) OpenMMLab. All rights reserved.
from .compare import (assert_allclose, assert_attrs_equal,
assert_dict_contains_subset, assert_dict_has_keys,
assert_is_norm_layer, assert_keys_equal,
assert_params_all_zeros, check_python_script)
__all__ = [
'assert_allclose', 'assert_dict_contains_subset', 'assert_keys_equal',
'assert_attrs_equal', 'assert_dict_has_keys', 'assert_is_norm_layer',
'assert_params_all_zeros', 'check_python_script'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .compare import assert_allclose
__all__ = ['assert_allclose']
|
# pylint: disable=invalid-name,unused-import
"""For compatibility and optional dependencies."""
import importlib.util
import logging
import sys
import types
from typing import Any, Sequence, cast
import numpy as np
from ._typing import _T
assert sys.version_info[0] == 3, "Python 2 is no longer supported."
def py_str(x: bytes | None) -> str:
"""convert c string back to python string"""
assert x is not None # ctypes might return None
return x.decode("utf-8") # type: ignore
def lazy_isinstance(instance: Any, module: str, name: str) -> bool:
"""Use string representation to identify a type."""
# Notice, we use .__class__ as opposed to type() in order
# to support object proxies such as weakref.proxy
cls = instance.__class__
is_same_module = cls.__module__ == module
has_same_name = cls.__name__ == name
return is_same_module and has_same_name
# pandas
try:
from pandas import DataFrame, Series
PANDAS_INSTALLED = True
except ImportError:
DataFrame = object
Series = object
PANDAS_INSTALLED = False
# sklearn
try:
from sklearn import __version__ as _sklearn_version
from sklearn.base import BaseEstimator as XGBModelBase
from sklearn.base import ClassifierMixin as XGBClassifierBase
from sklearn.base import RegressorMixin as XGBRegressorBase
try:
from sklearn.model_selection import StratifiedKFold as XGBStratifiedKFold
except ImportError:
from sklearn.cross_validation import StratifiedKFold as XGBStratifiedKFold
# sklearn.utils Tags types can be imported unconditionally once
# xgboost's minimum scikit-learn version is 1.6 or higher
try:
from sklearn.utils import Tags as _sklearn_Tags
except ImportError:
_sklearn_Tags = object
SKLEARN_INSTALLED = True
except ImportError:
SKLEARN_INSTALLED = False
# used for compatibility without sklearn
class XGBModelBase: # type: ignore[no-redef]
"""Dummy class for sklearn.base.BaseEstimator."""
class XGBClassifierBase: # type: ignore[no-redef]
"""Dummy class for sklearn.base.ClassifierMixin."""
class XGBRegressorBase: # type: ignore[no-redef]
"""Dummy class for sklearn.base.RegressorMixin."""
XGBStratifiedKFold = None
_sklearn_Tags = object
_sklearn_version = object
_logger = logging.getLogger(__name__)
def is_cudf_available() -> bool:
"""Check cuDF package available or not"""
if importlib.util.find_spec("cudf") is None:
return False
try:
import cudf
return True
except ImportError:
_logger.exception("Importing cuDF failed, use DMatrix instead of QDM")
return False
def is_cupy_available() -> bool:
"""Check cupy package available or not"""
if importlib.util.find_spec("cupy") is None:
return False
try:
import cupy
return True
except ImportError:
return False
def import_cupy() -> types.ModuleType:
"""Import cupy."""
if not is_cupy_available():
raise ImportError("`cupy` is required for handling CUDA buffer.")
import cupy
return cupy
try:
import scipy.sparse as scipy_sparse
from scipy.sparse import csr_matrix as scipy_csr
except ImportError:
scipy_sparse = False
scipy_csr = object
def concat(value: Sequence[_T]) -> _T: # pylint: disable=too-many-return-statements
"""Concatenate row-wise."""
if isinstance(value[0], np.ndarray):
value_arr = cast(Sequence[np.ndarray], value)
return np.concatenate(value_arr, axis=0)
if scipy_sparse and isinstance(value[0], scipy_sparse.csr_matrix):
return scipy_sparse.vstack(value, format="csr")
if scipy_sparse and isinstance(value[0], scipy_sparse.csc_matrix):
return scipy_sparse.vstack(value, format="csc")
if scipy_sparse and isinstance(value[0], scipy_sparse.spmatrix):
# other sparse format will be converted to CSR.
return scipy_sparse.vstack(value, format="csr")
if PANDAS_INSTALLED and isinstance(value[0], (DataFrame, Series)):
from pandas import concat as pd_concat
return pd_concat(value, axis=0)
if lazy_isinstance(value[0], "cudf.core.dataframe", "DataFrame") or lazy_isinstance(
value[0], "cudf.core.series", "Series"
):
from cudf import concat as CUDF_concat
return CUDF_concat(value, axis=0)
from .data import _is_cupy_alike
if _is_cupy_alike(value[0]):
import cupy
# pylint: disable=c-extension-no-member,no-member
d = cupy.cuda.runtime.getDevice()
for v in value:
arr = cast(cupy.ndarray, v)
d_v = arr.device.id
assert d_v == d, "Concatenating arrays on different devices."
return cupy.concatenate(value, axis=0)
raise TypeError("Unknown type.")
|
# pylint: disable=invalid-name,unused-import
"""For compatibility and optional dependencies."""
import importlib.util
import logging
import sys
import types
from typing import Any, Sequence, cast
import numpy as np
from ._typing import _T
assert sys.version_info[0] == 3, "Python 2 is no longer supported."
def py_str(x: bytes | None) -> str:
"""convert c string back to python string"""
assert x is not None # ctypes might return None
return x.decode("utf-8") # type: ignore
def lazy_isinstance(instance: Any, module: str, name: str) -> bool:
"""Use string representation to identify a type."""
# Notice, we use .__class__ as opposed to type() in order
# to support object proxies such as weakref.proxy
cls = instance.__class__
is_same_module = cls.__module__ == module
has_same_name = cls.__name__ == name
return is_same_module and has_same_name
# pandas
try:
from pandas import DataFrame, Series
PANDAS_INSTALLED = True
except ImportError:
DataFrame = object
Series = object
PANDAS_INSTALLED = False
# sklearn
try:
from sklearn.base import BaseEstimator as XGBModelBase
from sklearn.base import ClassifierMixin as XGBClassifierBase
from sklearn.base import RegressorMixin as XGBRegressorBase
from sklearn.preprocessing import LabelEncoder
try:
from sklearn.model_selection import KFold as XGBKFold
from sklearn.model_selection import StratifiedKFold as XGBStratifiedKFold
except ImportError:
from sklearn.cross_validation import KFold as XGBKFold
from sklearn.cross_validation import StratifiedKFold as XGBStratifiedKFold
SKLEARN_INSTALLED = True
except ImportError:
SKLEARN_INSTALLED = False
# used for compatibility without sklearn
XGBModelBase = object
XGBClassifierBase = object
XGBRegressorBase = object
LabelEncoder = object
XGBKFold = None
XGBStratifiedKFold = None
_logger = logging.getLogger(__name__)
def is_cudf_available() -> bool:
"""Check cuDF package available or not"""
if importlib.util.find_spec("cudf") is None:
return False
try:
import cudf
return True
except ImportError:
_logger.exception("Importing cuDF failed, use DMatrix instead of QDM")
return False
def is_cupy_available() -> bool:
"""Check cupy package available or not"""
if importlib.util.find_spec("cupy") is None:
return False
try:
import cupy
return True
except ImportError:
return False
def import_cupy() -> types.ModuleType:
"""Import cupy."""
if not is_cupy_available():
raise ImportError("`cupy` is required for handling CUDA buffer.")
import cupy
return cupy
try:
import scipy.sparse as scipy_sparse
from scipy.sparse import csr_matrix as scipy_csr
except ImportError:
scipy_sparse = False
scipy_csr = object
def concat(value: Sequence[_T]) -> _T: # pylint: disable=too-many-return-statements
"""Concatenate row-wise."""
if isinstance(value[0], np.ndarray):
value_arr = cast(Sequence[np.ndarray], value)
return np.concatenate(value_arr, axis=0)
if scipy_sparse and isinstance(value[0], scipy_sparse.csr_matrix):
return scipy_sparse.vstack(value, format="csr")
if scipy_sparse and isinstance(value[0], scipy_sparse.csc_matrix):
return scipy_sparse.vstack(value, format="csc")
if scipy_sparse and isinstance(value[0], scipy_sparse.spmatrix):
# other sparse format will be converted to CSR.
return scipy_sparse.vstack(value, format="csr")
if PANDAS_INSTALLED and isinstance(value[0], (DataFrame, Series)):
from pandas import concat as pd_concat
return pd_concat(value, axis=0)
if lazy_isinstance(value[0], "cudf.core.dataframe", "DataFrame") or lazy_isinstance(
value[0], "cudf.core.series", "Series"
):
from cudf import concat as CUDF_concat
return CUDF_concat(value, axis=0)
from .data import _is_cupy_alike
if _is_cupy_alike(value[0]):
import cupy
# pylint: disable=c-extension-no-member,no-member
d = cupy.cuda.runtime.getDevice()
for v in value:
arr = cast(cupy.ndarray, v)
d_v = arr.device.id
assert d_v == d, "Concatenating arrays on different devices."
return cupy.concatenate(value, axis=0)
raise TypeError("Unknown type.")
|
import logging
import aiohttp
from fastapi import APIRouter
from backend.util.settings import Settings
from .models import TurnstileVerifyRequest, TurnstileVerifyResponse
logger = logging.getLogger(__name__)
router = APIRouter()
settings = Settings()
@router.post(
"/verify", response_model=TurnstileVerifyResponse, summary="Verify Turnstile Token"
)
async def verify_turnstile_token(
request: TurnstileVerifyRequest,
) -> TurnstileVerifyResponse:
"""
Verify a Cloudflare Turnstile token.
This endpoint verifies a token returned by the Cloudflare Turnstile challenge
on the client side. It returns whether the verification was successful.
"""
logger.info(f"Verifying Turnstile token for action: {request.action}")
return await verify_token(request)
async def verify_token(request: TurnstileVerifyRequest) -> TurnstileVerifyResponse:
"""
Verify a Cloudflare Turnstile token by making a request to the Cloudflare API.
"""
# Get the secret key from settings
turnstile_secret_key = settings.secrets.turnstile_secret_key
turnstile_verify_url = settings.secrets.turnstile_verify_url
if not turnstile_secret_key:
logger.error(
"Turnstile secret key missing. Set TURNSTILE_SECRET_KEY to enable verification."
)
return TurnstileVerifyResponse(
success=False,
error="CONFIGURATION_ERROR",
challenge_timestamp=None,
hostname=None,
action=None,
)
try:
async with aiohttp.ClientSession() as session:
payload = {
"secret": turnstile_secret_key,
"response": request.token,
}
if request.action:
payload["action"] = request.action
logger.debug(f"Verifying Turnstile token with action: {request.action}")
async with session.post(
turnstile_verify_url,
data=payload,
timeout=aiohttp.ClientTimeout(total=10),
) as response:
if response.status != 200:
error_text = await response.text()
logger.error(f"Turnstile API error: {error_text}")
return TurnstileVerifyResponse(
success=False,
error=f"API_ERROR: {response.status}",
challenge_timestamp=None,
hostname=None,
action=None,
)
data = await response.json()
logger.debug(f"Turnstile API response: {data}")
# Parse the response and return a structured object
return TurnstileVerifyResponse(
success=data.get("success", False),
error=(
data.get("error-codes", None)[0]
if data.get("error-codes")
else None
),
challenge_timestamp=data.get("challenge_timestamp"),
hostname=data.get("hostname"),
action=data.get("action"),
)
except aiohttp.ClientError as e:
logger.error(f"Connection error to Turnstile API: {str(e)}")
return TurnstileVerifyResponse(
success=False,
error=f"CONNECTION_ERROR: {str(e)}",
challenge_timestamp=None,
hostname=None,
action=None,
)
except Exception as e:
logger.error(f"Unexpected error in Turnstile verification: {str(e)}")
return TurnstileVerifyResponse(
success=False,
error=f"UNEXPECTED_ERROR: {str(e)}",
challenge_timestamp=None,
hostname=None,
action=None,
)
|
import logging
import aiohttp
from fastapi import APIRouter
from backend.util.settings import Settings
from .models import TurnstileVerifyRequest, TurnstileVerifyResponse
logger = logging.getLogger(__name__)
router = APIRouter()
settings = Settings()
@router.post("/verify", response_model=TurnstileVerifyResponse)
async def verify_turnstile_token(
request: TurnstileVerifyRequest,
) -> TurnstileVerifyResponse:
"""
Verify a Cloudflare Turnstile token.
This endpoint verifies a token returned by the Cloudflare Turnstile challenge
on the client side. It returns whether the verification was successful.
"""
logger.info(f"Verifying Turnstile token for action: {request.action}")
return await verify_token(request)
async def verify_token(request: TurnstileVerifyRequest) -> TurnstileVerifyResponse:
"""
Verify a Cloudflare Turnstile token by making a request to the Cloudflare API.
"""
# Get the secret key from settings
turnstile_secret_key = settings.secrets.turnstile_secret_key
turnstile_verify_url = settings.secrets.turnstile_verify_url
if not turnstile_secret_key:
logger.error(
"Turnstile secret key missing. Set TURNSTILE_SECRET_KEY to enable verification."
)
return TurnstileVerifyResponse(
success=False,
error="CONFIGURATION_ERROR",
challenge_timestamp=None,
hostname=None,
action=None,
)
try:
async with aiohttp.ClientSession() as session:
payload = {
"secret": turnstile_secret_key,
"response": request.token,
}
if request.action:
payload["action"] = request.action
logger.debug(f"Verifying Turnstile token with action: {request.action}")
async with session.post(
turnstile_verify_url,
data=payload,
timeout=aiohttp.ClientTimeout(total=10),
) as response:
if response.status != 200:
error_text = await response.text()
logger.error(f"Turnstile API error: {error_text}")
return TurnstileVerifyResponse(
success=False,
error=f"API_ERROR: {response.status}",
challenge_timestamp=None,
hostname=None,
action=None,
)
data = await response.json()
logger.debug(f"Turnstile API response: {data}")
# Parse the response and return a structured object
return TurnstileVerifyResponse(
success=data.get("success", False),
error=(
data.get("error-codes", None)[0]
if data.get("error-codes")
else None
),
challenge_timestamp=data.get("challenge_timestamp"),
hostname=data.get("hostname"),
action=data.get("action"),
)
except aiohttp.ClientError as e:
logger.error(f"Connection error to Turnstile API: {str(e)}")
return TurnstileVerifyResponse(
success=False,
error=f"CONNECTION_ERROR: {str(e)}",
challenge_timestamp=None,
hostname=None,
action=None,
)
except Exception as e:
logger.error(f"Unexpected error in Turnstile verification: {str(e)}")
return TurnstileVerifyResponse(
success=False,
error=f"UNEXPECTED_ERROR: {str(e)}",
challenge_timestamp=None,
hostname=None,
action=None,
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.