input | output
---|---
from __future__ import annotations
from .splade_callbacks import SchedulerType, SpladeLambdaSchedulerCallback
__all__ = ["SpladeLambdaSchedulerCallback", "SchedulerType"]
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.callbacks.splade_callbacks import (
SchedulerType,
SpladeLambdaSchedulerCallback,
)
__all__ = ["SpladeLambdaSchedulerCallback", "SchedulerType"]
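For reference, the two snippets above expose the same public surface through two import paths. A quick identity check, assuming the relative-import variant is the `callbacks` package `__init__` (that layout is an assumption, not verified):
# Hedged sketch: both import paths should resolve to the same class object.
from sentence_transformers.sparse_encoder.callbacks import (
    SpladeLambdaSchedulerCallback as ShimCallback,
)
from sentence_transformers.sparse_encoder.callbacks.splade_callbacks import (
    SpladeLambdaSchedulerCallback as CanonicalCallback,
)
assert ShimCallback is CanonicalCallback  # same object, only the path differs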
|
import logging
import requests
from typing import List, Optional
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.bridge.pydantic import PrivateAttr
logger = logging.getLogger(__name__)
class OutlookEmailReader(BasePydanticReader):
"""
Outlook Emails Reader using Microsoft Graph API.
Reads emails from a given Outlook mailbox and indexes the subject and body.
Args:
client_id (str): The Application ID for the app registered in Microsoft Azure.
client_secret (str): The application secret for the app registered in Azure.
tenant_id (str): Unique identifier of the Azure Active Directory Instance.
user_email (str): Email address of the user whose emails need to be fetched.
folder (Optional[str]): The email folder to fetch emails from. Defaults to "Inbox".
num_mails (int): Number of emails to retrieve. Defaults to 10.
"""
client_id: str
client_secret: str
tenant_id: str
user_email: str
folder: Optional[str] = "Inbox"
num_mails: int = 10
_authorization_headers: Optional[dict] = PrivateAttr(default=None)
def __init__(
self,
client_id: str,
client_secret: str,
tenant_id: str,
user_email: str,
folder: Optional[str] = "Inbox",
num_mails: int = 10,
):
super().__init__(
client_id=client_id,
client_secret=client_secret,
tenant_id=tenant_id,
user_email=user_email,
folder=folder,
num_mails=num_mails,
)
def _ensure_token(self):
"""Ensures we have a valid access token."""
if self._authorization_headers is None:
token = self._get_access_token()
self._authorization_headers = {"Authorization": f"Bearer {token}"}
def _get_access_token(self) -> str:
"""Fetches the OAuth token from Microsoft."""
token_url = f"https://login.microsoftonline.com/{self.tenant_id}/oauth2/token"
payload = {
"grant_type": "client_credentials",
"client_id": self.client_id,
"client_secret": self.client_secret,
"resource": "https://graph.microsoft.com/",
}
response = requests.post(token_url, data=payload)
response.raise_for_status()
return response.json().get("access_token")
def _fetch_emails(self) -> List[dict]:
"""Fetches emails from the specified folder."""
self._ensure_token()
url = f"https://graph.microsoft.com/v1.0/users/{self.user_email}/mailFolders/{self.folder}/messages?$top={self.num_mails}"
response = requests.get(url, headers=self._authorization_headers)
response.raise_for_status()
return response.json().get("value", [])
def load_data(self) -> List[str]:
"""Loads emails as texts containing subject and body."""
emails = self._fetch_emails()
email_texts = []
for email in emails:
subject = email.get("subject", "No Subject")
body = email.get("body", {}).get("content", "No Content")
email_texts.append(f"Subject: {subject}\n\n{body}")
return email_texts
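A minimal usage sketch for the reader defined above; every credential value is a placeholder, and the Azure app registration is assumed to hold application-level Mail.Read permission:
# Hedged usage sketch for OutlookEmailReader; credentials are placeholders.
reader = OutlookEmailReader(
    client_id="<azure-app-id>",
    client_secret="<azure-app-secret>",
    tenant_id="<azure-tenant-id>",
    user_email="user@example.com",
    num_mails=5,  # fetch the five most recent messages from the default Inbox
)
for text in reader.load_data():
    print(text.splitlines()[0])  # the "Subject: ..." line of each email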
|
import logging
import requests
from typing import List, Optional
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.bridge.pydantic import PrivateAttr
logger = logging.getLogger(__name__)
class OutlookEmailReader(BasePydanticReader):
"""
Outlook Emails Reader using Microsoft Graph API.
Reads emails from a given Outlook mailbox and indexes the subject and body.
Args:
client_id (str): The Application ID for the app registered in Microsoft Azure.
client_secret (str): The application secret for the app registered in Azure.
tenant_id (str): Unique identifier of the Azure Active Directory Instance.
user_email (str): Email address of the user whose emails need to be fetched.
folder (Optional[str]): The email folder to fetch emails from. Defaults to "Inbox".
num_mails (int): Number of emails to retrieve. Defaults to 10.
"""
client_id: str
client_secret: str
tenant_id: str
user_email: str
folder: Optional[str] = "Inbox"
num_mails: int = 10
_authorization_headers: Optional[dict] = PrivateAttr(default=None)
def __init__(
self,
client_id: str,
client_secret: str,
tenant_id: str,
user_email: str,
folder: Optional[str] = "Inbox",
num_mails: int = 10,
):
super().__init__(
client_id=client_id,
client_secret=client_secret,
tenant_id=tenant_id,
user_email=user_email,
folder=folder,
num_mails=num_mails,
)
def _ensure_token(self):
"""Ensures we have a valid access token."""
if self._authorization_headers is None:
token = self._get_access_token()
self._authorization_headers = {"Authorization": f"Bearer {token}"}
def _get_access_token(self) -> str:
"""Fetches the OAuth token from Microsoft."""
token_url = f"https://login.microsoftonline.com/{self.tenant_id}/oauth2/token"
payload = {
"grant_type": "client_credentials",
"client_id": self.client_id,
"client_secret": self.client_secret,
"resource": "https://graph.microsoft.com/",
}
response = requests.post(token_url, data=payload)
response.raise_for_status()
return response.json().get("access_token")
def _fetch_emails(self) -> List[dict]:
"""Fetches emails from the specified folder."""
self._ensure_token()
url = f"https://graph.microsoft.com/v1.0/users/{self.user_email}/mailFolders/{self.folder}/messages?$top={self.num_mails}"
response = requests.get(url, headers=self._authorization_headers)
response.raise_for_status()
return response.json().get("value", [])
def load_data(self) -> List[str]:
"""Loads emails as texts containing subject and body."""
emails = self._fetch_emails()
email_texts = []
for email in emails:
subject = email.get("subject", "No Subject")
body = email.get("body", {}).get("content", "No Content")
email_texts.append(f"Subject: {subject}\n\n{body}")
return email_texts
|
_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
data_preprocessor=dict(pad_size_divisor=64),
neck=dict(
type='FPN_CARAFE',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5,
start_level=0,
end_level=-1,
norm_cfg=None,
act_cfg=None,
order=('conv', 'norm', 'act'),
upsample_cfg=dict(
type='carafe',
up_kernel=5,
up_group=1,
encoder_kernel=3,
encoder_dilation=1,
compressed_channels=64)))
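Configs like this are merged with their `_base_` ancestry at load time; a hedged loading sketch (the file path is illustrative and assumes an MMDetection checkout):
# Hedged sketch: resolve the config above against its _base_ chain.
from mmengine.config import Config

cfg = Config.fromfile("configs/carafe/faster-rcnn_r50_fpn_carafe_1x_coco.py")
print(cfg.model.neck.type)                    # 'FPN_CARAFE'
print(cfg.model.neck.upsample_cfg.up_kernel)  # 5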
|
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
data_preprocessor=dict(pad_size_divisor=64),
neck=dict(
type='FPN_CARAFE',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5,
start_level=0,
end_level=-1,
norm_cfg=None,
act_cfg=None,
order=('conv', 'norm', 'act'),
upsample_cfg=dict(
type='carafe',
up_kernel=5,
up_group=1,
encoder_kernel=3,
encoder_dilation=1,
compressed_channels=64)))
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
from setuptools import find_packages, setup
def get_requirements(path: str):
return [l.strip() for l in open(path)]
setup(
name="llama",
version="0.0.1",
packages=find_packages(),
install_requires=get_requirements("requirements.txt"),
)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the GNU General Public License version 3.
from setuptools import setup, find_packages
setup(name="llama", version="0.0.0", packages=find_packages())
|
from typing import List, Optional
from pydantic import BaseModel
from backend.blocks.exa._auth import (
ExaCredentials,
ExaCredentialsField,
ExaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
class ContentRetrievalSettings(BaseModel):
text: dict = SchemaField(
description="Text content settings",
default={"maxCharacters": 1000, "includeHtmlTags": False},
advanced=True,
)
highlights: dict = SchemaField(
description="Highlight settings",
default={
"numSentences": 3,
"highlightsPerUrl": 3,
"query": "",
},
advanced=True,
)
summary: dict = SchemaField(
description="Summary settings",
default={"query": ""},
advanced=True,
)
class ExaContentsBlock(Block):
class Input(BlockSchema):
credentials: ExaCredentialsInput = ExaCredentialsField()
ids: List[str] = SchemaField(
description="Array of document IDs obtained from searches",
)
contents: ContentRetrievalSettings = SchemaField(
description="Content retrieval settings",
default=ContentRetrievalSettings(),
advanced=True,
)
class Output(BlockSchema):
results: list = SchemaField(
description="List of document contents",
default=[],
)
def __init__(self):
super().__init__(
id="c52be83f-f8cd-4180-b243-af35f986b461",
description="Retrieves document contents using Exa's contents API",
categories={BlockCategory.SEARCH},
input_schema=ExaContentsBlock.Input,
output_schema=ExaContentsBlock.Output,
)
def run(
self, input_data: Input, *, credentials: ExaCredentials, **kwargs
) -> BlockOutput:
url = "https://api.exa.ai/contents"
headers = {
"Content-Type": "application/json",
"x-api-key": credentials.api_key.get_secret_value(),
}
payload = {
"ids": input_data.ids,
"text": input_data.contents.text,
"highlights": input_data.contents.highlights,
"summary": input_data.contents.summary,
}
try:
response = requests.post(url, headers=headers, json=payload)
response.raise_for_status()
data = response.json()
yield "results", data.get("results", [])
except Exception as e:
yield "error", str(e)
yield "results", []
|
from typing import List, Optional
from pydantic import BaseModel
from backend.blocks.exa._auth import (
ExaCredentials,
ExaCredentialsField,
ExaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
class ContentRetrievalSettings(BaseModel):
text: Optional[dict] = SchemaField(
description="Text content settings",
default={"maxCharacters": 1000, "includeHtmlTags": False},
advanced=True,
)
highlights: Optional[dict] = SchemaField(
description="Highlight settings",
default={
"numSentences": 3,
"highlightsPerUrl": 3,
"query": "",
},
advanced=True,
)
summary: Optional[dict] = SchemaField(
description="Summary settings",
default={"query": ""},
advanced=True,
)
class ExaContentsBlock(Block):
class Input(BlockSchema):
credentials: ExaCredentialsInput = ExaCredentialsField()
ids: List[str] = SchemaField(
description="Array of document IDs obtained from searches",
)
contents: ContentRetrievalSettings = SchemaField(
description="Content retrieval settings",
default=ContentRetrievalSettings(),
advanced=True,
)
class Output(BlockSchema):
results: list = SchemaField(
description="List of document contents",
default=[],
)
def __init__(self):
super().__init__(
id="c52be83f-f8cd-4180-b243-af35f986b461",
description="Retrieves document contents using Exa's contents API",
categories={BlockCategory.SEARCH},
input_schema=ExaContentsBlock.Input,
output_schema=ExaContentsBlock.Output,
)
def run(
self, input_data: Input, *, credentials: ExaCredentials, **kwargs
) -> BlockOutput:
url = "https://api.exa.ai/contents"
headers = {
"Content-Type": "application/json",
"x-api-key": credentials.api_key.get_secret_value(),
}
payload = {
"ids": input_data.ids,
"text": input_data.contents.text,
"highlights": input_data.contents.highlights,
"summary": input_data.contents.summary,
}
try:
response = requests.post(url, headers=headers, json=payload)
response.raise_for_status()
data = response.json()
yield "results", data.get("results", [])
except Exception as e:
yield "error", str(e)
yield "results", []
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
import torch
from transformers import AutoTokenizer, UMT5EncoderModel
from diffusers import (
AuraFlowPipeline,
AuraFlowTransformer2DModel,
FlowMatchEulerDiscreteScheduler,
)
from diffusers.utils.testing_utils import (
floats_tensor,
is_peft_available,
require_peft_backend,
)
if is_peft_available():
pass
sys.path.append(".")
from utils import PeftLoraLoaderMixinTests # noqa: E402
@require_peft_backend
class AuraFlowLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
pipeline_class = AuraFlowPipeline
scheduler_cls = FlowMatchEulerDiscreteScheduler
scheduler_classes = [FlowMatchEulerDiscreteScheduler]
scheduler_kwargs = {}
transformer_kwargs = {
"sample_size": 64,
"patch_size": 1,
"in_channels": 4,
"num_mmdit_layers": 1,
"num_single_dit_layers": 1,
"attention_head_dim": 16,
"num_attention_heads": 2,
"joint_attention_dim": 32,
"caption_projection_dim": 32,
"pos_embed_max_size": 64,
}
transformer_cls = AuraFlowTransformer2DModel
vae_kwargs = {
"sample_size": 32,
"in_channels": 3,
"out_channels": 3,
"block_out_channels": (4,),
"layers_per_block": 1,
"latent_channels": 4,
"norm_num_groups": 1,
"use_quant_conv": False,
"use_post_quant_conv": False,
"shift_factor": 0.0609,
"scaling_factor": 1.5035,
}
tokenizer_cls, tokenizer_id = AutoTokenizer, "hf-internal-testing/tiny-random-t5"
text_encoder_cls, text_encoder_id = UMT5EncoderModel, "hf-internal-testing/tiny-random-umt5"
text_encoder_target_modules = ["q", "k", "v", "o"]
denoiser_target_modules = ["to_q", "to_k", "to_v", "to_out.0", "linear_1"]
@property
def output_shape(self):
return (1, 8, 8, 3)
def get_dummy_inputs(self, with_generator=True):
batch_size = 1
sequence_length = 10
num_channels = 4
sizes = (32, 32)
generator = torch.manual_seed(0)
noise = floats_tensor((batch_size, num_channels) + sizes)
input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator)
pipeline_inputs = {
"prompt": "A painting of a squirrel eating a burger",
"num_inference_steps": 4,
"guidance_scale": 0.0,
"height": 8,
"width": 8,
"output_type": "np",
}
if with_generator:
pipeline_inputs.update({"generator": generator})
return noise, input_ids, pipeline_inputs
@unittest.skip("Not supported in AuraFlow.")
def test_simple_inference_with_text_denoiser_block_scale(self):
pass
@unittest.skip("Not supported in AuraFlow.")
def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
pass
@unittest.skip("Not supported in AuraFlow.")
def test_modify_padding_mode(self):
pass
@unittest.skip("Text encoder LoRA is not supported in AuraFlow.")
def test_simple_inference_with_partial_text_lora(self):
pass
@unittest.skip("Text encoder LoRA is not supported in AuraFlow.")
def test_simple_inference_with_text_lora(self):
pass
@unittest.skip("Text encoder LoRA is not supported in AuraFlow.")
def test_simple_inference_with_text_lora_and_scale(self):
pass
@unittest.skip("Text encoder LoRA is not supported in AuraFlow.")
def test_simple_inference_with_text_lora_fused(self):
pass
@unittest.skip("Text encoder LoRA is not supported in AuraFlow.")
def test_simple_inference_with_text_lora_save_load(self):
pass
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
import torch
from transformers import AutoTokenizer, UMT5EncoderModel
from diffusers import (
AuraFlowPipeline,
AuraFlowTransformer2DModel,
FlowMatchEulerDiscreteScheduler,
)
from diffusers.utils.testing_utils import (
floats_tensor,
is_peft_available,
require_peft_backend,
)
if is_peft_available():
pass
sys.path.append(".")
from utils import PeftLoraLoaderMixinTests # noqa: E402
@require_peft_backend
class AuraFlowLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
pipeline_class = AuraFlowPipeline
scheduler_cls = FlowMatchEulerDiscreteScheduler
scheduler_classes = [FlowMatchEulerDiscreteScheduler]
scheduler_kwargs = {}
transformer_kwargs = {
"sample_size": 64,
"patch_size": 1,
"in_channels": 4,
"num_mmdit_layers": 1,
"num_single_dit_layers": 1,
"attention_head_dim": 16,
"num_attention_heads": 2,
"joint_attention_dim": 32,
"caption_projection_dim": 32,
"pos_embed_max_size": 64,
}
transformer_cls = AuraFlowTransformer2DModel
vae_kwargs = {
"sample_size": 32,
"in_channels": 3,
"out_channels": 3,
"block_out_channels": (4,),
"layers_per_block": 1,
"latent_channels": 4,
"norm_num_groups": 1,
"use_quant_conv": False,
"use_post_quant_conv": False,
"shift_factor": 0.0609,
"scaling_factor": 1.5035,
}
tokenizer_cls, tokenizer_id = AutoTokenizer, "hf-internal-testing/tiny-random-t5"
text_encoder_cls, text_encoder_id = UMT5EncoderModel, "hf-internal-testing/tiny-random-umt5"
text_encoder_target_modules = ["q", "k", "v", "o"]
denoiser_target_modules = ["to_q", "to_k", "to_v", "to_out.0", "linear_1"]
@property
def output_shape(self):
return (1, 8, 8, 3)
def get_dummy_inputs(self, with_generator=True):
batch_size = 1
sequence_length = 10
num_channels = 4
sizes = (32, 32)
generator = torch.manual_seed(0)
noise = floats_tensor((batch_size, num_channels) + sizes)
input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator)
pipeline_inputs = {
"prompt": "A painting of a squirrel eating a burger",
"num_inference_steps": 4,
"guidance_scale": 0.0,
"height": 8,
"width": 8,
"output_type": "np",
}
if with_generator:
pipeline_inputs.update({"generator": generator})
return noise, input_ids, pipeline_inputs
@unittest.skip("Not supported in AuraFlow.")
def test_simple_inference_with_text_denoiser_block_scale(self):
pass
@unittest.skip("Not supported in AuraFlow.")
def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
pass
@unittest.skip("Not supported in AuraFlow.")
def test_modify_padding_mode(self):
pass
@unittest.skip("Text encoder LoRA is not supported in AuraFlow.")
def test_simple_inference_with_partial_text_lora(self):
pass
@unittest.skip("Text encoder LoRA is not supported in AuraFlow.")
def test_simple_inference_with_text_lora(self):
pass
@unittest.skip("Text encoder LoRA is not supported in AuraFlow.")
def test_simple_inference_with_text_lora_and_scale(self):
pass
@unittest.skip("Text encoder LoRA is not supported in AuraFlow.")
def test_simple_inference_with_text_lora_fused(self):
pass
@unittest.skip("Text encoder LoRA is not supported in AuraFlow.")
def test_simple_inference_with_text_lora_save_load(self):
pass
|
"""FastAPI framework, high performance, easy to learn, fast to code, ready for production"""
__version__ = "0.115.13"
from starlette import status as status
from .applications import FastAPI as FastAPI
from .background import BackgroundTasks as BackgroundTasks
from .datastructures import UploadFile as UploadFile
from .exceptions import HTTPException as HTTPException
from .exceptions import WebSocketException as WebSocketException
from .param_functions import Body as Body
from .param_functions import Cookie as Cookie
from .param_functions import Depends as Depends
from .param_functions import File as File
from .param_functions import Form as Form
from .param_functions import Header as Header
from .param_functions import Path as Path
from .param_functions import Query as Query
from .param_functions import Security as Security
from .requests import Request as Request
from .responses import Response as Response
from .routing import APIRouter as APIRouter
from .websockets import WebSocket as WebSocket
from .websockets import WebSocketDisconnect as WebSocketDisconnect
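Every name re-exported above is importable directly from the fastapi package; a minimal app exercising a few of them:
# Minimal example built only from names re-exported in the __init__ above.
from fastapi import Depends, FastAPI, HTTPException, Query, status

app = FastAPI()

def pagination(limit: int = Query(10, le=100)) -> int:
    return limit

@app.get("/items")
def list_items(limit: int = Depends(pagination)):
    if limit <= 0:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST, detail="limit must be positive"
        )
    return {"limit": limit}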
|
"""FastAPI framework, high performance, easy to learn, fast to code, ready for production"""
__version__ = "0.115.12"
from starlette import status as status
from .applications import FastAPI as FastAPI
from .background import BackgroundTasks as BackgroundTasks
from .datastructures import UploadFile as UploadFile
from .exceptions import HTTPException as HTTPException
from .exceptions import WebSocketException as WebSocketException
from .param_functions import Body as Body
from .param_functions import Cookie as Cookie
from .param_functions import Depends as Depends
from .param_functions import File as File
from .param_functions import Form as Form
from .param_functions import Header as Header
from .param_functions import Path as Path
from .param_functions import Query as Query
from .param_functions import Security as Security
from .requests import Request as Request
from .responses import Response as Response
from .routing import APIRouter as APIRouter
from .websockets import WebSocket as WebSocket
from .websockets import WebSocketDisconnect as WebSocketDisconnect
|
"""Tool for the Merriam-Webster API."""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.merriam_webster import MerriamWebsterAPIWrapper
class MerriamWebsterQueryRun(BaseTool):
"""Tool that searches the Merriam-Webster API."""
name: str = "merriam_webster"
description: str = (
"A wrapper around Merriam-Webster. "
"Useful for when you need to get the definition of a word."
"Input should be the word you want the definition of."
)
api_wrapper: MerriamWebsterAPIWrapper
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the Merriam-Webster tool."""
return self.api_wrapper.run(query)
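A hedged usage sketch for the tool above; it assumes the wrapper picks up a Merriam-Webster API key from the environment (the variable name follows langchain-community's convention and is an assumption here):
# Hedged usage sketch; the API key value is a placeholder.
import os

os.environ["MERRIAM_WEBSTER_API_KEY"] = "<your-api-key>"
tool = MerriamWebsterQueryRun(api_wrapper=MerriamWebsterAPIWrapper())
print(tool.run("serendipity"))  # definition text returned by the API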
|
"""Tool for the Merriam-Webster API."""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.merriam_webster import MerriamWebsterAPIWrapper
class MerriamWebsterQueryRun(BaseTool): # type: ignore[override]
"""Tool that searches the Merriam-Webster API."""
name: str = "merriam_webster"
description: str = (
"A wrapper around Merriam-Webster. "
"Useful for when you need to get the definition of a word."
"Input should be the word you want the definition of."
)
api_wrapper: MerriamWebsterAPIWrapper
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the Merriam-Webster tool."""
return self.api_wrapper.run(query)
|
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
from torchvision.utils import _log_api_usage_once
from ._utils import _get_kernel, _register_explicit_noop, _register_kernel_internal
@_register_explicit_noop(datapoints.Mask, datapoints.BoundingBoxes, warn_passthrough=True)
def erase(
inpt: torch.Tensor,
i: int,
j: int,
h: int,
w: int,
v: torch.Tensor,
inplace: bool = False,
) -> torch.Tensor:
if torch.jit.is_scripting():
return erase_image_tensor(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
_log_api_usage_once(erase)
kernel = _get_kernel(erase, type(inpt))
return kernel(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
@_register_kernel_internal(erase, torch.Tensor)
@_register_kernel_internal(erase, datapoints.Image)
def erase_image_tensor(
image: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
if not inplace:
image = image.clone()
image[..., i : i + h, j : j + w] = v
return image
@_register_kernel_internal(erase, PIL.Image.Image)
def erase_image_pil(
image: PIL.Image.Image, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> PIL.Image.Image:
t_img = pil_to_tensor(image)
output = erase_image_tensor(t_img, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return to_pil_image(output, mode=image.mode)
@_register_kernel_internal(erase, datapoints.Video)
def erase_video(
video: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
return erase_image_tensor(video, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
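The kernels above are plain tensor surgery: write v into the (i, j) to (i+h, j+w) window of the last two dimensions. A self-contained demo of the same semantics in bare torch:
# Demo of the erase semantics: i=1, j=2, h=2, w=4 on a CHW image.
import torch

image = torch.zeros(3, 8, 8)
v = torch.ones(3, 2, 4)  # replacement patch matching h and w
out = image.clone()      # the inplace=False path clones first
out[..., 1:3, 2:6] = v
assert out[:, 1:3, 2:6].eq(1).all() and image.eq(0).all()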
|
from typing import Union
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
from torchvision.utils import _log_api_usage_once
from ._utils import _get_kernel, _register_explicit_noop, _register_kernel_internal
@_register_explicit_noop(datapoints.Mask, datapoints.BoundingBoxes, warn_passthrough=True)
def erase(
inpt: Union[datapoints._ImageTypeJIT, datapoints._VideoTypeJIT],
i: int,
j: int,
h: int,
w: int,
v: torch.Tensor,
inplace: bool = False,
) -> Union[datapoints._ImageTypeJIT, datapoints._VideoTypeJIT]:
if torch.jit.is_scripting():
return erase_image_tensor(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
_log_api_usage_once(erase)
kernel = _get_kernel(erase, type(inpt))
return kernel(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
@_register_kernel_internal(erase, torch.Tensor)
@_register_kernel_internal(erase, datapoints.Image)
def erase_image_tensor(
image: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
if not inplace:
image = image.clone()
image[..., i : i + h, j : j + w] = v
return image
@_register_kernel_internal(erase, PIL.Image.Image)
def erase_image_pil(
image: PIL.Image.Image, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> PIL.Image.Image:
t_img = pil_to_tensor(image)
output = erase_image_tensor(t_img, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return to_pil_image(output, mode=image.mode)
@_register_kernel_internal(erase, datapoints.Video)
def erase_video(
video: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
return erase_image_tensor(video, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
|
def rgb_to_grayscale(images, data_format=None):
raise NotImplementedError(
"`rgb_to_grayscale` is not supported with openvino backend"
)
def resize(
image,
size,
interpolation="bilinear",
antialias=False,
crop_to_aspect_ratio=False,
pad_to_aspect_ratio=False,
fill_mode="constant",
fill_value=0.0,
data_format="channels_last",
):
raise NotImplementedError("`resize` is not supported with openvino backend")
def affine_transform(
images,
transform,
interpolation="bilinear",
fill_mode="constant",
fill_value=0,
data_format=None,
):
raise NotImplementedError(
"`affine_transform` is not supported with openvino backend"
)
def map_coordinates(
inputs, coordinates, order, fill_mode="constant", fill_value=0
):
raise NotImplementedError(
"`map_coordinates` is not supported with openvino backend"
)
|
def rgb_to_grayscale(image, data_format="channels_last"):
raise NotImplementedError(
"`rgb_to_grayscale` is not supported with openvino backend"
)
def resize(
image,
size,
interpolation="bilinear",
antialias=False,
crop_to_aspect_ratio=False,
pad_to_aspect_ratio=False,
fill_mode="constant",
fill_value=0.0,
data_format="channels_last",
):
raise NotImplementedError("`resize` is not supported with openvino backend")
def affine_transform(
image,
transform,
interpolation="bilinear",
fill_mode="constant",
fill_value=0,
data_format="channels_last",
):
raise NotImplementedError(
"`affine_transform` is not supported with openvino backend"
)
def map_coordinates(
input, coordinates, order, fill_mode="constant", fill_value=0.0
):
raise NotImplementedError(
"`map_coordinates` is not supported with openvino backend"
)
|
import logging
import os
import sys
from torchaudio._internal.module_utils import fail_with_message, is_module_available, no_op
from .utils import _check_cuda_version, _fail_since_no_ffmpeg, _init_dll_path, _init_ffmpeg, _init_sox, _load_lib
_LG = logging.getLogger(__name__)
# Note:
# `_check_cuda_version` is not meant to be used by regular users.
# Builder uses it for debugging purposes, so we export it.
# https://github.com/pytorch/builder/blob/e2e4542b8eb0bdf491214451a1a4128bd606cce2/test/smoke_test/smoke_test.py#L80
__all__ = [
"fail_if_no_sox",
"fail_if_no_ffmpeg",
"_check_cuda_version",
"_IS_TORCHAUDIO_EXT_AVAILABLE",
"_IS_RIR_AVAILABLE",
"_SOX_INITIALIZED",
"_FFMPEG_INITIALIZED",
]
if os.name == "nt" and (3, 8) <= sys.version_info < (3, 9):
_init_dll_path()
# When the extension module is built, we initialize it.
# In case of an error, we do not catch the failure as it suggests there is something
# wrong with the installation.
_IS_TORCHAUDIO_EXT_AVAILABLE = is_module_available("torchaudio.lib._torchaudio")
# RIR features are implemented in _torchaudio extension, but they can be individually
# turned on/off at build time. Available means that _torchaudio is loaded properly, and
# RIR features are found there.
_IS_RIR_AVAILABLE = False
_IS_ALIGN_AVAILABLE = False
if _IS_TORCHAUDIO_EXT_AVAILABLE:
_load_lib("libtorchaudio")
import torchaudio.lib._torchaudio # noqa
_check_cuda_version()
_IS_RIR_AVAILABLE = torchaudio.lib._torchaudio.is_rir_available()
_IS_ALIGN_AVAILABLE = torchaudio.lib._torchaudio.is_align_available()
# Similar to libtorchaudio, sox-related features should be importable when present.
#
# Note: This will change in the future when sox is dynamically linked.
# At that point, this initialization should handle the case where
# sox integration is built but libsox is not found.
_SOX_INITIALIZED = False
if is_module_available("torchaudio.lib._torchaudio_sox"):
_init_sox()
_SOX_INITIALIZED = True
# Initialize FFmpeg-related features
_FFMPEG_INITIALIZED = False
if is_module_available("torchaudio.lib._torchaudio_ffmpeg"):
try:
_init_ffmpeg()
_FFMPEG_INITIALIZED = True
except Exception:
# The initialization of FFmpeg extension will fail if supported FFmpeg
# libraries are not found in the system.
# Since the rest of the torchaudio works without it, we do not report the
# error here.
# The error will be raised when user code attempts to use these features.
_LG.debug("Failed to initialize ffmpeg bindings", exc_info=True)
fail_if_no_sox = (
no_op
if _SOX_INITIALIZED
else fail_with_message(
"requires sox extension, but TorchAudio is not compiled with it. Please build TorchAudio with libsox support."
)
)
fail_if_no_ffmpeg = no_op if _FFMPEG_INITIALIZED else _fail_since_no_ffmpeg
fail_if_no_rir = (
no_op
if _IS_RIR_AVAILABLE
else fail_with_message(
"requires RIR extension, but TorchAudio is not compiled with it. Please build TorchAudio with RIR support."
)
)
fail_if_no_align = (
no_op
if _IS_ALIGN_AVAILABLE
else fail_with_message(
"Requires alignment extension, but TorchAudio is not compiled with it. \
Please build TorchAudio with alignment support."
)
)
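The fail_if_no_* objects all follow one gating pattern: a no-op decorator when the extension initialized, otherwise a decorator that raises at call time. A generic standalone sketch of that pattern (illustrative only, not torchaudio's actual internals):
# Generic availability-gating sketch; not torchaudio's real implementation.
import functools

def no_op(func):
    return func  # extension present: leave the function untouched

def fail_with_message(message):
    def decorator(func):
        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            raise RuntimeError(f"{func.__name__} {message}")
        return wrapped
    return decorator

_EXT_AVAILABLE = False  # imagine this was probed at import time
fail_if_no_ext = no_op if _EXT_AVAILABLE else fail_with_message("requires the extension")

@fail_if_no_ext
def load_audio(path):
    ...  # real work would go here

# Calling load_audio(...) now raises RuntimeError until the extension is built.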
|
import logging
import os
import sys
from torchaudio._internal.module_utils import fail_with_message, is_module_available, no_op
from .utils import _check_cuda_version, _fail_since_no_ffmpeg, _init_dll_path, _init_ffmpeg, _init_sox, _load_lib
_LG = logging.getLogger(__name__)
# Note:
# `_check_cuda_version` is not meant to be used by regular users.
# Builder uses it for debugging purposes, so we export it.
# https://github.com/pytorch/builder/blob/e2e4542b8eb0bdf491214451a1a4128bd606cce2/test/smoke_test/smoke_test.py#L80
__all__ = [
"fail_if_no_kaldi",
"fail_if_no_sox",
"fail_if_no_ffmpeg",
"_check_cuda_version",
"_IS_TORCHAUDIO_EXT_AVAILABLE",
"_IS_KALDI_AVAILABLE",
"_IS_RIR_AVAILABLE",
"_SOX_INITIALIZED",
"_FFMPEG_INITIALIZED",
]
if os.name == "nt" and (3, 8) <= sys.version_info < (3, 9):
_init_dll_path()
# When the extension module is built, we initialize it.
# In case of an error, we do not catch the failure as it suggests there is something
# wrong with the installation.
_IS_TORCHAUDIO_EXT_AVAILABLE = is_module_available("torchaudio.lib._torchaudio")
# Kaldi and RIR features are implemented in _torchaudio extension, but they can be individually
# turned on/off at build time. Available means that _torchaudio is loaded properly, and
# Kaldi or RIR features are found there.
_IS_RIR_AVAILABLE = False
_IS_KALDI_AVAILABLE = False
_IS_ALIGN_AVAILABLE = False
if _IS_TORCHAUDIO_EXT_AVAILABLE:
_load_lib("libtorchaudio")
import torchaudio.lib._torchaudio # noqa
_check_cuda_version()
_IS_RIR_AVAILABLE = torchaudio.lib._torchaudio.is_rir_available()
_IS_KALDI_AVAILABLE = torchaudio.lib._torchaudio.is_kaldi_available()
_IS_ALIGN_AVAILABLE = torchaudio.lib._torchaudio.is_align_available()
# Similar to libtorchaudio, sox-related features should be importable when present.
#
# Note: This will change in the future when sox is dynamically linked.
# At that point, this initialization should handle the case where
# sox integration is built but libsox is not found.
_SOX_INITIALIZED = False
if is_module_available("torchaudio.lib._torchaudio_sox"):
_init_sox()
_SOX_INITIALIZED = True
# Initialize FFmpeg-related features
_FFMPEG_INITIALIZED = False
if is_module_available("torchaudio.lib._torchaudio_ffmpeg"):
try:
_init_ffmpeg()
_FFMPEG_INITIALIZED = True
except Exception:
# The initialization of FFmpeg extension will fail if supported FFmpeg
# libraries are not found in the system.
# Since the rest of the torchaudio works without it, we do not report the
# error here.
# The error will be raised when user code attempts to use these features.
_LG.debug("Failed to initialize ffmpeg bindings", exc_info=True)
fail_if_no_kaldi = (
no_op
if _IS_KALDI_AVAILABLE
else fail_with_message(
"requires kaldi extension, but TorchAudio is not compiled with it. Please build TorchAudio with kaldi support."
)
)
fail_if_no_sox = (
no_op
if _SOX_INITIALIZED
else fail_with_message(
"requires sox extension, but TorchAudio is not compiled with it. Please build TorchAudio with libsox support."
)
)
fail_if_no_ffmpeg = no_op if _FFMPEG_INITIALIZED else _fail_since_no_ffmpeg
fail_if_no_rir = (
no_op
if _IS_RIR_AVAILABLE
else fail_with_message(
"requires RIR extension, but TorchAudio is not compiled with it. Please build TorchAudio with RIR support."
)
)
fail_if_no_align = (
no_op
if _IS_ALIGN_AVAILABLE
else fail_with_message(
"Requires alignment extension, but TorchAudio is not compiled with it. \
Please build TorchAudio with alignment support."
)
)
|
from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor, metaTensorFlow
T = TypeVar('T', bound='AudioTensorFlowTensor')
@_register_proto(proto_type_name='audio_tensorflow_tensor')
class AudioTensorFlowTensor(
AbstractAudioTensor, TensorFlowTensor, metaclass=metaTensorFlow
):
"""
Subclass of [`TensorFlowTensor`][docarray.typing.TensorFlowTensor],
to represent an audio tensor. Adds audio-specific features to the tensor.
---
```python
from typing import Optional
import tensorflow as tf
from docarray import BaseDoc
from docarray.typing import AudioBytes, AudioTensorFlowTensor, AudioUrl
class MyAudioDoc(BaseDoc):
title: str
audio_tensor: Optional[AudioTensorFlowTensor]
url: Optional[AudioUrl]
bytes_: Optional[AudioBytes]
doc_1 = MyAudioDoc(
title='my_first_audio_doc',
audio_tensor=tf.random.normal((1000, 2)),
)
# doc_1.audio_tensor.save(file_path='file_1.wav')
doc_1.bytes_ = doc_1.audio_tensor.to_bytes()
doc_2 = MyAudioDoc(
title='my_second_audio_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.audio_tensor, _ = doc_2.url.load()
doc_2.bytes_ = doc_1.audio_tensor.to_bytes()
```
---
"""
...
|
from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor, metaTensorFlow
T = TypeVar('T', bound='AudioTensorFlowTensor')
@_register_proto(proto_type_name='audio_tensorflow_tensor')
class AudioTensorFlowTensor(
AbstractAudioTensor, TensorFlowTensor, metaclass=metaTensorFlow
):
"""
Subclass of TensorFlowTensor, to represent an audio tensor.
Adds audio-specific features to the tensor.
```
---
from typing import Optional
import tensorflow as tf
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.typing import AudioBytes, AudioTensorFlowTensor, AudioUrl
class MyAudioDoc(BaseDoc):
title: str
audio_tensor: Optional[AudioTensorFlowTensor]
url: Optional[AudioUrl]
bytes_: Optional[AudioBytes]
doc_1 = MyAudioDoc(
title='my_first_audio_doc',
audio_tensor=tf.random.normal((1000, 2)),
)
doc_1.audio_tensor.save(file_path='path/to/file_1.wav')
doc_1.bytes_ = doc_1.audio_tensor.to_bytes()
doc_2 = MyAudioDoc(
title='my_second_audio_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.audio_tensor = doc_2.url.load()
doc_2.audio_tensor.save(file_path='path/to/file_2.wav')
doc_2.bytes_ = doc_1.audio_tensor.to_bytes()
```
---
"""
...
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .detr import DETR
@MODELS.register_module()
class DeformableDETR(DETR):
def __init__(self, *args, **kwargs):
super(DETR, self).__init__(*args, **kwargs)
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .detr import DETR
@DETECTORS.register_module()
class DeformableDETR(DETR):
def __init__(self, *args, **kwargs):
super(DETR, self).__init__(*args, **kwargs)
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
preprocess_cfg = dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True,
pad_size_divisor=32)
model = dict(
preprocess_cfg=preprocess_cfg,
type='ATSS',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='ATSSHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='ATSS',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='ATSSHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
|
from typing import Any, List, Optional, Tuple
import numpy as np
import pytest
from docarray import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
from docarray.typing import NdArray
from docarray.utils._internal.pydantic import is_pydantic_v2
def test_base_document_init():
doc = BaseDoc()
assert doc.id is not None
def test_update():
class MyDocument(BaseDoc):
content: str
title: Optional[str] = None
tags_: List
doc1 = MyDocument(
content='Core content of the document', title='Title', tags_=['python', 'AI']
)
doc2 = MyDocument(content='Core content updated', tags_=['docarray'])
doc1.update(doc2)
assert doc1.content == 'Core content updated'
assert doc1.title == 'Title'
assert doc1.tags_ == ['python', 'AI', 'docarray']
def test_equal_nested_docs():
import numpy as np
from docarray import BaseDoc, DocList
from docarray.typing import NdArray
class SimpleDoc(BaseDoc):
simple_tens: NdArray[10]
class NestedDoc(BaseDoc):
docs: DocList[SimpleDoc]
nested_docs = NestedDoc(
docs=DocList[SimpleDoc]([SimpleDoc(simple_tens=np.ones(10)) for j in range(2)]),
)
assert nested_docs == nested_docs
@pytest.fixture
def nested_docs():
class SimpleDoc(BaseDoc):
simple_tens: NdArray[10]
class NestedDoc(BaseDoc):
docs: DocList[SimpleDoc]
hello: str = 'world'
nested_docs = NestedDoc(
docs=DocList[SimpleDoc]([SimpleDoc(simple_tens=np.ones(10)) for j in range(2)]),
)
return nested_docs
@pytest.fixture
def nested_docs_docvec():
class SimpleDoc(BaseDoc):
simple_tens: NdArray[10]
class NestedDoc(BaseDoc):
docs: DocVec[SimpleDoc]
hello: str = 'world'
nested_docs = NestedDoc(
docs=DocList[SimpleDoc]([SimpleDoc(simple_tens=np.ones(10)) for j in range(2)]),
)
return nested_docs
def test_nested_to_dict(nested_docs):
d = nested_docs.dict()
assert (d['docs'][0]['simple_tens'] == np.ones(10)).all()
assert isinstance(d['docs'], list)
assert not isinstance(d['docs'], DocList)
def test_nested_docvec_to_dict(nested_docs_docvec):
d = nested_docs_docvec.dict()
assert (d['docs'][0]['simple_tens'] == np.ones(10)).all()
def test_nested_to_dict_exclude(nested_docs):
d = nested_docs.dict(exclude={'docs'})
assert 'docs' not in d.keys()
def test_nested_to_dict_exclude_set(nested_docs):
d = nested_docs.dict(exclude={'hello'})
assert 'hello' not in d.keys()
def test_nested_to_dict_exclude_dict(nested_docs):
d = nested_docs.dict(exclude={'hello': True})
assert 'hello' not in d.keys()
@pytest.mark.skipif(is_pydantic_v2, reason="Not working with pydantic v2 for now")
def test_nested_to_json(nested_docs):
d = nested_docs.json()
nested_docs.__class__.parse_raw(d)
@pytest.fixture
def nested_none_docs():
class SimpleDoc(BaseDoc):
simple_tens: NdArray[10]
class NestedDoc(BaseDoc):
docs: Optional[DocList[SimpleDoc]] = None
hello: str = 'world'
nested_docs = NestedDoc()
return nested_docs
def test_nested_none_to_dict(nested_none_docs):
d = nested_none_docs.dict()
assert d == {'docs': None, 'hello': 'world', 'id': nested_none_docs.id}
def test_nested_none_to_json(nested_none_docs):
d = nested_none_docs.json()
d = nested_none_docs.__class__.parse_raw(d)
assert d.dict() == {'docs': None, 'hello': 'world', 'id': nested_none_docs.id}
def test_get_get_field_inner_type():
class MyDoc(BaseDoc):
tuple_: Tuple
field_type = MyDoc._get_field_inner_type("tuple_")
assert field_type == Any
|
from typing import List, Optional
import numpy as np
import pytest
from docarray import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
from docarray.typing import NdArray
def test_base_document_init():
doc = BaseDoc()
assert doc.id is not None
def test_update():
class MyDocument(BaseDoc):
content: str
title: Optional[str] = None
tags_: List
doc1 = MyDocument(
content='Core content of the document', title='Title', tags_=['python', 'AI']
)
doc2 = MyDocument(content='Core content updated', tags_=['docarray'])
doc1.update(doc2)
assert doc1.content == 'Core content updated'
assert doc1.title == 'Title'
assert doc1.tags_ == ['python', 'AI', 'docarray']
def test_equal_nested_docs():
import numpy as np
from docarray import BaseDoc, DocList
from docarray.typing import NdArray
class SimpleDoc(BaseDoc):
simple_tens: NdArray[10]
class NestedDoc(BaseDoc):
docs: DocList[SimpleDoc]
nested_docs = NestedDoc(
docs=DocList[SimpleDoc]([SimpleDoc(simple_tens=np.ones(10)) for j in range(2)]),
)
assert nested_docs == nested_docs
@pytest.fixture
def nested_docs():
class SimpleDoc(BaseDoc):
simple_tens: NdArray[10]
class NestedDoc(BaseDoc):
docs: DocList[SimpleDoc]
hello: str = 'world'
nested_docs = NestedDoc(
docs=DocList[SimpleDoc]([SimpleDoc(simple_tens=np.ones(10)) for j in range(2)]),
)
return nested_docs
@pytest.fixture
def nested_docs_docvec():
class SimpleDoc(BaseDoc):
simple_tens: NdArray[10]
class NestedDoc(BaseDoc):
docs: DocVec[SimpleDoc]
hello: str = 'world'
nested_docs = NestedDoc(
docs=DocList[SimpleDoc]([SimpleDoc(simple_tens=np.ones(10)) for j in range(2)]),
)
return nested_docs
def test_nested_to_dict(nested_docs):
d = nested_docs.dict()
assert (d['docs'][0]['simple_tens'] == np.ones(10)).all()
def test_nested_docvec_to_dict(nested_docs_docvec):
d = nested_docs_docvec.dict()
assert (d['docs'][0]['simple_tens'] == np.ones(10)).all()
def test_nested_to_dict_exclude(nested_docs):
d = nested_docs.dict(exclude={'docs'})
assert 'docs' not in d.keys()
def test_nested_to_dict_exclude_set(nested_docs):
d = nested_docs.dict(exclude={'hello'})
assert 'hello' not in d.keys()
def test_nested_to_dict_exclude_dict(nested_docs):
d = nested_docs.dict(exclude={'hello': True})
assert 'hello' not in d.keys()
def test_nested_to_json(nested_docs):
d = nested_docs.json()
nested_docs.__class__.parse_raw(d)
@pytest.fixture
def nested_none_docs():
class SimpleDoc(BaseDoc):
simple_tens: NdArray[10]
class NestedDoc(BaseDoc):
docs: Optional[DocList[SimpleDoc]]
hello: str = 'world'
nested_docs = NestedDoc()
return nested_docs
def test_nested_none_to_dict(nested_none_docs):
d = nested_none_docs.dict()
assert d == {'docs': None, 'hello': 'world', 'id': nested_none_docs.id}
def test_nested_none_to_json(nested_none_docs):
d = nested_none_docs.json()
d = nested_none_docs.__class__.parse_raw(d)
assert d.dict() == {'docs': None, 'hello': 'world', 'id': nested_none_docs.id}
|
from collections import ChainMap
from typing import (
TYPE_CHECKING,
Any,
Dict,
Iterable,
MutableMapping,
Type,
TypeVar,
Union,
)
from docarray.array.stacked.list_advance_indexing import ListAdvancedIndexing
from docarray.typing import NdArray
from docarray.typing.tensor.abstract_tensor import AbstractTensor
if TYPE_CHECKING:
from docarray.array.stacked.array_stacked import DocArrayStacked
IndexIterType = Union[slice, Iterable[int], Iterable[bool], None]
T = TypeVar('T', bound='ColumnStorage')
class ColumnStorage:
"""
ColumnStorage is a container to store the columns of the
:class:`~docarray.array.stacked.DocArrayStacked`.
:param tensor_columns: a Dict of AbstractTensor
:param doc_columns: a Dict of :class:`~docarray.array.stacked.DocArrayStacked`
:param da_columns: a Dict of List of :class:`~docarray.array.stacked.DocArrayStacked`
:param any_columns: a Dict of List
:param tensor_type: Class used to wrap the stacked tensors
"""
def __init__(
self,
tensor_columns: Dict[str, AbstractTensor],
doc_columns: Dict[str, 'DocArrayStacked'],
da_columns: Dict[str, ListAdvancedIndexing['DocArrayStacked']],
any_columns: Dict[str, ListAdvancedIndexing],
tensor_type: Type[AbstractTensor] = NdArray,
):
self.tensor_columns = tensor_columns
self.doc_columns = doc_columns
self.da_columns = da_columns
self.any_columns = any_columns
self.tensor_type = tensor_type
self.columns = ChainMap( # type: ignore
self.tensor_columns, # type: ignore
self.doc_columns, # type: ignore
self.da_columns, # type: ignore
self.any_columns, # type: ignore
) # type: ignore
def __len__(self) -> int:
        return len(self.any_columns['id'])  # TODO: what if IDs are None?
def __getitem__(self: T, item: IndexIterType) -> T:
if isinstance(item, tuple):
item = list(item)
tensor_columns = {key: col[item] for key, col in self.tensor_columns.items()}
doc_columns = {key: col[item] for key, col in self.doc_columns.items()}
da_columns = {key: col[item] for key, col in self.da_columns.items()}
any_columns = {key: col[item] for key, col in self.any_columns.items()}
return self.__class__(
tensor_columns,
doc_columns,
da_columns,
any_columns,
self.tensor_type,
)
class ColumnStorageView(dict, MutableMapping[str, Any]):
index: int
storage: ColumnStorage
def __init__(self, index: int, storage: ColumnStorage):
super().__init__()
self.index = index
self.storage = storage
def __getitem__(self, name: str) -> Any:
if name in self.storage.tensor_columns.keys():
tensor = self.storage.tensor_columns[name]
if tensor.get_comp_backend().n_dim(tensor) == 1:
                # to ensure consistency between numpy and pytorch
                # we wrap the scalar in a tensor of ndim = 1
                # otherwise numpy passes by value whereas torch passes by reference
return self.storage.tensor_columns[name][self.index : self.index + 1]
return self.storage.columns[name][self.index]
def __setitem__(self, name, value) -> None:
self.storage.columns[name][self.index] = value
def __delitem__(self, key):
raise RuntimeError('Cannot delete an item from a StorageView')
def __iter__(self):
        return iter(self.storage.columns.keys())
def __len__(self):
return len(self.storage.columns)
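The scalar-wrapping branch in ColumnStorageView.__getitem__ exists because 1-D indexing is not uniform across backends; a short demonstration of the asymmetry the comment refers to:
# Why the view returns a length-1 slice instead of a bare scalar:
# numpy scalar indexing copies, while slicing keeps a live view.
import numpy as np
import torch

np_col, torch_col = np.zeros(3), torch.zeros(3)
np_scalar = np_col[0]        # numpy scalar: a copy, writes won't propagate
torch_scalar = torch_col[0]  # 0-dim torch tensor: still shares storage
np_slice = np_col[0:1]       # length-1 view into the numpy column
np_slice[0] = 7.0
assert np_col[0] == 7.0      # mutation is visible through the slice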
|
from collections import ChainMap
from typing import (
TYPE_CHECKING,
Any,
Dict,
Iterable,
MutableMapping,
Type,
TypeVar,
Union,
)
from docarray.array.stacked.list_advance_indexing import ListAdvancedIndexing
from docarray.typing import NdArray
from docarray.typing.tensor.abstract_tensor import AbstractTensor
if TYPE_CHECKING:
from docarray.array.stacked.array_stacked import DocumentArrayStacked
IndexIterType = Union[slice, Iterable[int], Iterable[bool], None]
T = TypeVar('T', bound='ColumnStorage')
class ColumnStorage:
"""
ColumnStorage is a container to store the columns of the
:class:`~docarray.array.stacked.DocumentArrayStacked`.
:param tensor_columns: a Dict of AbstractTensor
:param doc_columns: a Dict of :class:`~docarray.array.stacked.DocumentArrayStacked`
:param da_columns: a Dict of List of :class:`~docarray.array.stacked.DocumentArrayStacked`
:param any_columns: a Dict of List
:param tensor_type: Class used to wrap the stacked tensors
"""
def __init__(
self,
tensor_columns: Dict[str, AbstractTensor],
doc_columns: Dict[str, 'DocumentArrayStacked'],
da_columns: Dict[str, ListAdvancedIndexing['DocumentArrayStacked']],
any_columns: Dict[str, ListAdvancedIndexing],
tensor_type: Type[AbstractTensor] = NdArray,
):
self.tensor_columns = tensor_columns
self.doc_columns = doc_columns
self.da_columns = da_columns
self.any_columns = any_columns
self.tensor_type = tensor_type
self.columns = ChainMap( # type: ignore
self.tensor_columns, # type: ignore
self.doc_columns, # type: ignore
self.da_columns, # type: ignore
self.any_columns, # type: ignore
) # type: ignore
def __len__(self) -> int:
        return len(self.any_columns['id'])  # TODO: what if IDs are None?
def __getitem__(self: T, item: IndexIterType) -> T:
if isinstance(item, tuple):
item = list(item)
tensor_columns = {key: col[item] for key, col in self.tensor_columns.items()}
doc_columns = {key: col[item] for key, col in self.doc_columns.items()}
da_columns = {key: col[item] for key, col in self.da_columns.items()}
any_columns = {key: col[item] for key, col in self.any_columns.items()}
return self.__class__(
tensor_columns,
doc_columns,
da_columns,
any_columns,
self.tensor_type,
)
class ColumnStorageView(dict, MutableMapping[str, Any]):
index: int
storage: ColumnStorage
def __init__(self, index: int, storage: ColumnStorage):
super().__init__()
self.index = index
self.storage = storage
def __getitem__(self, name: str) -> Any:
if name in self.storage.tensor_columns.keys():
tensor = self.storage.tensor_columns[name]
if tensor.get_comp_backend().n_dim(tensor) == 1:
                # to ensure consistency between numpy and pytorch
                # we wrap the scalar in a tensor of ndim = 1
                # otherwise numpy passes by value whereas torch passes by reference
return self.storage.tensor_columns[name][self.index : self.index + 1]
return self.storage.columns[name][self.index]
def __setitem__(self, name, value) -> None:
self.storage.columns[name][self.index] = value
def __delitem__(self, key):
raise RuntimeError('Cannot delete an item from a StorageView')
def __iter__(self):
        return iter(self.storage.columns.keys())
def __len__(self):
return len(self.storage.columns)
|
from llama_index.core.base.llms.base import BaseLLM
from llama_index.core.tools import FunctionTool
from unittest.mock import patch
from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
def test_embedding_class():
names_of_base_classes = [b.__name__ for b in HuggingFaceInferenceAPI.__mro__]
assert BaseLLM.__name__ in names_of_base_classes
def search(query: str) -> str:
"""Search for information about a query."""
return f"Results for {query}"
search_tool = FunctionTool.from_defaults(
fn=search, name="search_tool", description="A tool for searching information"
)
def test_prepare_chat_with_tools_tool_required():
"""Test that tool_required is correctly passed to the API request when True."""
with (
patch("huggingface_hub.InferenceClient"),
patch("huggingface_hub.AsyncInferenceClient"),
):
llm = HuggingFaceInferenceAPI(model_name="model_name")
# Test with tool_required=True
result = llm._prepare_chat_with_tools(tools=[search_tool], tool_required=True)
assert result["tool_choice"] == "required"
assert len(result["tools"]) == 1
assert result["tools"][0]["function"]["name"] == "search_tool"
def test_prepare_chat_with_tools_tool_not_required():
"""Test that tool_required is correctly passed to the API request when False."""
with (
patch("huggingface_hub.InferenceClient"),
patch("huggingface_hub.AsyncInferenceClient"),
):
llm = HuggingFaceInferenceAPI(model_name="model_name")
# Test with tool_required=False (default)
result = llm._prepare_chat_with_tools(
tools=[search_tool],
)
assert result["tool_choice"] == "auto"
assert len(result["tools"]) == 1
assert result["tools"][0]["function"]["name"] == "search_tool"
|
from llama_index.core.base.llms.base import BaseLLM
from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
def test_embedding_class():
names_of_base_classes = [b.__name__ for b in HuggingFaceInferenceAPI.__mro__]
assert BaseLLM.__name__ in names_of_base_classes
|
__version__ = '0.13.0'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
if 'NO_VERSION_CHECK' not in os.environ:
from .helper import is_latest_version
is_latest_version()
|
__version__ = '0.12.10'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
if 'NO_VERSION_CHECK' not in os.environ:
from .helper import is_latest_version
is_latest_version()
|
from torchvision import _BETA_TRANSFORMS_WARNING, _WARN_ABOUT_BETA_TRANSFORMS
from ._bounding_box import BoundingBoxes, BoundingBoxFormat
from ._datapoint import _FillType, _FillTypeJIT, _InputType, _InputTypeJIT, Datapoint
from ._image import _ImageType, _ImageTypeJIT, _TensorImageType, _TensorImageTypeJIT, Image
from ._mask import Mask
from ._video import _TensorVideoType, _TensorVideoTypeJIT, _VideoType, _VideoTypeJIT, Video
if _WARN_ABOUT_BETA_TRANSFORMS:
import warnings
warnings.warn(_BETA_TRANSFORMS_WARNING)
|
from torchvision import _BETA_TRANSFORMS_WARNING, _WARN_ABOUT_BETA_TRANSFORMS
from ._bounding_box import BoundingBoxes, BoundingBoxFormat
from ._datapoint import _FillType, _FillTypeJIT, _InputType, _InputTypeJIT
from ._image import _ImageType, _ImageTypeJIT, _TensorImageType, _TensorImageTypeJIT, Image
from ._mask import Mask
from ._video import _TensorVideoType, _TensorVideoTypeJIT, _VideoType, _VideoTypeJIT, Video
if _WARN_ABOUT_BETA_TRANSFORMS:
import warnings
warnings.warn(_BETA_TRANSFORMS_WARNING)
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.14.6.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.14.5"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
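A brief usage sketch of the public API re-exported by the `__init__` above; the dataset name is illustrative:
from datasets import load_dataset
ds = load_dataset("imdb", split="train")  # returns a Dataset backed by Arrow
print(ds)  # summary of features and num_rows
for row in ds.select(range(2)):
    print(row["label"], row["text"][:50])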
from __future__ import annotations
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder, SparseEncoderTrainer, SparseEncoderTrainingArguments
from sentence_transformers.evaluation import SequentialEvaluator
from sentence_transformers.models import Pooling, Transformer
from sentence_transformers.sparse_encoder import evaluation, losses, models
from sentence_transformers.training_args import BatchSamplers
# Set up logging
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
def main():
# Initialize model components
model_name = "microsoft/mpnet-base"
transformer = Transformer(model_name)
# transformer.requires_grad_(False) # Freeze the transformer model
pooling = Pooling(transformer.get_word_embedding_dimension(), pooling_mode="mean")
csr_sparsity = models.CSRSparsity(
input_dim=transformer.get_word_embedding_dimension(),
hidden_dim=4 * transformer.get_word_embedding_dimension(),
k=8, # Number of top values to keep
k_aux=512, # Number of top values for auxiliary loss
)
# Create the SparseEncoder model
model = SparseEncoder(modules=[transformer, pooling, csr_sparsity])
# 2a. Load the NQ dataset: https://huggingface.co/datasets/sentence-transformers/natural-questions
logging.info("Read the Natural Questions training dataset")
full_dataset = load_dataset("sentence-transformers/natural-questions", split="train").select(range(100_000))
dataset_dict = full_dataset.train_test_split(test_size=1_000, seed=12)
train_dataset = dataset_dict["train"]
eval_dataset = dataset_dict["test"]
logging.info(train_dataset)
logging.info(eval_dataset)
# 3. Initialize the loss
loss = losses.CSRLoss(
model=model,
beta=0.1, # Weight for auxiliary loss
gamma=1, # Weight for ranking loss
scale=20.0, # Scale for similarity computation
)
    # 4. Define an evaluator for use during training, so we can track retrieval quality alongside the evaluation loss.
evaluators = []
for k_dim in [16, 32, 64, 128, 256]:
evaluators.append(evaluation.SparseNanoBEIREvaluator(["msmarco", "nfcorpus", "nq"], max_active_dims=k_dim))
dev_evaluator = SequentialEvaluator(evaluators, main_score_function=lambda scores: scores[-1])
dev_evaluator(model)
# Set up training arguments
run_name = "sparse-mpnet-base-nq-fresh"
training_args = SparseEncoderTrainingArguments(
output_dir=f"models/{run_name}",
num_train_epochs=1,
per_device_train_batch_size=32,
per_device_eval_batch_size=32,
warmup_ratio=0.1,
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
        batch_sampler=BatchSamplers.NO_DUPLICATES,  # the in-batch ranking component benefits from no duplicate samples in a batch
logging_steps=200,
eval_strategy="steps",
eval_steps=400,
save_strategy="steps",
save_steps=400,
learning_rate=4e-5,
optim="adamw_torch",
weight_decay=1e-4,
adam_epsilon=6.25e-10,
run_name=run_name,
)
# Initialize trainer
trainer = SparseEncoderTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_evaluator,
)
# Train model
trainer.train()
# 7. Evaluate the model performance again after training
dev_evaluator(model)
# 8. Save the trained & evaluated model locally
model.save_pretrained(f"models/{run_name}/final")
model.push_to_hub(run_name)
if __name__ == "__main__":
main()
|
from __future__ import annotations
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder, SparseEncoderTrainer, SparseEncoderTrainingArguments
from sentence_transformers.models import Pooling, Transformer
from sentence_transformers.sparse_encoder import evaluation, losses, models
from sentence_transformers.training_args import BatchSamplers
# Set up logging
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
def main():
# Initialize model components
model_name = "microsoft/mpnet-base"
transformer = Transformer(model_name)
# transformer.requires_grad_(False) # Freeze the transformer model
pooling = Pooling(transformer.get_word_embedding_dimension(), pooling_mode="mean")
csr_sparsity = models.CSRSparsity(
input_dim=transformer.get_word_embedding_dimension(),
hidden_dim=4 * transformer.get_word_embedding_dimension(),
k=256, # Number of top values to keep
k_aux=512, # Number of top values for auxiliary loss
)
# Create the SparseEncoder model
model = SparseEncoder(modules=[transformer, pooling, csr_sparsity])
# 2a. Load the NQ dataset: https://huggingface.co/datasets/sentence-transformers/natural-questions
logging.info("Read the Natural Questions training dataset")
full_dataset = load_dataset("sentence-transformers/natural-questions", split="train").select(range(100_000))
dataset_dict = full_dataset.train_test_split(test_size=1_000, seed=12)
train_dataset = dataset_dict["train"]
eval_dataset = dataset_dict["test"]
logging.info(train_dataset)
logging.info(eval_dataset)
# 3. Initialize the loss
loss = losses.CSRLoss(
model=model,
beta=0.1, # Weight for auxiliary loss
gamma=1, # Weight for ranking loss
scale=20.0, # Scale for similarity computation
)
    # 4. Define an evaluator for use during training, so we can track retrieval quality alongside the evaluation loss.
evaluators = []
for k_dim in [16, 32, 64, 128, 256]:
evaluators.append(evaluation.SparseNanoBEIREvaluator(["msmarco", "nfcorpus", "nq"], truncate_dim=k_dim))
dev_evaluator = evaluation.SequentialEvaluator(evaluators, main_score_function=lambda scores: scores[-1])
dev_evaluator(model)
# Set up training arguments
run_name = "sparse-mpnet-base-nq-fresh"
training_args = SparseEncoderTrainingArguments(
output_dir=f"models/{run_name}",
num_train_epochs=1,
per_device_train_batch_size=32,
per_device_eval_batch_size=32,
warmup_ratio=0.1,
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
        batch_sampler=BatchSamplers.NO_DUPLICATES,  # the in-batch ranking component benefits from no duplicate samples in a batch
logging_steps=200,
eval_strategy="steps",
eval_steps=400,
save_strategy="steps",
save_steps=400,
learning_rate=4e-5,
optim="adamw_torch",
weight_decay=1e-4,
adam_epsilon=6.25e-10,
run_name=run_name,
)
# Initialize trainer
trainer = SparseEncoderTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_evaluator,
)
# Train model
trainer.train()
# 7. Evaluate the model performance again after training
dev_evaluator(model)
# 8. Save the trained & evaluated model locally
model.save_pretrained(f"models/{run_name}/final")
model.push_to_hub(run_name)
if __name__ == "__main__":
main()
|
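A hedged inference sketch for the model trained by the two scripts above; the checkpoint path assumes the save location used in main():
from sentence_transformers import SparseEncoder
model = SparseEncoder("models/sparse-mpnet-base-nq-fresh/final")
query_emb = model.encode(["Which planet is known as the Red Planet?"])
doc_emb = model.encode(["Mars is often called the Red Planet."])
print(model.similarity(query_emb, doc_emb))  # 1x1 similarity matrix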
# Copyright (c) OpenMMLab. All rights reserved.
from .history_buffer import HistoryBuffer
from .log_processor import LogProcessor
from .logger import MMLogger, print_log
from .message_hub import MessageHub
__all__ = [
'HistoryBuffer', 'MessageHub', 'MMLogger', 'print_log', 'LogProcessor'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .history_buffer import HistoryBuffer
from .logger import MMLogger, print_log
from .message_hub import MessageHub
__all__ = ['HistoryBuffer', 'MessageHub', 'MMLogger', 'print_log']
|
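A short usage sketch for the logging utilities exported above:
from mmengine.logging import MMLogger, print_log
logger = MMLogger.get_instance('demo', log_level='INFO')
print_log('training started', logger=logger)    # routed through the MMLogger
print_log('another message', logger='current')  # uses the current global instance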
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
import torch.nn.functional as F
from mmengine.model import constant_init
from mmdet.models.layers import DyReLU, SELayer
def test_se_layer():
with pytest.raises(AssertionError):
        # act_cfg sequence length must equal 2
SELayer(channels=32, act_cfg=(dict(type='ReLU'), ))
with pytest.raises(AssertionError):
# act_cfg sequence must be a tuple of dict
SELayer(channels=32, act_cfg=[dict(type='ReLU'), dict(type='ReLU')])
# Test SELayer forward
layer = SELayer(channels=32)
layer.init_weights()
layer.train()
x = torch.randn((1, 32, 10, 10))
x_out = layer(x)
assert x_out.shape == torch.Size((1, 32, 10, 10))
def test_dyrelu():
with pytest.raises(AssertionError):
        # act_cfg sequence length must equal 2
DyReLU(channels=32, act_cfg=(dict(type='ReLU'), ))
with pytest.raises(AssertionError):
# act_cfg sequence must be a tuple of dict
DyReLU(channels=32, act_cfg=[dict(type='ReLU'), dict(type='ReLU')])
# Test DyReLU forward
layer = DyReLU(channels=32)
layer.init_weights()
layer.train()
x = torch.randn((1, 32, 10, 10))
x_out = layer(x)
assert x_out.shape == torch.Size((1, 32, 10, 10))
# DyReLU should act as standard (static) ReLU
# when eliminating the effect of SE-like module
layer = DyReLU(channels=32)
constant_init(layer.conv2.conv, 0)
layer.train()
x = torch.randn((1, 32, 10, 10))
x_out = layer(x)
relu_out = F.relu(x)
assert torch.equal(x_out, relu_out)
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
import torch.nn.functional as F
from mmengine.model.utils import constant_init
from mmdet.models.layers import DyReLU, SELayer
def test_se_layer():
with pytest.raises(AssertionError):
        # act_cfg sequence length must equal 2
SELayer(channels=32, act_cfg=(dict(type='ReLU'), ))
with pytest.raises(AssertionError):
# act_cfg sequence must be a tuple of dict
SELayer(channels=32, act_cfg=[dict(type='ReLU'), dict(type='ReLU')])
# Test SELayer forward
layer = SELayer(channels=32)
layer.init_weights()
layer.train()
x = torch.randn((1, 32, 10, 10))
x_out = layer(x)
assert x_out.shape == torch.Size((1, 32, 10, 10))
def test_dyrelu():
with pytest.raises(AssertionError):
        # act_cfg sequence length must equal 2
DyReLU(channels=32, act_cfg=(dict(type='ReLU'), ))
with pytest.raises(AssertionError):
# act_cfg sequence must be a tuple of dict
DyReLU(channels=32, act_cfg=[dict(type='ReLU'), dict(type='ReLU')])
# Test DyReLU forward
layer = DyReLU(channels=32)
layer.init_weights()
layer.train()
x = torch.randn((1, 32, 10, 10))
x_out = layer(x)
assert x_out.shape == torch.Size((1, 32, 10, 10))
# DyReLU should act as standard (static) ReLU
# when eliminating the effect of SE-like module
layer = DyReLU(channels=32)
constant_init(layer.conv2.conv, 0)
layer.train()
x = torch.randn((1, 32, 10, 10))
x_out = layer(x)
relu_out = F.relu(x)
assert torch.equal(x_out, relu_out)
|
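A brief note on the zero-init check in both test files above: constant-initializing `conv2.conv` to 0 removes the learned modulation, so DyReLU's coefficients fall back to their defaults (roughly a1=1, b1=0, a2=b2=0) and the piecewise output max(a1*x + b1, a2*x + b2) collapses to max(x, 0), i.e. a standard ReLU, which is exactly what `torch.equal(x_out, relu_out)` asserts.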
"""
This example measures the inference speed of a given model.
Usage:
python evaluation_inference_speed.py
OR
python evaluation_inference_speed.py model_name
"""
from sentence_transformers import SentenceTransformer
import sys
import time
import torch
from datasets import load_dataset
# Limit torch to 4 threads
torch.set_num_threads(4)
model_name = sys.argv[1] if len(sys.argv) > 1 else "bert-base-nli-mean-tokens"
# Load a sentence transformer model
model = SentenceTransformer(model_name)
max_sentences = 100_000
all_nli_dataset = load_dataset("sentence-transformers/all-nli", "pair", split="train")
sentences = list(set(all_nli_dataset["anchor"]))[:max_sentences]
print("Model Name:", model_name)
print("Number of sentences:", len(sentences))
for i in range(3):
print("Run", i)
start_time = time.time()
emb = model.encode(sentences, batch_size=32)
end_time = time.time()
diff_time = end_time - start_time
print("Done after {:.2f} seconds".format(diff_time))
print("Speed: {:.2f} sentences / second".format(len(sentences) / diff_time))
print("=====")
|
"""
This example measures the inference speed of a given model.
Usage:
python evaluation_inference_speed.py
OR
python evaluation_inference_speed.py model_name
"""
from sentence_transformers import SentenceTransformer
import sys
import time
import torch
from datasets import load_dataset
# Limit torch to 4 threads
torch.set_num_threads(4)
model_name = sys.argv[1] if len(sys.argv) > 1 else "bert-base-nli-mean-tokens"
# Load a sentence transformer model
model = SentenceTransformer(model_name)
max_sentences = 10_000
dataset = load_dataset("sentence-transformers/all-nli", "pair", split="train")
sentences = list(set(dataset["anchor"] + dataset["positive"]))[:max_sentences]
print("Model Name:", model_name)
print("Number of sentences:", len(sentences))
for i in range(3):
print("Run", i)
start_time = time.time()
emb = model.encode(sentences, batch_size=32)
end_time = time.time()
diff_time = end_time - start_time
print("Done after {:.2f} seconds".format(diff_time))
print("Speed: {:.2f} sentences / second".format(len(sentences) / diff_time))
print("=====")
|
import gc
import unittest
from diffusers import (
SanaTransformer2DModel,
)
from diffusers.utils.testing_utils import (
backend_empty_cache,
enable_full_determinism,
require_torch_accelerator,
torch_device,
)
enable_full_determinism()
@require_torch_accelerator
class SanaTransformer2DModelSingleFileTests(unittest.TestCase):
model_class = SanaTransformer2DModel
ckpt_path = (
"https://huggingface.co/Efficient-Large-Model/Sana_1600M_1024px/blob/main/checkpoints/Sana_1600M_1024px.pth"
)
alternate_keys_ckpt_paths = [
"https://huggingface.co/Efficient-Large-Model/Sana_1600M_1024px/blob/main/checkpoints/Sana_1600M_1024px.pth"
]
repo_id = "Efficient-Large-Model/Sana_1600M_1024px_diffusers"
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def test_single_file_components(self):
model = self.model_class.from_pretrained(self.repo_id, subfolder="transformer")
model_single_file = self.model_class.from_single_file(self.ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between single file loading and pretrained loading"
)
def test_checkpoint_loading(self):
for ckpt_path in self.alternate_keys_ckpt_paths:
backend_empty_cache(torch_device)
model = self.model_class.from_single_file(ckpt_path)
del model
gc.collect()
backend_empty_cache(torch_device)
|
import gc
import unittest
import torch
from diffusers import (
SanaTransformer2DModel,
)
from diffusers.utils.testing_utils import (
backend_empty_cache,
enable_full_determinism,
require_torch_accelerator,
torch_device,
)
enable_full_determinism()
@require_torch_accelerator
class SanaTransformer2DModelSingleFileTests(unittest.TestCase):
model_class = SanaTransformer2DModel
ckpt_path = (
"https://huggingface.co/Efficient-Large-Model/Sana_1600M_1024px/blob/main/checkpoints/Sana_1600M_1024px.pth"
)
alternate_keys_ckpt_paths = [
"https://huggingface.co/Efficient-Large-Model/Sana_1600M_1024px/blob/main/checkpoints/Sana_1600M_1024px.pth"
]
repo_id = "Efficient-Large-Model/Sana_1600M_1024px_diffusers"
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def test_single_file_components(self):
model = self.model_class.from_pretrained(self.repo_id, subfolder="transformer")
model_single_file = self.model_class.from_single_file(self.ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between single file loading and pretrained loading"
)
def test_checkpoint_loading(self):
for ckpt_path in self.alternate_keys_ckpt_paths:
torch.cuda.empty_cache()
model = self.model_class.from_single_file(ckpt_path)
del model
gc.collect()
torch.cuda.empty_cache()
|
import random
from pathlib import Path
from typing import Callable, Dict, Tuple
import opentelemetry.sdk.metrics.export
import opentelemetry.sdk.metrics.view
import pytest
from opentelemetry.sdk.metrics.export import (
AggregationTemporality,
MetricExporter,
MetricExportResult,
MetricsData,
PeriodicExportingMetricReader,
)
class DirMetricExporter(MetricExporter):
"""Implementation of :class:`MetricExporter` that prints metrics to a file in a given directory.
This class can be used for diagnostic or testing purposes.
"""
def __init__(
self,
metric_dir: str,
preferred_temporality: Dict[type, AggregationTemporality] = None,
preferred_aggregation: Dict[
type, "opentelemetry.sdk.metrics.view.Aggregation"
] = None,
):
super().__init__(
preferred_temporality=preferred_temporality,
preferred_aggregation=preferred_aggregation,
)
self.metric_filename: Path = Path(metric_dir) / str(random.randint(0, 1048575))
self.f = open(self.metric_filename, 'a')
def export(
self,
metrics_data: MetricsData,
timeout_millis: float = 10_000,
**kwargs,
) -> MetricExportResult:
self.f.write(metrics_data.to_json())
self.f.write('\n')
self.f.flush()
return MetricExportResult.SUCCESS
def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None:
pass
def force_flush(self, timeout_millis: float = 10_000) -> bool:
return True
def __del__(self):
self.f.close()
@pytest.fixture(scope='function')
def monkeypatch_metric_exporter(
tmpdir_factory: pytest.TempdirFactory,
) -> Tuple[Callable, Callable]:
import json
import os
import time
from pathlib import Path
import opentelemetry.sdk.metrics.export
collect_path = Path(tmpdir_factory.mktemp('otel-collector'))
metrics_path = collect_path / 'metrics'
os.mkdir(metrics_path)
tick_counter_filename = collect_path / 'tick_counter'
with open(tick_counter_filename, 'w') as f:
f.write('0')
def collect_metrics():
with open(tick_counter_filename, 'r') as f:
tick_counter = int(f.read())
with open(tick_counter_filename, 'w') as f:
f.write(str(tick_counter + 1))
time.sleep(2)
def _get_service_name(otel_measurement):
return otel_measurement['resource_metrics'][0]['resource']['attributes'][
'service.name'
]
def read_metrics():
def read_metric_file(filename):
with open(filename, 'r') as f:
return json.loads(f.read())
return {
_get_service_name(i): i
for i in map(read_metric_file, metrics_path.glob('*'))
}
class PatchedTextReader(PeriodicExportingMetricReader):
def __init__(self, *args, **kwargs) -> None:
self.exporter = DirMetricExporter(metrics_path)
self.tick_counter = 0
super().__init__(
exporter=self.exporter,
export_interval_millis=500,
)
def _ticker(self) -> None:
interval_secs = self._export_interval_millis / 1e3
while not self._shutdown_event.wait(interval_secs):
with open(tick_counter_filename, 'r') as f:
tick_counter = int(f.read())
if tick_counter != self.tick_counter:
self.tick_counter = tick_counter
self.collect(timeout_millis=self._export_timeout_millis)
self.collect(timeout_millis=self._export_interval_millis)
real_reader = opentelemetry.sdk.metrics.export.PeriodicExportingMetricReader
opentelemetry.sdk.metrics.export.PeriodicExportingMetricReader = PatchedTextReader
yield collect_metrics, read_metrics
opentelemetry.sdk.metrics.export.PeriodicExportingMetricReader = real_reader
|
import random
import pytest
from pathlib import Path
from typing import Dict, Tuple, Callable
import opentelemetry.sdk.metrics.export
import opentelemetry.sdk.metrics.view
from opentelemetry.sdk.metrics.export import (
AggregationTemporality,
MetricExporter,
MetricExportResult,
MetricsData,
PeriodicExportingMetricReader,
)
class DirMetricExporter(MetricExporter):
"""Implementation of :class:`MetricExporter` that prints metrics to a file in a given directory.
This class can be used for diagnostic or testing purposes.
"""
def __init__(
self,
metric_dir: str,
preferred_temporality: Dict[type, AggregationTemporality] = None,
preferred_aggregation: Dict[
type, "opentelemetry.sdk.metrics.view.Aggregation"
] = None,
):
super().__init__(
preferred_temporality=preferred_temporality,
preferred_aggregation=preferred_aggregation,
)
self.metric_filename: Path = Path(metric_dir) / str(random.randint(0, 1048575))
self.f = open(self.metric_filename, 'a')
def export(
self,
metrics_data: MetricsData,
timeout_millis: float = 10_000,
**kwargs,
) -> MetricExportResult:
self.f.write(metrics_data.to_json())
self.f.write('\n')
self.f.flush()
return MetricExportResult.SUCCESS
def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None:
pass
def force_flush(self, timeout_millis: float = 10_000) -> bool:
return True
def __del__(self):
self.f.close()
@pytest.fixture(scope='function')
def monkeypatch_metric_exporter(
tmpdir_factory: pytest.TempdirFactory,
) -> Tuple[Callable, Callable]:
import opentelemetry.sdk.metrics.export
from pathlib import Path
import time
import os
import json
collect_path = Path(tmpdir_factory.mktemp('otel-collector'))
metrics_path = collect_path / 'metrics'
os.mkdir(metrics_path)
tick_counter_filename = collect_path / 'tick_counter'
with open(tick_counter_filename, 'w') as f:
f.write('0')
def collect_metrics():
with open(tick_counter_filename, 'r') as f:
tick_counter = int(f.read())
with open(tick_counter_filename, 'w') as f:
f.write(str(tick_counter + 1))
time.sleep(2)
def _get_service_name(otel_measurement):
return otel_measurement[0]['resource_metrics'][0]['resource']['attributes'][
'service.name'
]
def read_metrics():
def read_metric_file(filename):
with open(filename, 'r') as f:
return list(map(json.loads, f.readlines()))
return {
_get_service_name(i): i
for i in map(read_metric_file, metrics_path.glob('*'))
}
class PatchedTextReader(PeriodicExportingMetricReader):
def __init__(self, *args, **kwargs) -> None:
self.exporter = DirMetricExporter(metrics_path)
self.tick_counter = 0
super().__init__(
exporter=self.exporter,
export_interval_millis=500,
)
def _ticker(self) -> None:
interval_secs = self._export_interval_millis / 1e3
while not self._shutdown_event.wait(interval_secs):
with open(tick_counter_filename, 'r') as f:
tick_counter = int(f.read())
if tick_counter != self.tick_counter:
self.tick_counter = tick_counter
self.collect(timeout_millis=self._export_timeout_millis)
self.collect(timeout_millis=self._export_interval_millis)
real_reader = opentelemetry.sdk.metrics.export.PeriodicExportingMetricReader
opentelemetry.sdk.metrics.export.PeriodicExportingMetricReader = PatchedTextReader
yield collect_metrics, read_metrics
opentelemetry.sdk.metrics.export.PeriodicExportingMetricReader = real_reader
|
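A hedged sketch of how a test might consume the fixture defined above; the instrumented-code placeholder is assumed:
def test_metrics_are_exported(monkeypatch_metric_exporter):
    collect_metrics, read_metrics = monkeypatch_metric_exporter
    # ... run code that sets up a MeterProvider and records measurements ...
    collect_metrics()         # bump the tick counter so the patched reader exports
    metrics = read_metrics()  # {service_name: parsed metric JSON} per exporter file
    assert isinstance(metrics, dict)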
import asyncio
import os
import random
import string
import tempfile
import time
import pytest
from jina import helper
@pytest.fixture(scope='function')
def random_workspace_name():
"""Generate a random workspace name with digits and letters."""
rand = ''.join(random.choices(string.ascii_uppercase + string.digits, k=6))
return f'JINA_TEST_WORKSPACE_{rand}'
@pytest.fixture(scope='function')
def test_metas(tmpdir, random_workspace_name):
from jina.serve.executors.metas import get_default_metas
os.environ[random_workspace_name] = str(tmpdir)
metas = get_default_metas()
metas['workspace'] = os.environ[random_workspace_name]
yield metas
del os.environ[random_workspace_name]
@pytest.fixture()
def docker_compose(request):
os.system(
f"docker-compose -f {request.param} --project-directory . up --build -d --remove-orphans"
)
time.sleep(10)
yield
os.system(
f"docker-compose -f {request.param} --project-directory . down --remove-orphans"
)
@pytest.fixture(scope='function')
def port_generator():
generated_ports = set()
def random_port():
port = helper.random_port()
while port in generated_ports:
port = helper.random_port()
generated_ports.add(port)
return port
return random_port
@pytest.fixture(scope='function')
def test_envs(tmpdir):
os.environ['JINA_HUB_ROOT'] = str(tmpdir)
os.environ['JINA_HUB_CACHE_DIR'] = str(tmpdir)
yield None
del os.environ['JINA_HUB_ROOT']
del os.environ['JINA_HUB_CACHE_DIR']
@pytest.fixture(autouse=True)
def test_log_level(monkeypatch):
monkeypatch.setenv('JINA_LOG_LEVEL', 'DEBUG')
@pytest.fixture(autouse=True)
def test_grpc_fork_support_false(monkeypatch):
monkeypatch.setenv('GRPC_ENABLE_FORK_SUPPORT', 'false')
@pytest.fixture(autouse=True)
def test_timeout_ctrl_time(monkeypatch):
monkeypatch.setenv('JINA_DEFAULT_TIMEOUT_CTRL', '500')
@pytest.fixture(autouse=True)
def tmpfile(tmpdir):
tmpfile = f'jina_test_{next(tempfile._get_candidate_names())}.db'
return tmpdir / tmpfile
@pytest.fixture(scope='session')
def event_loop(request):
"""
Valid only for `pytest.mark.asyncio` tests
"""
loop = asyncio.get_event_loop_policy().new_event_loop()
yield loop
loop.close()
|
import asyncio
import os
import random
import string
import tempfile
import time
import pytest
from jina import helper
@pytest.fixture(scope='function')
def random_workspace_name():
"""Generate a random workspace name with digits and letters."""
rand = ''.join(random.choices(string.ascii_uppercase + string.digits, k=6))
return f'JINA_TEST_WORKSPACE_{rand}'
@pytest.fixture(scope='function')
def test_metas(tmpdir, random_workspace_name):
from jina.serve.executors.metas import get_default_metas
os.environ[random_workspace_name] = str(tmpdir)
metas = get_default_metas()
metas['workspace'] = os.environ[random_workspace_name]
yield metas
del os.environ[random_workspace_name]
@pytest.fixture()
def docker_compose(request):
os.system(
f"docker-compose -f {request.param} --project-directory . up --build -d --remove-orphans"
)
time.sleep(10)
yield
os.system(
f"docker-compose -f {request.param} --project-directory . down --remove-orphans"
)
@pytest.fixture(scope='function')
def port_generator():
generated_ports = set()
def random_port():
port = helper.random_port()
while port in generated_ports:
port = helper.random_port()
generated_ports.add(port)
return port
return random_port
@pytest.fixture(scope='function')
def test_envs(tmpdir):
os.environ['JINA_HUB_ROOT'] = str(tmpdir)
os.environ['JINA_HUB_CACHE_DIR'] = str(tmpdir)
yield None
del os.environ['JINA_HUB_ROOT']
del os.environ['JINA_HUB_CACHE_DIR']
@pytest.fixture(autouse=True)
def test_log_level(monkeypatch):
monkeypatch.setenv('JINA_LOG_LEVEL', 'DEBUG')
@pytest.fixture(autouse=True)
def test_timeout_ctrl_time(monkeypatch):
monkeypatch.setenv('JINA_DEFAULT_TIMEOUT_CTRL', '500')
@pytest.fixture(autouse=True)
def tmpfile(tmpdir):
tmpfile = f'jina_test_{next(tempfile._get_candidate_names())}.db'
return tmpdir / tmpfile
@pytest.fixture(scope='session')
def event_loop(request):
"""
Valid only for `pytest.mark.asyncio` tests
"""
loop = asyncio.get_event_loop_policy().new_event_loop()
yield loop
loop.close()
|
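A minimal usage sketch for the port_generator fixture defined in both conftest variants above:
def test_unique_ports(port_generator):
    # the fixture returns a callable that never repeats a port within one test
    ports = {port_generator() for _ in range(5)}
    assert len(ports) == 5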
import json
import pytest # type: ignore[import-not-found]
from langchain_core.messages import (
AIMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
ToolMessage,
)
from langchain_openai.chat_models.base import (
_convert_dict_to_message,
_convert_message_to_dict,
)
from langchain_xai import ChatXAI
MODEL_NAME = "grok-4"
def test_initialization() -> None:
"""Test chat model initialization."""
ChatXAI(model=MODEL_NAME)
def test_xai_model_param() -> None:
llm = ChatXAI(model="foo")
assert llm.model_name == "foo"
llm = ChatXAI(model_name="foo") # type: ignore[call-arg]
assert llm.model_name == "foo"
ls_params = llm._get_ls_params()
assert ls_params["ls_provider"] == "xai"
def test_chat_xai_invalid_streaming_params() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
with pytest.raises(ValueError):
ChatXAI(
model=MODEL_NAME,
max_tokens=10,
streaming=True,
temperature=0,
n=5,
)
def test_chat_xai_extra_kwargs() -> None:
"""Test extra kwargs to chat xai."""
# Check that foo is saved in extra_kwargs.
llm = ChatXAI(model=MODEL_NAME, foo=3, max_tokens=10) # type: ignore[call-arg]
assert llm.max_tokens == 10
assert llm.model_kwargs == {"foo": 3}
    # Test that extra kwargs are merged into explicitly provided model_kwargs.
llm = ChatXAI(model=MODEL_NAME, foo=3, model_kwargs={"bar": 2}) # type: ignore[call-arg]
assert llm.model_kwargs == {"foo": 3, "bar": 2}
# Test that if provided twice it errors
with pytest.raises(ValueError):
ChatXAI(model=MODEL_NAME, foo=3, model_kwargs={"foo": 2}) # type: ignore[call-arg]
def test_function_dict_to_message_function_message() -> None:
content = json.dumps({"result": "Example #1"})
name = "test_function"
result = _convert_dict_to_message(
{
"role": "function",
"name": name,
"content": content,
}
)
assert isinstance(result, FunctionMessage)
assert result.name == name
assert result.content == content
def test_convert_dict_to_message_human() -> None:
message = {"role": "user", "content": "foo"}
result = _convert_dict_to_message(message)
expected_output = HumanMessage(content="foo")
assert result == expected_output
assert _convert_message_to_dict(expected_output) == message
def test__convert_dict_to_message_human_with_name() -> None:
message = {"role": "user", "content": "foo", "name": "test"}
result = _convert_dict_to_message(message)
expected_output = HumanMessage(content="foo", name="test")
assert result == expected_output
assert _convert_message_to_dict(expected_output) == message
def test_convert_dict_to_message_ai() -> None:
message = {"role": "assistant", "content": "foo"}
result = _convert_dict_to_message(message)
expected_output = AIMessage(content="foo")
assert result == expected_output
assert _convert_message_to_dict(expected_output) == message
def test_convert_dict_to_message_ai_with_name() -> None:
message = {"role": "assistant", "content": "foo", "name": "test"}
result = _convert_dict_to_message(message)
expected_output = AIMessage(content="foo", name="test")
assert result == expected_output
assert _convert_message_to_dict(expected_output) == message
def test_convert_dict_to_message_system() -> None:
message = {"role": "system", "content": "foo"}
result = _convert_dict_to_message(message)
expected_output = SystemMessage(content="foo")
assert result == expected_output
assert _convert_message_to_dict(expected_output) == message
def test_convert_dict_to_message_system_with_name() -> None:
message = {"role": "system", "content": "foo", "name": "test"}
result = _convert_dict_to_message(message)
expected_output = SystemMessage(content="foo", name="test")
assert result == expected_output
assert _convert_message_to_dict(expected_output) == message
def test_convert_dict_to_message_tool() -> None:
message = {"role": "tool", "content": "foo", "tool_call_id": "bar"}
result = _convert_dict_to_message(message)
expected_output = ToolMessage(content="foo", tool_call_id="bar")
assert result == expected_output
assert _convert_message_to_dict(expected_output) == message
|
import json
import pytest # type: ignore[import-not-found]
from langchain_core.messages import (
AIMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
ToolMessage,
)
from langchain_openai.chat_models.base import (
_convert_dict_to_message,
_convert_message_to_dict,
)
from langchain_xai import ChatXAI
def test_initialization() -> None:
"""Test chat model initialization."""
ChatXAI(model="grok-beta")
def test_xai_model_param() -> None:
llm = ChatXAI(model="foo")
assert llm.model_name == "foo"
llm = ChatXAI(model_name="foo") # type: ignore[call-arg]
assert llm.model_name == "foo"
ls_params = llm._get_ls_params()
assert ls_params["ls_provider"] == "xai"
def test_chat_xai_invalid_streaming_params() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
with pytest.raises(ValueError):
ChatXAI(
model="grok-beta",
max_tokens=10,
streaming=True,
temperature=0,
n=5,
)
def test_chat_xai_extra_kwargs() -> None:
"""Test extra kwargs to chat xai."""
# Check that foo is saved in extra_kwargs.
llm = ChatXAI(model="grok-beta", foo=3, max_tokens=10) # type: ignore[call-arg]
assert llm.max_tokens == 10
assert llm.model_kwargs == {"foo": 3}
    # Test that extra kwargs are merged into explicitly provided model_kwargs.
llm = ChatXAI(model="grok-beta", foo=3, model_kwargs={"bar": 2}) # type: ignore[call-arg]
assert llm.model_kwargs == {"foo": 3, "bar": 2}
# Test that if provided twice it errors
with pytest.raises(ValueError):
ChatXAI(model="grok-beta", foo=3, model_kwargs={"foo": 2}) # type: ignore[call-arg]
def test_function_dict_to_message_function_message() -> None:
content = json.dumps({"result": "Example #1"})
name = "test_function"
result = _convert_dict_to_message(
{
"role": "function",
"name": name,
"content": content,
}
)
assert isinstance(result, FunctionMessage)
assert result.name == name
assert result.content == content
def test_convert_dict_to_message_human() -> None:
message = {"role": "user", "content": "foo"}
result = _convert_dict_to_message(message)
expected_output = HumanMessage(content="foo")
assert result == expected_output
assert _convert_message_to_dict(expected_output) == message
def test__convert_dict_to_message_human_with_name() -> None:
message = {"role": "user", "content": "foo", "name": "test"}
result = _convert_dict_to_message(message)
expected_output = HumanMessage(content="foo", name="test")
assert result == expected_output
assert _convert_message_to_dict(expected_output) == message
def test_convert_dict_to_message_ai() -> None:
message = {"role": "assistant", "content": "foo"}
result = _convert_dict_to_message(message)
expected_output = AIMessage(content="foo")
assert result == expected_output
assert _convert_message_to_dict(expected_output) == message
def test_convert_dict_to_message_ai_with_name() -> None:
message = {"role": "assistant", "content": "foo", "name": "test"}
result = _convert_dict_to_message(message)
expected_output = AIMessage(content="foo", name="test")
assert result == expected_output
assert _convert_message_to_dict(expected_output) == message
def test_convert_dict_to_message_system() -> None:
message = {"role": "system", "content": "foo"}
result = _convert_dict_to_message(message)
expected_output = SystemMessage(content="foo")
assert result == expected_output
assert _convert_message_to_dict(expected_output) == message
def test_convert_dict_to_message_system_with_name() -> None:
message = {"role": "system", "content": "foo", "name": "test"}
result = _convert_dict_to_message(message)
expected_output = SystemMessage(content="foo", name="test")
assert result == expected_output
assert _convert_message_to_dict(expected_output) == message
def test_convert_dict_to_message_tool() -> None:
message = {"role": "tool", "content": "foo", "tool_call_id": "bar"}
result = _convert_dict_to_message(message)
expected_output = ToolMessage(content="foo", tool_call_id="bar")
assert result == expected_output
assert _convert_message_to_dict(expected_output) == message
|
__version__ = '0.14.6'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.14.5'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
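Note the inversion relative to the 0.12/0.13 `__init__` snippets earlier: in these 0.14.x files the rich traceback handler is opt-in (installed only when `DA_RICH_HANDLER` is set), whereas previously it was opt-out via `DA_NO_RICH_HANDLER`, and the version check has been dropped entirely.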
from collections.abc import Generator
from unittest.mock import MagicMock, patch
import pytest
from langchain_community.tools.edenai import EdenAiTextModerationTool
tool = EdenAiTextModerationTool(
providers=["openai"],
language="en",
edenai_api_key="fake_key", # type: ignore[arg-type]
)
@pytest.fixture
def mock_post() -> Generator:
with patch(
"langchain_community.tools.edenai.edenai_base_tool.requests.post"
) as mock:
yield mock
def test_provider_not_available(mock_post: MagicMock) -> None:
mock_response = MagicMock()
mock_response.status_code = 200
mock_response.json.return_value = [
{
"error": {
"message": """Amazon has returned an error:
An error occurred (TextSizeLimitExceededException)
when calling the DetectTargetedSentiment
operation: Input text size exceeds limit.
Max length of request text allowed is 5000 bytes
while in this request the text size is 47380 bytes""",
"type": "ProviderInvalidInputTextLengthError",
},
"status": "fail",
"provider": "amazon",
"provider_status_code": 400,
"cost": 0.0,
}
]
mock_post.return_value = mock_response
with pytest.raises(ValueError):
tool._run("some query")
def test_unexpected_response(mock_post: MagicMock) -> None:
mock_response = MagicMock()
mock_response.status_code = 200
mock_response.json.return_value = [
{
"status": "success",
}
]
mock_post.return_value = mock_response
with pytest.raises(RuntimeError):
tool._run("some query")
def test_incomplete_response(mock_post: MagicMock) -> None:
mock_response = MagicMock()
mock_response.status_code = 200
mock_response.json.return_value = [
{
"status": "success",
"provider": "microsoft",
"nsfw_likelihood": 5,
"cost": 0.001,
"label": ["sexually explicit", "sexually suggestive", "offensive"],
}
]
mock_post.return_value = mock_response
with pytest.raises(RuntimeError):
tool._run("some query")
def test_invalid_payload(mock_post: MagicMock) -> None:
mock_response = MagicMock()
mock_response.status_code = 400
mock_response.json.return_value = {}
mock_post.return_value = mock_response
with pytest.raises(ValueError):
tool._run("some query")
def test_parse_response_format(mock_post: MagicMock) -> None:
mock_response = MagicMock()
mock_response.status_code = 200
mock_response.json.return_value = [
{
"status": "success",
"provider": "microsoft",
"nsfw_likelihood": 5,
"cost": 0.001,
"label": ["offensive", "hate_speech"],
"likelihood": [4, 5],
}
]
mock_post.return_value = mock_response
result = tool.invoke("some query")
assert result == 'nsfw_likelihood: 5\n"offensive": 4\n"hate_speech": 5'
|
from collections.abc import Generator
from unittest.mock import MagicMock, patch
import pytest
from langchain_community.tools.edenai import EdenAiTextModerationTool
tool = EdenAiTextModerationTool( # type: ignore[call-arg]
providers=["openai"],
language="en",
edenai_api_key="fake_key", # type: ignore[arg-type]
)
@pytest.fixture
def mock_post() -> Generator:
with patch(
"langchain_community.tools.edenai.edenai_base_tool.requests.post"
) as mock:
yield mock
def test_provider_not_available(mock_post: MagicMock) -> None:
mock_response = MagicMock()
mock_response.status_code = 200
mock_response.json.return_value = [
{
"error": {
"message": """Amazon has returned an error:
An error occurred (TextSizeLimitExceededException)
when calling the DetectTargetedSentiment
operation: Input text size exceeds limit.
Max length of request text allowed is 5000 bytes
while in this request the text size is 47380 bytes""",
"type": "ProviderInvalidInputTextLengthError",
},
"status": "fail",
"provider": "amazon",
"provider_status_code": 400,
"cost": 0.0,
}
]
mock_post.return_value = mock_response
with pytest.raises(ValueError):
tool._run("some query")
def test_unexpected_response(mock_post: MagicMock) -> None:
mock_response = MagicMock()
mock_response.status_code = 200
mock_response.json.return_value = [
{
"status": "success",
}
]
mock_post.return_value = mock_response
with pytest.raises(RuntimeError):
tool._run("some query")
def test_incomplete_response(mock_post: MagicMock) -> None:
mock_response = MagicMock()
mock_response.status_code = 200
mock_response.json.return_value = [
{
"status": "success",
"provider": "microsoft",
"nsfw_likelihood": 5,
"cost": 0.001,
"label": ["sexually explicit", "sexually suggestive", "offensive"],
}
]
mock_post.return_value = mock_response
with pytest.raises(RuntimeError):
tool._run("some query")
def test_invalid_payload(mock_post: MagicMock) -> None:
mock_response = MagicMock()
mock_response.status_code = 400
mock_response.json.return_value = {}
mock_post.return_value = mock_response
with pytest.raises(ValueError):
tool._run("some query")
def test_parse_response_format(mock_post: MagicMock) -> None:
mock_response = MagicMock()
mock_response.status_code = 200
mock_response.json.return_value = [
{
"status": "success",
"provider": "microsoft",
"nsfw_likelihood": 5,
"cost": 0.001,
"label": ["offensive", "hate_speech"],
"likelihood": [4, 5],
}
]
mock_post.return_value = mock_response
result = tool.invoke("some query")
assert result == 'nsfw_likelihood: 5\n"offensive": 4\n"hate_speech": 5'
|
# Copyright (c) OpenMMLab. All rights reserved.
from .conditional_detr_layers import (ConditionalDetrTransformerDecoder,
ConditionalDetrTransformerDecoderLayer)
from .dab_detr_layers import (DABDetrTransformerDecoder,
DABDetrTransformerDecoderLayer,
DABDetrTransformerEncoder)
from .ddq_detr_layers import DDQTransformerDecoder
from .deformable_detr_layers import (DeformableDetrTransformerDecoder,
DeformableDetrTransformerDecoderLayer,
DeformableDetrTransformerEncoder,
DeformableDetrTransformerEncoderLayer)
from .detr_layers import (DetrTransformerDecoder, DetrTransformerDecoderLayer,
DetrTransformerEncoder, DetrTransformerEncoderLayer)
from .dino_layers import CdnQueryGenerator, DinoTransformerDecoder
from .grounding_dino_layers import (GroundingDinoTransformerDecoder,
GroundingDinoTransformerDecoderLayer,
GroundingDinoTransformerEncoder)
from .mask2former_layers import (Mask2FormerTransformerDecoder,
Mask2FormerTransformerDecoderLayer,
Mask2FormerTransformerEncoder)
from .utils import (MLP, AdaptivePadding, ConditionalAttention, DynamicConv,
PatchEmbed, PatchMerging, coordinate_to_encoding,
inverse_sigmoid, nchw_to_nlc, nlc_to_nchw)
__all__ = [
'nlc_to_nchw', 'nchw_to_nlc', 'AdaptivePadding', 'PatchEmbed',
'PatchMerging', 'inverse_sigmoid', 'DynamicConv', 'MLP',
'DetrTransformerEncoder', 'DetrTransformerDecoder',
'DetrTransformerEncoderLayer', 'DetrTransformerDecoderLayer',
'DeformableDetrTransformerEncoder', 'DeformableDetrTransformerDecoder',
'DeformableDetrTransformerEncoderLayer',
'DeformableDetrTransformerDecoderLayer', 'coordinate_to_encoding',
'ConditionalAttention', 'DABDetrTransformerDecoderLayer',
'DABDetrTransformerDecoder', 'DABDetrTransformerEncoder',
'DDQTransformerDecoder', 'ConditionalDetrTransformerDecoder',
'ConditionalDetrTransformerDecoderLayer', 'DinoTransformerDecoder',
'CdnQueryGenerator', 'Mask2FormerTransformerEncoder',
'Mask2FormerTransformerDecoderLayer', 'Mask2FormerTransformerDecoder',
'GroundingDinoTransformerDecoderLayer', 'GroundingDinoTransformerEncoder',
'GroundingDinoTransformerDecoder'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .conditional_detr_layers import (ConditionalDetrTransformerDecoder,
ConditionalDetrTransformerDecoderLayer)
from .dab_detr_layers import (DABDetrTransformerDecoder,
DABDetrTransformerDecoderLayer,
DABDetrTransformerEncoder)
from .ddq_detr_layers import DDQTransformerDecoder
from .deformable_detr_layers import (DeformableDetrTransformerDecoder,
DeformableDetrTransformerDecoderLayer,
DeformableDetrTransformerEncoder,
DeformableDetrTransformerEncoderLayer)
from .detr_layers import (DetrTransformerDecoder, DetrTransformerDecoderLayer,
DetrTransformerEncoder, DetrTransformerEncoderLayer)
from .dino_layers import CdnQueryGenerator, DinoTransformerDecoder
from .mask2former_layers import (Mask2FormerTransformerDecoder,
Mask2FormerTransformerDecoderLayer,
Mask2FormerTransformerEncoder)
from .utils import (MLP, AdaptivePadding, ConditionalAttention, DynamicConv,
PatchEmbed, PatchMerging, coordinate_to_encoding,
inverse_sigmoid, nchw_to_nlc, nlc_to_nchw)
__all__ = [
'nlc_to_nchw', 'nchw_to_nlc', 'AdaptivePadding', 'PatchEmbed',
'PatchMerging', 'inverse_sigmoid', 'DynamicConv', 'MLP',
'DetrTransformerEncoder', 'DetrTransformerDecoder',
'DetrTransformerEncoderLayer', 'DetrTransformerDecoderLayer',
'DeformableDetrTransformerEncoder', 'DeformableDetrTransformerDecoder',
'DeformableDetrTransformerEncoderLayer',
'DeformableDetrTransformerDecoderLayer', 'coordinate_to_encoding',
'ConditionalAttention', 'DABDetrTransformerDecoderLayer',
'DABDetrTransformerDecoder', 'DABDetrTransformerEncoder',
'DDQTransformerDecoder', 'ConditionalDetrTransformerDecoder',
'ConditionalDetrTransformerDecoderLayer', 'DinoTransformerDecoder',
'CdnQueryGenerator', 'Mask2FormerTransformerEncoder',
'Mask2FormerTransformerDecoderLayer', 'Mask2FormerTransformerDecoder'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import ast
import os.path as osp
import re
import warnings
from typing import Tuple
from mmengine.fileio import load
from mmengine.utils import check_file_exist
MODULE2PACKAGE = {
'mmcls': 'mmcls',
'mmdet': 'mmdet',
'mmdet3d': 'mmdet3d',
'mmseg': 'mmsegmentation',
'mmaction': 'mmaction2',
'mmtrack': 'mmtrack',
'mmpose': 'mmpose',
'mmedit': 'mmedit',
'mmocr': 'mmocr',
'mmgen': 'mmgen',
'mmfewshot': 'mmfewshot',
'mmrazor': 'mmrazor',
'mmflow': 'mmflow',
'mmhuman3d': 'mmhuman3d',
'mmrotate': 'mmrotate',
'mmselfsup': 'mmselfsup',
'mmyolo': 'mmyolo',
'mmpretrain': 'mmpretrain',
'mmagic': 'mmagic',
}
# PKG2PROJECT is not a proper name for the mapping between the module name
# (the name a module is imported from) and the package name (the name used
# by `pip install`). Therefore, PKG2PROJECT is deprecated and this alias
# will only be kept until MMEngine v1.0.0.
PKG2PROJECT = MODULE2PACKAGE
def _get_cfg_metainfo(package_path: str, cfg_path: str) -> dict:
"""Get target meta information from all 'metafile.yml' defined in `mode-
index.yml` of external package.
Args:
package_path (str): Path of external package.
cfg_path (str): Name of experiment config.
Returns:
dict: Meta information of target experiment.
"""
meta_index_path = osp.join(package_path, '.mim', 'model-index.yml')
meta_index = load(meta_index_path)
cfg_dict = dict()
for meta_path in meta_index['Import']:
meta_path = osp.join(package_path, '.mim', meta_path)
cfg_meta = load(meta_path)
for model_cfg in cfg_meta['Models']:
if 'Config' not in model_cfg:
            warnings.warn(f'There is no `Config` defined in {model_cfg}')
continue
cfg_name = model_cfg['Config'].partition('/')[-1]
# Some config could have multiple weights, we only pick the
# first one.
if cfg_name in cfg_dict:
continue
cfg_dict[cfg_name] = model_cfg
if cfg_path not in cfg_dict:
raise ValueError(f'Expected configs: {cfg_dict.keys()}, but got '
f'{cfg_path}')
return cfg_dict[cfg_path]
def _get_external_cfg_path(package_path: str, cfg_file: str) -> str:
"""Get config path of external package.
Args:
package_path (str): Path of external package.
cfg_file (str): Name of experiment config.
Returns:
str: Absolute config path from external package.
"""
cfg_file = cfg_file.split('.')[0]
model_cfg = _get_cfg_metainfo(package_path, cfg_file)
cfg_path = osp.join(package_path, model_cfg['Config'])
check_file_exist(cfg_path)
return cfg_path
def _get_external_cfg_base_path(package_path: str, cfg_name: str) -> str:
"""Get base config path of external package.
Args:
package_path (str): Path of external package.
cfg_name (str): External relative config path with 'package::'.
Returns:
str: Absolute config path from external package.
"""
cfg_path = osp.join(package_path, '.mim', 'configs', cfg_name)
check_file_exist(cfg_path)
return cfg_path
def _get_package_and_cfg_path(cfg_path: str) -> Tuple[str, str]:
"""Get package name and relative config path.
Args:
cfg_path (str): External relative config path with 'package::'.
Returns:
Tuple[str, str]: Package name and config path.
"""
if re.match(r'\w*::\w*/\w*', cfg_path) is None:
        raise ValueError(
            '`_get_package_and_cfg_path` is only for external package '
            'configs; please specify the package name and the relative '
            'config path, e.g. '
            '`mmdet::faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py`')
package_cfg = cfg_path.split('::')
if len(package_cfg) > 2:
raise ValueError('`::` should only be used to separate package and '
'config name, but found multiple `::` in '
f'{cfg_path}')
package, cfg_path = package_cfg
assert package in MODULE2PACKAGE, (
        f'mmengine does not support loading {package} configs.')
package = MODULE2PACKAGE[package]
return package, cfg_path
class RemoveAssignFromAST(ast.NodeTransformer):
"""Remove Assign node if the target's name match the key.
Args:
key (str): The target name of the Assign node.
"""
def __init__(self, key):
self.key = key
def visit_Assign(self, node):
if (isinstance(node.targets[0], ast.Name)
and node.targets[0].id == self.key):
return None
else:
return node
|
# Copyright (c) OpenMMLab. All rights reserved.
import ast
import os.path as osp
import re
import warnings
from typing import Tuple
from mmengine.fileio import load
from mmengine.utils import check_file_exist
MODULE2PACKAGE = {
'mmcls': 'mmcls',
'mmdet': 'mmdet',
'mmdet3d': 'mmdet3d',
'mmseg': 'mmsegmentation',
'mmaction': 'mmaction2',
'mmtrack': 'mmtrack',
'mmpose': 'mmpose',
'mmedit': 'mmedit',
'mmocr': 'mmocr',
'mmgen': 'mmgen',
'mmfewshot': 'mmfewshot',
'mmrazor': 'mmrazor',
'mmflow': 'mmflow',
'mmhuman3d': 'mmhuman3d',
'mmrotate': 'mmrotate',
'mmselfsup': 'mmselfsup',
'mmyolo': 'mmyolo',
'mmpretrain': 'mmpretrain',
}
# PKG2PROJECT is not a proper name for the mapping between the module name
# (the name a module is imported from) and the package name (the name used
# by `pip install`). Therefore, PKG2PROJECT is deprecated and this alias
# will only be kept until MMEngine v1.0.0.
PKG2PROJECT = MODULE2PACKAGE
def _get_cfg_metainfo(package_path: str, cfg_path: str) -> dict:
"""Get target meta information from all 'metafile.yml' defined in `mode-
index.yml` of external package.
Args:
package_path (str): Path of external package.
cfg_path (str): Name of experiment config.
Returns:
dict: Meta information of target experiment.
"""
meta_index_path = osp.join(package_path, '.mim', 'model-index.yml')
meta_index = load(meta_index_path)
cfg_dict = dict()
for meta_path in meta_index['Import']:
meta_path = osp.join(package_path, '.mim', meta_path)
cfg_meta = load(meta_path)
for model_cfg in cfg_meta['Models']:
if 'Config' not in model_cfg:
            warnings.warn(f'There is no `Config` defined in {model_cfg}')
continue
cfg_name = model_cfg['Config'].partition('/')[-1]
# Some config could have multiple weights, we only pick the
# first one.
if cfg_name in cfg_dict:
continue
cfg_dict[cfg_name] = model_cfg
if cfg_path not in cfg_dict:
raise ValueError(f'Expected configs: {cfg_dict.keys()}, but got '
f'{cfg_path}')
return cfg_dict[cfg_path]
def _get_external_cfg_path(package_path: str, cfg_file: str) -> str:
"""Get config path of external package.
Args:
package_path (str): Path of external package.
cfg_file (str): Name of experiment config.
Returns:
str: Absolute config path from external package.
"""
cfg_file = cfg_file.split('.')[0]
model_cfg = _get_cfg_metainfo(package_path, cfg_file)
cfg_path = osp.join(package_path, model_cfg['Config'])
check_file_exist(cfg_path)
return cfg_path
def _get_external_cfg_base_path(package_path: str, cfg_name: str) -> str:
"""Get base config path of external package.
Args:
package_path (str): Path of external package.
cfg_name (str): External relative config path with 'package::'.
Returns:
str: Absolute config path from external package.
"""
cfg_path = osp.join(package_path, '.mim', 'configs', cfg_name)
check_file_exist(cfg_path)
return cfg_path
def _get_package_and_cfg_path(cfg_path: str) -> Tuple[str, str]:
"""Get package name and relative config path.
Args:
cfg_path (str): External relative config path with 'package::'.
Returns:
Tuple[str, str]: Package name and config path.
"""
if re.match(r'\w*::\w*/\w*', cfg_path) is None:
        raise ValueError(
            '`_get_package_and_cfg_path` is only for external package '
            'configs; please specify the package name and the relative '
            'config path, e.g. '
            '`mmdet::faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py`')
package_cfg = cfg_path.split('::')
if len(package_cfg) > 2:
raise ValueError('`::` should only be used to separate package and '
'config name, but found multiple `::` in '
f'{cfg_path}')
package, cfg_path = package_cfg
assert package in MODULE2PACKAGE, (
        f'mmengine does not support loading {package} configs.')
package = MODULE2PACKAGE[package]
return package, cfg_path
class RemoveAssignFromAST(ast.NodeTransformer):
"""Remove Assign node if the target's name match the key.
Args:
key (str): The target name of the Assign node.
"""
def __init__(self, key):
self.key = key
def visit_Assign(self, node):
if (isinstance(node.targets[0], ast.Name)
and node.targets[0].id == self.key):
return None
else:
return node
|
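A hedged sketch of `_get_package_and_cfg_path` on a scoped config path; the expected values follow the MODULE2PACKAGE table above:
package, rel_cfg = _get_package_and_cfg_path(
    'mmdet::faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py')
assert package == 'mmdet'  # 'mmdet' maps to itself in MODULE2PACKAGE
assert rel_cfg == 'faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py'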
"""Testing code shared by other tests."""
# pylint: disable=invalid-name
import collections
import importlib.util
import json
import os
import tempfile
from typing import Any, Callable, Dict, Type
import numpy as np
import xgboost as xgb
from xgboost._typing import ArrayLike
def validate_leaf_output(leaf: np.ndarray, num_parallel_tree: int) -> None:
"""Validate output for predict leaf tests."""
for i in range(leaf.shape[0]): # n_samples
for j in range(leaf.shape[1]): # n_rounds
for k in range(leaf.shape[2]): # n_classes
tree_group = leaf[i, j, k, :]
assert tree_group.shape[0] == num_parallel_tree
# No sampling, all trees within forest are the same
assert np.all(tree_group == tree_group[0])
def validate_data_initialization(
dmatrix: Type, model: Type[xgb.XGBModel], X: ArrayLike, y: ArrayLike
) -> None:
"""Assert that we don't create duplicated DMatrix."""
old_init = dmatrix.__init__
count = [0]
def new_init(self: Any, **kwargs: Any) -> Callable:
count[0] += 1
return old_init(self, **kwargs)
dmatrix.__init__ = new_init
model(n_estimators=1).fit(X, y, eval_set=[(X, y)])
assert count[0] == 1  # only 1 DMatrix is created
count[0] = 0
y_copy = y.copy()
model(n_estimators=1).fit(X, y, eval_set=[(X, y_copy)])
assert count[0] == 2 # a different Python object is considered different
dmatrix.__init__ = old_init
# pylint: disable=too-many-arguments,too-many-locals
def get_feature_weights(
X: ArrayLike,
y: ArrayLike,
fw: np.ndarray,
parser_path: str,
tree_method: str,
model: Type[xgb.XGBModel] = xgb.XGBRegressor,
) -> np.ndarray:
"""Get feature weights using the demo parser."""
with tempfile.TemporaryDirectory() as tmpdir:
colsample_bynode = 0.5
reg = model(tree_method=tree_method, colsample_bynode=colsample_bynode)
reg.fit(X, y, feature_weights=fw)
model_path = os.path.join(tmpdir, "model.json")
reg.save_model(model_path)
with open(model_path, "r", encoding="utf-8") as fd:
model = json.load(fd)
spec = importlib.util.spec_from_file_location("JsonParser", parser_path)
assert spec is not None
jsonm = importlib.util.module_from_spec(spec)
assert spec.loader is not None
spec.loader.exec_module(jsonm)
model = jsonm.Model(model)
splits: Dict[int, int] = {}
total_nodes = 0
for tree in model.trees:
n_nodes = len(tree.nodes)
total_nodes += n_nodes
for n in range(n_nodes):
if tree.is_leaf(n):
continue
if splits.get(tree.split_index(n), None) is None:
splits[tree.split_index(n)] = 1
else:
splits[tree.split_index(n)] += 1
od = collections.OrderedDict(sorted(splits.items()))
tuples = list(od.items())
k, v = list(zip(*tuples))
w = np.polyfit(k, v, deg=1)
return w
|
"""Testing code shared by other tests."""
# pylint: disable=invalid-name
import collections
import importlib.util
import json
import os
import tempfile
from typing import Any, Callable, Dict, Type
import numpy as np
import xgboost as xgb
from xgboost._typing import ArrayLike
def validate_leaf_output(leaf: np.ndarray, num_parallel_tree: int) -> None:
"""Validate output for predict leaf tests."""
for i in range(leaf.shape[0]): # n_samples
for j in range(leaf.shape[1]): # n_rounds
for k in range(leaf.shape[2]): # n_classes
tree_group = leaf[i, j, k, :]
assert tree_group.shape[0] == num_parallel_tree
# No sampling, all trees within forest are the same
assert np.all(tree_group == tree_group[0])
def validate_data_initialization(
dmatrix: Type, model: Type[xgb.XGBModel], X: ArrayLike, y: ArrayLike
) -> None:
"""Assert that we don't create duplicated DMatrix."""
old_init = dmatrix.__init__
count = [0]
def new_init(self: Any, **kwargs: Any) -> Callable:
count[0] += 1
return old_init(self, **kwargs)
dmatrix.__init__ = new_init
model(n_estimators=1).fit(X, y, eval_set=[(X, y)])
assert count[0] == 1  # only 1 DMatrix is created
count[0] = 0
y_copy = y.copy()
model(n_estimators=1).fit(X, y, eval_set=[(X, y_copy)])
assert count[0] == 2 # a different Python object is considered different
dmatrix.__init__ = old_init
# pylint: disable=too-many-arguments,too-many-locals
def get_feature_weights(
X: ArrayLike,
y: ArrayLike,
fw: np.ndarray,
parser_path: str,
tree_method: str,
model: Type[xgb.XGBModel] = xgb.XGBRegressor,
) -> np.ndarray:
"""Get feature weights using the demo parser."""
with tempfile.TemporaryDirectory() as tmpdir:
colsample_bynode = 0.5
reg = model(tree_method=tree_method, colsample_bynode=colsample_bynode)
reg.fit(X, y, feature_weights=fw)
model_path = os.path.join(tmpdir, "model.json")
reg.save_model(model_path)
with open(model_path, "r", encoding="utf-8") as fd:
model = json.load(fd)
spec = importlib.util.spec_from_file_location("JsonParser", parser_path)
assert spec is not None
jsonm = importlib.util.module_from_spec(spec)
assert spec.loader is not None
spec.loader.exec_module(jsonm)
model = jsonm.Model(model)
splits: Dict[int, int] = {}
total_nodes = 0
for tree in model.trees:
n_nodes = len(tree.nodes)
total_nodes += n_nodes
for n in range(n_nodes):
if tree.is_leaf(n):
continue
if splits.get(tree.split_index(n), None) is None:
splits[tree.split_index(n)] = 1
else:
splits[tree.split_index(n)] += 1
od = collections.OrderedDict(sorted(splits.items()))
tuples = list(od.items())
k, v = list(zip(*tuples))
w = np.polyfit(k, v, deg=1)
return w
|
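# A toy illustration of the __init__-patching trick that
# `validate_data_initialization` above relies on: count constructions of a
# class by temporarily wrapping its __init__. `Widget` is a made-up class.
class Widget:
    def __init__(self, value: int) -> None:
        self.value = value

_old_init = Widget.__init__
_count = [0]

def _counting_init(self, *args, **kwargs):
    _count[0] += 1
    return _old_init(self, *args, **kwargs)

Widget.__init__ = _counting_init
Widget(1)
Widget(2)
assert _count[0] == 2
Widget.__init__ = _old_init  # always restore the original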
# TODO: enable ruff qa on this file when we figure out why it thinks
# weaviate_client is redefined by each test that uses that fixture
# ruff: noqa
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.index.backends.weaviate import WeaviateDocumentIndex
from tests.index.weaviate.fixture_weaviate import ( # noqa: F401
start_storage,
weaviate_client,
)
pytestmark = [pytest.mark.slow, pytest.mark.index]
def test_column_config(weaviate_client):
def get_text_field_data_type(index, index_name):
props = index._client.schema.get(index_name)["properties"]
text_field = [p for p in props if p["name"] == "text"][0]
return text_field["dataType"][0]
class TextDoc(BaseDoc):
text: str = Field()
class StringDoc(BaseDoc):
text: str = Field(col_type="string")
dbconfig = WeaviateDocumentIndex.DBConfig(index_name="TextDoc")
index = WeaviateDocumentIndex[TextDoc](db_config=dbconfig)
assert get_text_field_data_type(index, "TextDoc") == "text"
dbconfig = WeaviateDocumentIndex.DBConfig(index_name="StringDoc")
index = WeaviateDocumentIndex[StringDoc](db_config=dbconfig)
assert get_text_field_data_type(index, "StringDoc") == "string"
|
# TODO: enable ruff qa on this file when we figure out why it thinks
# weaviate_client is redefined by each test that uses that fixture
# ruff: noqa
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.index.backends.weaviate import WeaviateDocumentIndex
from tests.index.weaviate.fixture_weaviate import ( # noqa: F401
start_storage,
weaviate_client,
)
pytestmark = [pytest.mark.slow, pytest.mark.index]
def test_column_config(weaviate_client):
def get_text_field_data_type(store, index_name):
props = store._client.schema.get(index_name)["properties"]
text_field = [p for p in props if p["name"] == "text"][0]
return text_field["dataType"][0]
class TextDoc(BaseDoc):
text: str = Field()
class StringDoc(BaseDoc):
text: str = Field(col_type="string")
dbconfig = WeaviateDocumentIndex.DBConfig(index_name="TextDoc")
store = WeaviateDocumentIndex[TextDoc](db_config=dbconfig)
assert get_text_field_data_type(store, "TextDoc") == "text"
dbconfig = WeaviateDocumentIndex.DBConfig(index_name="StringDoc")
store = WeaviateDocumentIndex[StringDoc](db_config=dbconfig)
assert get_text_field_data_type(store, "StringDoc") == "string"
|
_base_ = './mask-rcnn_x101-32x4d_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
|
_base_ = './mask_rcnn_x101_32x4d_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
|
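# A rough sketch of the `_base_` inheritance these config files rely on: the
# child dict is merged into the base config recursively, overriding leaves.
# This is a simplified stand-in, not the actual mmcv/mmengine Config logic.
def merge_cfg(base: dict, child: dict) -> dict:
    merged = dict(base)
    for key, val in child.items():
        if isinstance(val, dict) and isinstance(merged.get(key), dict):
            merged[key] = merge_cfg(merged[key], val)
        else:
            merged[key] = val
    return merged

base = {'backbone': {'type': 'ResNeXt', 'groups': 32, 'depth': 101}}
child = {'backbone': {'groups': 64}}
assert merge_cfg(base, child)['backbone'] == {
    'type': 'ResNeXt', 'groups': 64, 'depth': 101}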
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from torch.utils.checkpoint import checkpoint
from mmdet.registry import MODELS
@MODELS.register_module()
class HRFPN(BaseModule):
"""HRFPN (High Resolution Feature Pyramids)
paper: `High-Resolution Representations for Labeling Pixels and Regions
<https://arxiv.org/abs/1904.04514>`_.
Args:
in_channels (list): number of channels for each branch.
out_channels (int): output channels of feature pyramids.
num_outs (int): number of output stages.
pooling_type (str): pooling for generating feature pyramids
from {MAX, AVG}.
conv_cfg (dict): dictionary to construct and config conv layer.
norm_cfg (dict): dictionary to construct and config norm layer.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed.
stride (int): stride of 3x3 convolutional layers
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
in_channels,
out_channels,
num_outs=5,
pooling_type='AVG',
conv_cfg=None,
norm_cfg=None,
with_cp=False,
stride=1,
init_cfg=dict(type='Caffe2Xavier', layer='Conv2d')):
super(HRFPN, self).__init__(init_cfg)
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.num_outs = num_outs
self.with_cp = with_cp
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.reduction_conv = ConvModule(
sum(in_channels),
out_channels,
kernel_size=1,
conv_cfg=self.conv_cfg,
act_cfg=None)
self.fpn_convs = nn.ModuleList()
for i in range(self.num_outs):
self.fpn_convs.append(
ConvModule(
out_channels,
out_channels,
kernel_size=3,
padding=1,
stride=stride,
conv_cfg=self.conv_cfg,
act_cfg=None))
if pooling_type == 'MAX':
self.pooling = F.max_pool2d
else:
self.pooling = F.avg_pool2d
def forward(self, inputs):
"""Forward function."""
assert len(inputs) == self.num_ins
outs = [inputs[0]]
for i in range(1, self.num_ins):
outs.append(
F.interpolate(inputs[i], scale_factor=2**i, mode='bilinear'))
out = torch.cat(outs, dim=1)
if out.requires_grad and self.with_cp:
out = checkpoint(self.reduction_conv, out)
else:
out = self.reduction_conv(out)
outs = [out]
for i in range(1, self.num_outs):
outs.append(self.pooling(out, kernel_size=2**i, stride=2**i))
outputs = []
for i in range(self.num_outs):
if outs[i].requires_grad and self.with_cp:
tmp_out = checkpoint(self.fpn_convs[i], outs[i])
else:
tmp_out = self.fpn_convs[i](outs[i])
outputs.append(tmp_out)
return tuple(outputs)
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from torch.utils.checkpoint import checkpoint
from ..builder import NECKS
@NECKS.register_module()
class HRFPN(BaseModule):
"""HRFPN (High Resolution Feature Pyramids)
paper: `High-Resolution Representations for Labeling Pixels and Regions
<https://arxiv.org/abs/1904.04514>`_.
Args:
in_channels (list): number of channels for each branch.
out_channels (int): output channels of feature pyramids.
num_outs (int): number of output stages.
pooling_type (str): pooling for generating feature pyramids
from {MAX, AVG}.
conv_cfg (dict): dictionary to construct and config conv layer.
norm_cfg (dict): dictionary to construct and config norm layer.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed.
stride (int): stride of 3x3 convolutional layers
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
in_channels,
out_channels,
num_outs=5,
pooling_type='AVG',
conv_cfg=None,
norm_cfg=None,
with_cp=False,
stride=1,
init_cfg=dict(type='Caffe2Xavier', layer='Conv2d')):
super(HRFPN, self).__init__(init_cfg)
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.num_outs = num_outs
self.with_cp = with_cp
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.reduction_conv = ConvModule(
sum(in_channels),
out_channels,
kernel_size=1,
conv_cfg=self.conv_cfg,
act_cfg=None)
self.fpn_convs = nn.ModuleList()
for i in range(self.num_outs):
self.fpn_convs.append(
ConvModule(
out_channels,
out_channels,
kernel_size=3,
padding=1,
stride=stride,
conv_cfg=self.conv_cfg,
act_cfg=None))
if pooling_type == 'MAX':
self.pooling = F.max_pool2d
else:
self.pooling = F.avg_pool2d
def forward(self, inputs):
"""Forward function."""
assert len(inputs) == self.num_ins
outs = [inputs[0]]
for i in range(1, self.num_ins):
outs.append(
F.interpolate(inputs[i], scale_factor=2**i, mode='bilinear'))
out = torch.cat(outs, dim=1)
if out.requires_grad and self.with_cp:
out = checkpoint(self.reduction_conv, out)
else:
out = self.reduction_conv(out)
outs = [out]
for i in range(1, self.num_outs):
outs.append(self.pooling(out, kernel_size=2**i, stride=2**i))
outputs = []
for i in range(self.num_outs):
if outs[i].requires_grad and self.with_cp:
tmp_out = checkpoint(self.fpn_convs[i], outs[i])
else:
tmp_out = self.fpn_convs[i](outs[i])
outputs.append(tmp_out)
return tuple(outputs)
|
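# A quick shape check for the HRFPN forward pass above, assuming torch and
# mmcv are installed and the HRFPN class defined above is in scope: the four
# HRNet-style branches are upsampled to the highest resolution, concatenated,
# reduced to `out_channels`, then average-pooled into a 5-level pyramid.
import torch

in_channels = [18, 36, 72, 144]
feats = [
    torch.rand(1, c, 64 // 2**i, 64 // 2**i)
    for i, c in enumerate(in_channels)
]
neck = HRFPN(in_channels=in_channels, out_channels=256, num_outs=5)
outs = neck(feats)
assert len(outs) == 5
for i, out in enumerate(outs):
    assert out.shape == (1, 256, 64 // 2**i, 64 // 2**i)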
from typing import Any, Dict, Optional
import httpx
from llama_index.core.base.embeddings.base import (
DEFAULT_EMBED_BATCH_SIZE,
)
from llama_index.core.bridge.pydantic import Field
from llama_index.core.callbacks import CallbackManager
from llama_index.embeddings.nebius.utils import (
resolve_nebius_credentials,
)
from llama_index.embeddings.openai import OpenAIEmbedding
DEFAULT_API_BASE = "https://api.studio.nebius.ai/v1"
DEFAULT_MODEL = "BAAI/bge-en-icl"
class NebiusEmbedding(OpenAIEmbedding):
"""
Nebius class for embeddings.
Args:
model_name (str): Model name for embedding. Defaults to "BAAI/bge-en-icl".
"""
additional_kwargs: Dict[str, Any] = Field(
default_factory=dict, description="Additional kwargs for the OpenAI API."
)
api_key: str = Field(description="The Nebius AI Studio API key.")
api_base: str = Field(description="The base URL for Nebius AI Studio API.")
api_version: str = Field(description="The version for OpenAI API.")
def __init__(
self,
model_name: str = DEFAULT_MODEL,
dimensions: Optional[int] = None,
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
additional_kwargs: Optional[Dict[str, Any]] = None,
api_key: Optional[str] = None,
api_base: Optional[str] = DEFAULT_API_BASE,
api_version: Optional[str] = None,
max_retries: int = 10,
timeout: float = 60.0,
reuse_client: bool = True,
callback_manager: Optional[CallbackManager] = None,
default_headers: Optional[Dict[str, str]] = None,
http_client: Optional[httpx.Client] = None,
**kwargs: Any,
) -> None:
api_key, api_base, api_version = resolve_nebius_credentials(
api_key=api_key,
api_base=api_base,
api_version=api_version,
)
super().__init__(
model_name=model_name,
dimensions=dimensions,
embed_batch_size=embed_batch_size,
additional_kwargs=additional_kwargs,
api_key=api_key,
api_base=api_base,
api_version=api_version,
max_retries=max_retries,
timeout=timeout,
reuse_client=reuse_client,
callback_manager=callback_manager,
default_headers=default_headers,
http_client=http_client,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
return "NebiusEmbedding"
|
from typing import Any, Dict, Optional
import httpx
from llama_index.core.base.embeddings.base import (
DEFAULT_EMBED_BATCH_SIZE,
)
from llama_index.core.bridge.pydantic import Field
from llama_index.core.callbacks import CallbackManager
from llama_index.embeddings.nebius.utils import (
resolve_nebius_credentials,
)
from llama_index.embeddings.openai import OpenAIEmbedding
DEFAULT_API_BASE = "https://api.studio.nebius.ai/v1"
DEFAULT_MODEL = "BAAI/bge-en-icl"
class NebiusEmbedding(OpenAIEmbedding):
"""
Nebius class for embeddings.
Args:
model_name (str): Model name for embedding. Defaults to "BAAI/bge-en-icl".
"""
additional_kwargs: Dict[str, Any] = Field(
default_factory=dict, description="Additional kwargs for the OpenAI API."
)
api_key: str = Field(description="The Nebius AI Studio API key.")
api_base: str = Field(description="The base URL for Nebius AI Studio API.")
api_version: str = Field(description="The version for OpenAI API.")
def __init__(
self,
model_name: str = DEFAULT_MODEL,
dimensions: Optional[int] = None,
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
additional_kwargs: Optional[Dict[str, Any]] = None,
api_key: Optional[str] = None,
api_base: Optional[str] = DEFAULT_API_BASE,
api_version: Optional[str] = None,
max_retries: int = 10,
timeout: float = 60.0,
reuse_client: bool = True,
callback_manager: Optional[CallbackManager] = None,
default_headers: Optional[Dict[str, str]] = None,
http_client: Optional[httpx.Client] = None,
**kwargs: Any,
) -> None:
api_key, api_base, api_version = resolve_nebius_credentials(
api_key=api_key,
api_base=api_base,
api_version=api_version,
)
super().__init__(
model_name=model_name,
dimensions=dimensions,
embed_batch_size=embed_batch_size,
additional_kwargs=additional_kwargs,
api_key=api_key,
api_base=api_base,
api_version=api_version,
max_retries=max_retries,
timeout=timeout,
reuse_client=reuse_client,
callback_manager=callback_manager,
default_headers=default_headers,
http_client=http_client,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
return "NebiusEmbedding"
|
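# A hypothetical usage sketch for the NebiusEmbedding wrapper above; the API
# key is a placeholder and network access to Nebius AI Studio is assumed.
embed_model = NebiusEmbedding(
    model_name="BAAI/bge-en-icl",
    api_key="YOUR_NEBIUS_API_KEY",  # placeholder, not a real credential
)
vector = embed_model.get_text_embedding("hello world")
print(len(vector))  # dimensionality of the returned embedding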
import os
import numpy as np
import pytest
from jina import Document, DocumentArray
from ..numpy_searcher import NumpySearcher
TOP_K = 5
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture()
def query_docs():
chunks = DocumentArray([Document(embedding=np.random.random(7))])
root_doc = Document(embedding=np.random.random(7))
root_doc.chunks.extend(chunks)
docs = DocumentArray()
docs.append(root_doc)
return docs
@pytest.mark.parametrize('default_traversal_paths', [['r'], ['c']])
def test_query_vector(tmpdir, query_docs, default_traversal_paths):
runtime = {
'workspace': str(tmpdir),
'name': 'searcher',
'pea_id': 0,
'replica_id': 0,
}
dump_path = os.path.join(cur_dir, 'dump1')
indexer = NumpySearcher(dump_path=dump_path, default_traversal_paths=default_traversal_paths, runtime_args=runtime)
indexer.search(query_docs, {'top_k': TOP_K})
assert len(query_docs) == 1
doc_traversal = query_docs.traverse_flat(default_traversal_paths)
assert len(doc_traversal[0].matches) == TOP_K
assert len(doc_traversal[0].matches[0].embedding) == 7
@pytest.mark.parametrize(['metric', 'is_distance'],
[('cosine', True), ('euclidean', True),
('cosine', False), ('euclidean', False)])
def test_metric(tmpdir, query_docs, metric, is_distance):
runtime = {
'workspace': str(tmpdir),
'name': 'searcher',
'pea_id': 0,
'replica_id': 0,
}
dump_path = os.path.join(cur_dir, 'dump1')
indexer = NumpySearcher(dump_path=dump_path, default_top_k=TOP_K, runtime_args=runtime, metric=metric,
is_distance=is_distance)
indexer.search(query_docs, {})
assert len(query_docs[0].matches) == TOP_K
for i in range(len(query_docs[0].matches) - 1):
if not is_distance:
assert query_docs[0].matches[i].scores[metric].value >= query_docs[0].matches[i + 1].scores[metric].value
else:
assert query_docs[0].matches[i].scores[metric].value <= query_docs[0].matches[i + 1].scores[metric].value
def test_empty_shard(tmpdir, query_docs):
runtime = {
'workspace': str(tmpdir),
'name': 'searcher',
'pea_id': 0,
'replica_id': 0,
}
indexer = NumpySearcher(dump_path='tests/dump_empty', runtime_args=runtime)
indexer.search(query_docs, {'top_k': TOP_K})
assert len(query_docs) == 1
assert len(query_docs[0].matches) == 0
def test_empty_documents(tmpdir):
runtime = {
'workspace': str(tmpdir),
'name': 'searcher',
'pea_id': 0,
'replica_id': 0,
}
indexer = NumpySearcher(dump_path='tests/dump1', runtime_args=runtime)
docs = DocumentArray([Document()])
indexer.search(docs, {'top_k': TOP_K})
assert len(docs) == 1
assert len(docs[0].matches) == 0
docs2 = DocumentArray()
indexer.search(docs2, {'top_k': TOP_K})
assert len(docs2) == 0
|
import os
import numpy as np
import pytest
from jina import Document, DocumentArray
from .. import NumpySearcher
TOP_K = 5
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture()
def query_docs():
chunks = DocumentArray([Document(embedding=np.random.random(7))])
root_doc = Document(embedding=np.random.random(7))
root_doc.chunks.extend(chunks)
docs = DocumentArray()
docs.append(root_doc)
return docs
@pytest.mark.parametrize('default_traversal_paths', [['r'], ['c']])
def test_query_vector(tmpdir, query_docs, default_traversal_paths):
runtime = {
'workspace': str(tmpdir),
'name': 'searcher',
'pea_id': 0,
'replica_id': 0,
}
dump_path = os.path.join(cur_dir, 'dump1')
indexer = NumpySearcher(dump_path=dump_path, default_traversal_paths=default_traversal_paths, runtime_args=runtime)
indexer.search(query_docs, {'top_k': TOP_K})
assert len(query_docs) == 1
doc_traversal = query_docs.traverse_flat(default_traversal_paths)
assert len(doc_traversal[0].matches) == TOP_K
assert len(doc_traversal[0].matches[0].embedding) == 7
@pytest.mark.parametrize(['metric', 'is_distance'],
[('cosine', True), ('euclidean', True),
('cosine', False), ('euclidean', False)])
def test_metric(tmpdir, query_docs, metric, is_distance):
runtime = {
'workspace': str(tmpdir),
'name': 'searcher',
'pea_id': 0,
'replica_id': 0,
}
dump_path = os.path.join(cur_dir, 'dump1')
indexer = NumpySearcher(dump_path=dump_path, default_top_k=TOP_K, runtime_args=runtime, metric=metric,
is_distance=is_distance)
indexer.search(query_docs, {})
assert len(query_docs[0].matches) == TOP_K
for i in range(len(query_docs[0].matches) - 1):
if not is_distance:
assert query_docs[0].matches[i].scores[metric].value >= query_docs[0].matches[i + 1].scores[metric].value
else:
assert query_docs[0].matches[i].scores[metric].value <= query_docs[0].matches[i + 1].scores[metric].value
def test_empty_shard(tmpdir, query_docs):
runtime = {
'workspace': str(tmpdir),
'name': 'searcher',
'pea_id': 0,
'replica_id': 0,
}
indexer = NumpySearcher(dump_path='tests/dump_empty', runtime_args=runtime)
indexer.search(query_docs, {'top_k': TOP_K})
assert len(query_docs) == 1
assert len(query_docs[0].matches) == 0
def test_empty_documents(tmpdir):
runtime = {
'workspace': str(tmpdir),
'name': 'searcher',
'pea_id': 0,
'replica_id': 0,
}
indexer = NumpySearcher(dump_path='tests/dump1', runtime_args=runtime)
docs = DocumentArray([Document()])
indexer.search(docs, {'top_k': TOP_K})
assert len(docs) == 1
assert len(docs[0].matches) == 0
docs2 = DocumentArray()
indexer.search(docs2, {'top_k': TOP_K})
assert len(docs2) == 0
|
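# The 'r' vs 'c' traversal paths in the tests above select root documents vs
# their chunks. A dependency-free stand-in of that selection logic, for
# illustration only (the real implementation lives in jina's DocumentArray):
def traverse_flat_demo(docs, paths):
    selected = []
    for path in paths:
        if path == 'r':
            selected.extend(docs)
        elif path == 'c':
            for doc in docs:
                selected.extend(doc.get('chunks', []))
    return selected

demo_docs = [{'id': 'root', 'chunks': [{'id': 'chunk0'}, {'id': 'chunk1'}]}]
assert [d['id'] for d in traverse_flat_demo(demo_docs, ['c'])] == ['chunk0', 'chunk1']
assert [d['id'] for d in traverse_flat_demo(demo_docs, ['r'])] == ['root']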
from typing import Type
from docarray.array.abstract_array import AbstractDocumentArray
from docarray.proto import DocumentArrayProto, NodeProto
class ProtoArrayMixin(AbstractDocumentArray):
@classmethod
def from_protobuf(
cls: Type[AbstractDocumentArray], pb_msg: 'DocumentArrayProto'
) -> AbstractDocumentArray:
"""create a Document from a protobuf message"""
return cls(cls.document_type.from_protobuf(od) for od in pb_msg.docs)
def to_protobuf(self) -> 'DocumentArrayProto':
"""Convert DocumentArray into a Protobuf message.
:param ndarray_type: can be ``list`` or ``numpy``,
if set it will force all ndarray-like object from all
Documents to ``List`` or ``numpy.ndarray``.
:return: the protobuf message
"""
dap = DocumentArrayProto()
for doc in self:
dap.docs.append(doc.to_protobuf())
return dap
def _to_node_protobuf(self) -> NodeProto:
"""Convert a DocumentArray into a NodeProto protobuf message.
This function should be called when a DocumentArray
is nested into another Document that needs to be converted into a protobuf.
:return: the nested item protobuf message
"""
return NodeProto(chunks=self.to_protobuf())
|
from typing import Type
from docarray.proto import DocumentArrayProto, NodeProto
from ..abstract_array import AbstractDocumentArray
class ProtoArrayMixin(AbstractDocumentArray):
@classmethod
def from_protobuf(
cls: Type[AbstractDocumentArray], pb_msg: 'DocumentArrayProto'
) -> AbstractDocumentArray:
"""create a Document from a protobuf message"""
return cls(cls.document_type.from_protobuf(od) for od in pb_msg.docs)
def to_protobuf(self) -> 'DocumentArrayProto':
"""Convert DocumentArray into a Protobuf message.
:param ndarray_type: can be ``list`` or ``numpy``,
if set it will force all ndarray-like object from all
Documents to ``List`` or ``numpy.ndarray``.
:return: the protobuf message
"""
dap = DocumentArrayProto()
for doc in self:
dap.docs.append(doc.to_protobuf())
return dap
def _to_node_protobuf(self) -> NodeProto:
"""Convert a DocumentArray into a NodeProto protobuf message.
This function should be called when a DocumentArray
is nested into another Document that needs to be converted into a protobuf.
:return: the nested item protobuf message
"""
return NodeProto(chunks=self.to_protobuf())
|
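# A round-trip sketch of the from_protobuf/to_protobuf pair above, using the
# public DocList API (assuming a recent docarray v2 installation).
from docarray import BaseDoc, DocList

class TextDoc(BaseDoc):
    text: str

docs = DocList[TextDoc]([TextDoc(text='a'), TextDoc(text='b')])
proto = docs.to_protobuf()
restored = DocList[TextDoc].from_protobuf(proto)
assert [d.text for d in restored] == ['a', 'b']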
import os
import random
from torchaudio.datasets import iemocap
from torchaudio_unittest.common_utils import get_whitenoise, save_wav, TempDirMixin, TorchaudioTestCase
LABELS = ["neu", "hap", "ang", "sad", "exc", "fru", "xxx"]
SAMPLE_RATE = 16000
def _save_wav(filepath: str, seed: int):
wav = get_whitenoise(
sample_rate=SAMPLE_RATE,
duration=0.01,
n_channels=1,
seed=seed,
)
save_wav(filepath, wav, SAMPLE_RATE)
return wav
def _save_label(label_folder: str, filename: str, wav_stem: str):
label = random.choice(LABELS)
line = f"[xxx]\t{wav_stem}\t{label}\t[yyy]"
filepath = os.path.join(label_folder, filename)
with open(filepath, "a") as f:
f.write(line + "\n")
return label
def _get_samples(dataset_dir: str, session: int):
session_folder = os.path.join(dataset_dir, f"Session{session}")
os.makedirs(session_folder, exist_ok=True)
wav_folder = os.path.join(session_folder, "sentences", "wav")
label_folder = os.path.join(session_folder, "dialog", "EmoEvaluation")
os.makedirs(wav_folder, exist_ok=True)
os.makedirs(label_folder, exist_ok=True)
wav_stems = []
for i in range(5):
for g in ["F", "M"]:
for utt in ["impro", "script"]:
speaker = f"Ses0{session}{g}"
subfolder = f"{speaker}_{utt}0{i}"
subfolder_path = os.path.join(wav_folder, subfolder)
os.makedirs(subfolder_path, exist_ok=True)
for j in range(5):
wav_stem = f"{subfolder}_F00{j}"
wav_stems.append(wav_stem)
all_samples = []
impro_samples = []
script_samples = []
wav_stems = sorted(wav_stems)
for wav_stem in wav_stems:
subfolder = wav_stem[:-5]
speaker = subfolder.split("_")[0]
wav_file = os.path.join(wav_folder, subfolder, wav_stem + ".wav")
wav = _save_wav(wav_file, seed=0)
label = _save_label(label_folder, subfolder + ".txt", wav_stem)
if label == "xxx":
continue
sample = (wav, SAMPLE_RATE, wav_stem, label, speaker)
all_samples.append(sample)
if "impro" in subfolder:
impro_samples.append(sample)
else:
script_samples.append(sample)
return all_samples, script_samples, impro_samples
def get_mock_dataset(dataset_dir: str):
os.makedirs(dataset_dir, exist_ok=True)
all_samples = []
script_samples = []
impro_samples = []
for session in range(1, 4):
samples = _get_samples(dataset_dir, session)
all_samples += samples[0]
script_samples += samples[1]
impro_samples += samples[2]
return all_samples, script_samples, impro_samples
class TestIemocap(TempDirMixin, TorchaudioTestCase):
root_dir = None
all_samples = []
script_samples = []
impro_samples = []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
dataset_dir = os.path.join(cls.root_dir, "IEMOCAP")
cls.all_samples, cls.script_samples, cls.impro_samples = get_mock_dataset(dataset_dir)
def _testIEMOCAP(self, dataset, samples):
num_samples = 0
for i, data in enumerate(dataset):
self.assertEqual(data, samples[i])
num_samples += 1
assert num_samples == len(samples)
def testIEMOCAPFullDataset(self):
dataset = iemocap.IEMOCAP(self.root_dir)
self._testIEMOCAP(dataset, self.all_samples)
def testIEMOCAPScriptedDataset(self):
dataset = iemocap.IEMOCAP(self.root_dir, utterance_type="scripted")
self._testIEMOCAP(dataset, self.script_samples)
def testIEMOCAPImprovisedDataset(self):
dataset = iemocap.IEMOCAP(self.root_dir, utterance_type="improvised")
self._testIEMOCAP(dataset, self.impro_samples)
|
import os
import random
from torchaudio.datasets import iemocap
from torchaudio_unittest.common_utils import get_whitenoise, save_wav, TempDirMixin, TorchaudioTestCase
LABELS = ["neu", "hap", "ang", "sad", "exc", "fru", "xxx"]
SAMPLE_RATE = 16000
def _save_wav(filepath: str, seed: int):
wav = get_whitenoise(
sample_rate=SAMPLE_RATE,
duration=0.01,
n_channels=1,
seed=seed,
)
save_wav(filepath, wav, SAMPLE_RATE)
return wav
def _save_label(label_folder: str, filename: str, wav_stem: str):
label = random.choice(LABELS)
line = f"[xxx]\t{wav_stem}\t{label}\t[yyy]"
filepath = os.path.join(label_folder, filename)
with open(filepath, "a") as f:
f.write(line + "\n")
return label
def _get_samples(dataset_dir: str, session: int):
session_folder = os.path.join(dataset_dir, f"Session{session}")
os.makedirs(session_folder, exist_ok=True)
wav_folder = os.path.join(session_folder, "sentences", "wav")
label_folder = os.path.join(session_folder, "dialog", "EmoEvaluation")
os.makedirs(wav_folder, exist_ok=True)
os.makedirs(label_folder, exist_ok=True)
wav_stems = []
for i in range(5):
for g in ["F", "M"]:
for utt in ["impro", "script"]:
speaker = f"Ses0{session}{g}"
subfolder = f"{speaker}_{utt}0{i}"
subfolder_path = os.path.join(wav_folder, subfolder)
os.makedirs(subfolder_path, exist_ok=True)
for j in range(5):
wav_stem = f"{subfolder}_F00{j}"
wav_stems.append(wav_stem)
all_samples = []
impro_samples = []
script_samples = []
wav_stems = sorted(wav_stems)
for wav_stem in wav_stems:
subfolder = wav_stem[:-5]
speaker = subfolder.split("_")[0]
wav_file = os.path.join(wav_folder, subfolder, wav_stem + ".wav")
wav = _save_wav(wav_file, seed=0)
label = _save_label(label_folder, subfolder + ".txt", wav_stem)
if label == "xxx":
continue
sample = (wav, SAMPLE_RATE, wav_stem, label, speaker)
all_samples.append(sample)
if "impro" in subfolder:
impro_samples.append(sample)
else:
script_samples.append(sample)
return all_samples, script_samples, impro_samples
def get_mock_dataset(dataset_dir: str):
os.makedirs(dataset_dir, exist_ok=True)
all_samples = []
script_samples = []
impro_samples = []
for session in range(1, 4):
samples = _get_samples(dataset_dir, session)
all_samples += samples[0]
script_samples += samples[1]
impro_samples += samples[2]
return all_samples, script_samples, impro_samples
class TestIemocap(TempDirMixin, TorchaudioTestCase):
root_dir = None
backend = "default"
all_samples = []
script_samples = []
impro_samples = []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
dataset_dir = os.path.join(cls.root_dir, "IEMOCAP")
cls.all_samples, cls.script_samples, cls.impro_samples = get_mock_dataset(dataset_dir)
def _testIEMOCAP(self, dataset, samples):
num_samples = 0
for i, data in enumerate(dataset):
self.assertEqual(data, samples[i])
num_samples += 1
assert num_samples == len(samples)
def testIEMOCAPFullDataset(self):
dataset = iemocap.IEMOCAP(self.root_dir)
self._testIEMOCAP(dataset, self.all_samples)
def testIEMOCAPScriptedDataset(self):
dataset = iemocap.IEMOCAP(self.root_dir, utterance_type="scripted")
self._testIEMOCAP(dataset, self.script_samples)
def testIEMOCAPImprovisedDataset(self):
dataset = iemocap.IEMOCAP(self.root_dir, utterance_type="improvised")
self._testIEMOCAP(dataset, self.impro_samples)
|
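# The mock label files above use tab-separated lines of the form
# "[xxx]\t<wav_stem>\t<label>\t[yyy]"; a minimal parser for that format:
def parse_label_line(line: str):
    _, wav_stem, label, _ = line.rstrip("\n").split("\t")
    return wav_stem, label

assert parse_label_line("[xxx]\tSes01F_impro01_F000\tneu\t[yyy]") == (
    "Ses01F_impro01_F000",
    "neu",
)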
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Tuple
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmengine.model.utils import bias_init_with_prob, normal_init
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import OptConfigType, OptMultiConfig
from .anchor_head import AnchorHead
@MODELS.register_module()
class RetinaSepBNHead(AnchorHead):
""""RetinaHead with separate BN.
In RetinaHead, conv/norm layers are shared across different FPN levels,
while in RetinaSepBNHead, conv layers are shared across different FPN
levels, but BN layers are separated.
"""
def __init__(self,
num_classes: int,
num_ins: int,
in_channels: int,
stacked_convs: int = 4,
conv_cfg: OptConfigType = None,
norm_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = None,
**kwargs) -> None:
assert init_cfg is None, 'To prevent abnormal initialization ' \
'behavior, init_cfg is not allowed to be set'
self.stacked_convs = stacked_convs
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.num_ins = num_ins
super().__init__(
num_classes=num_classes,
in_channels=in_channels,
init_cfg=init_cfg,
**kwargs)
def _init_layers(self) -> None:
"""Initialize layers of the head."""
self.relu = nn.ReLU(inplace=True)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.num_ins):
cls_convs = nn.ModuleList()
reg_convs = nn.ModuleList()
for j in range(self.stacked_convs):
chn = self.in_channels if j == 0 else self.feat_channels
cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.cls_convs.append(cls_convs)
self.reg_convs.append(reg_convs)
for i in range(self.stacked_convs):
for j in range(1, self.num_ins):
self.cls_convs[j][i].conv = self.cls_convs[0][i].conv
self.reg_convs[j][i].conv = self.reg_convs[0][i].conv
self.retina_cls = nn.Conv2d(
self.feat_channels,
self.num_base_priors * self.cls_out_channels,
3,
padding=1)
self.retina_reg = nn.Conv2d(
self.feat_channels, self.num_base_priors * 4, 3, padding=1)
def init_weights(self) -> None:
"""Initialize weights of the head."""
super().init_weights()
for m in self.cls_convs[0]:
normal_init(m.conv, std=0.01)
for m in self.reg_convs[0]:
normal_init(m.conv, std=0.01)
bias_cls = bias_init_with_prob(0.01)
normal_init(self.retina_cls, std=0.01, bias=bias_cls)
normal_init(self.retina_reg, std=0.01)
def forward(self, feats: Tuple[Tensor]) -> tuple:
"""Forward features from the upstream network.
Args:
feats (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
tuple: Usually a tuple of classification scores and bbox prediction
- cls_scores (list[Tensor]): Classification scores for all
scale levels, each is a 4D-tensor, the number of channels is
num_anchors * num_classes.
- bbox_preds (list[Tensor]): Box energies / deltas for all
scale levels, each is a 4D-tensor, the number of channels is
num_anchors * 4.
"""
cls_scores = []
bbox_preds = []
for i, x in enumerate(feats):
cls_feat = feats[i]
reg_feat = feats[i]
for cls_conv in self.cls_convs[i]:
cls_feat = cls_conv(cls_feat)
for reg_conv in self.reg_convs[i]:
reg_feat = reg_conv(reg_feat)
cls_score = self.retina_cls(cls_feat)
bbox_pred = self.retina_reg(reg_feat)
cls_scores.append(cls_score)
bbox_preds.append(bbox_pred)
return cls_scores, bbox_preds
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Tuple
import torch.nn as nn
from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import OptConfigType, OptMultiConfig
from .anchor_head import AnchorHead
@MODELS.register_module()
class RetinaSepBNHead(AnchorHead):
""""RetinaHead with separate BN.
In RetinaHead, conv/norm layers are shared across different FPN levels,
while in RetinaSepBNHead, conv layers are shared across different FPN
levels, but BN layers are separated.
"""
def __init__(self,
num_classes: int,
num_ins: int,
in_channels: int,
stacked_convs: int = 4,
conv_cfg: OptConfigType = None,
norm_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = None,
**kwargs) -> None:
assert init_cfg is None, 'To prevent abnormal initialization ' \
'behavior, init_cfg is not allowed to be set'
self.stacked_convs = stacked_convs
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.num_ins = num_ins
super().__init__(
num_classes=num_classes,
in_channels=in_channels,
init_cfg=init_cfg,
**kwargs)
def _init_layers(self) -> None:
"""Initialize layers of the head."""
self.relu = nn.ReLU(inplace=True)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.num_ins):
cls_convs = nn.ModuleList()
reg_convs = nn.ModuleList()
for j in range(self.stacked_convs):
chn = self.in_channels if j == 0 else self.feat_channels
cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.cls_convs.append(cls_convs)
self.reg_convs.append(reg_convs)
for i in range(self.stacked_convs):
for j in range(1, self.num_ins):
self.cls_convs[j][i].conv = self.cls_convs[0][i].conv
self.reg_convs[j][i].conv = self.reg_convs[0][i].conv
self.retina_cls = nn.Conv2d(
self.feat_channels,
self.num_base_priors * self.cls_out_channels,
3,
padding=1)
self.retina_reg = nn.Conv2d(
self.feat_channels, self.num_base_priors * 4, 3, padding=1)
def init_weights(self) -> None:
"""Initialize weights of the head."""
super().init_weights()
for m in self.cls_convs[0]:
normal_init(m.conv, std=0.01)
for m in self.reg_convs[0]:
normal_init(m.conv, std=0.01)
bias_cls = bias_init_with_prob(0.01)
normal_init(self.retina_cls, std=0.01, bias=bias_cls)
normal_init(self.retina_reg, std=0.01)
def forward(self, feats: Tuple[Tensor]) -> tuple:
"""Forward features from the upstream network.
Args:
feats (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
tuple: Usually a tuple of classification scores and bbox prediction
- cls_scores (list[Tensor]): Classification scores for all
scale levels, each is a 4D-tensor, the number of channels is
num_anchors * num_classes.
- bbox_preds (list[Tensor]): Box energies / deltas for all
scale levels, each is a 4D-tensor, the number of channels is
num_anchors * 4.
"""
cls_scores = []
bbox_preds = []
for i, x in enumerate(feats):
cls_feat = feats[i]
reg_feat = feats[i]
for cls_conv in self.cls_convs[i]:
cls_feat = cls_conv(cls_feat)
for reg_conv in self.reg_convs[i]:
reg_feat = reg_conv(reg_feat)
cls_score = self.retina_cls(cls_feat)
bbox_pred = self.retina_reg(reg_feat)
cls_scores.append(cls_score)
bbox_preds.append(bbox_pred)
return cls_scores, bbox_preds
|
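# A stripped-down illustration of the weight sharing set up in _init_layers
# above: the same nn.Conv2d instance is reused across pyramid levels, while
# each level keeps its own BatchNorm (a torch installation is assumed).
import torch.nn as nn

num_levels, channels = 3, 8
convs = nn.ModuleList(
    [nn.Conv2d(channels, channels, 3, padding=1) for _ in range(num_levels)])
bns = nn.ModuleList([nn.BatchNorm2d(channels) for _ in range(num_levels)])
for lvl in range(1, num_levels):
    convs[lvl] = convs[0]  # share conv weights across levels

assert convs[1] is convs[0]  # shared convolution
assert bns[1] is not bns[0]  # per-level batch norm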
"""Open Weather Map tool spec."""
from typing import Any, List
from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
class OpenWeatherMapToolSpec(BaseToolSpec):
"""Open Weather tool spec."""
spec_functions = ["weather_at_location", "forecast_tomorrow_at_location"]
def __init__(self, key: str, temp_units: str = "celsius") -> None:
"""Initialize with parameters."""
try:
from pyowm import OWM
except ImportError:
raise ImportError(
"The OpenWeatherMap tool requires the pyowm package to be installed. "
"Please install it using `pip install pyowm`."
)
self.key = key
self.temp_units = temp_units
self._owm = OWM(self.key)
self._mgr = self._owm.weather_manager()
def _format_temp(self, temperature: Any, temp_unit: str) -> str:
return (
f" - Current: {temperature['temp']}{temp_unit}\n"
f" - High: {temperature['temp_max']}{temp_unit}\n"
f" - Low: {temperature['temp_min']}{temp_unit}\n"
f" - Feels like: {temperature['feels_like']}{temp_unit}"
)
def _format_weather(
self, place: str, temp_str: str, w: Any, time_str: str = "now"
) -> str:
"""
Format weather response from OpenWeatherMap.
Function thanks to
langchain/utilities/openweathermap.py
"""
detailed_status = w.detailed_status
wind = w.wind()
humidity = w.humidity
rain = w.rain
heat_index = w.heat_index
clouds = w.clouds
return (
f"In {place}, the weather for {time_str} is as follows:\n"
f"Detailed status: {detailed_status}\n"
f"Wind speed: {wind['speed']} m/s, direction: {wind['deg']}°\n"
f"Humidity: {humidity}%\n"
"Temperature: \n"
f"{temp_str}\n"
f"Rain: {rain}\n"
f"Heat index: {heat_index!s}\n"
f"Cloud cover: {clouds}%"
)
def weather_at_location(self, location: str) -> List[Document]:
"""
Finds the current weather at a location.
Args:
location (str):
The place to find the weather at.
Should be a city name and country.
"""
from pyowm.commons.exceptions import NotFoundError
try:
observation = self._mgr.weather_at_place(location)
except NotFoundError:
return [Document(text=f"Unable to find weather at {location}.")]
w = observation.weather
temperature = w.temperature(self.temp_units)
temp_unit = "°C" if self.temp_units == "celsius" else "°F"
temp_str = self._format_temp(temperature, temp_unit)
weather_text = self._format_weather(location, temp_str, w)
return [Document(text=weather_text, metadata={"weather from": location})]
def forecast_tomorrow_at_location(self, location: str) -> List[Document]:
"""
Finds the weather forecast for tomorrow at a location.
Args:
location (str):
The location to find the weather tomorrow at.
Should be a city name and country.
"""
from pyowm.commons.exceptions import NotFoundError
from pyowm.utils import timestamps
try:
forecast = self._mgr.forecast_at_place(location, "3h")
except NotFoundError:
return [Document(text=f"Unable to find weather at {location}.")]
tomorrow = timestamps.tomorrow()
w = forecast.get_weather_at(tomorrow)
temperature = w.temperature(self.temp_units)
temp_unit = "°C" if self.temp_units == "celsius" else "°F"
temp_str = self._format_temp(temperature, temp_unit)
weather_text = self._format_weather(location, temp_str, w, "tomorrow")
return [
Document(
text=weather_text,
metadata={
"weather from": location,
"forecast for": tomorrow.strftime("%Y-%m-%d"),
},
)
]
|
"""Open Weather Map tool spec."""
from typing import Any, List
from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
class OpenWeatherMapToolSpec(BaseToolSpec):
"""Open Weather tool spec."""
spec_functions = ["weather_at_location", "forecast_tomorrow_at_location"]
def __init__(self, key: str, temp_units: str = "celsius") -> None:
"""Initialize with parameters."""
try:
from pyowm import OWM
except ImportError:
raise ImportError(
"The OpenWeatherMap tool requires the pyowm package to be installed. "
"Please install it using `pip install pyowm`."
)
self.key = key
self.temp_units = temp_units
self._owm = OWM(self.key)
self._mgr = self._owm.weather_manager()
def _format_temp(self, temperature: Any, temp_unit: str) -> str:
return (
f" - Current: {temperature['temp']}{temp_unit}\n"
f" - High: {temperature['temp_max']}{temp_unit}\n"
f" - Low: {temperature['temp_min']}{temp_unit}\n"
f" - Feels like: {temperature['feels_like']}{temp_unit}"
)
def _format_weather(
self, place: str, temp_str: str, w: Any, time_str: str = "now"
) -> str:
"""Format weather response from OpenWeatherMap.
Function thanks to
langchain/utilities/openweathermap.py
"""
detailed_status = w.detailed_status
wind = w.wind()
humidity = w.humidity
rain = w.rain
heat_index = w.heat_index
clouds = w.clouds
return (
f"In {place}, the weather for {time_str} is as follows:\n"
f"Detailed status: {detailed_status}\n"
f"Wind speed: {wind['speed']} m/s, direction: {wind['deg']}°\n"
f"Humidity: {humidity}%\n"
"Temperature: \n"
f"{temp_str}\n"
f"Rain: {rain}\n"
f"Heat index: {heat_index!s}\n"
f"Cloud cover: {clouds}%"
)
def weather_at_location(self, location: str) -> List[Document]:
"""
Finds the current weather at a location.
Args:
location (str):
The place to find the weather at.
Should be a city name and country.
"""
from pyowm.commons.exceptions import NotFoundError
try:
observation = self._mgr.weather_at_place(location)
except NotFoundError:
return [Document(text=f"Unable to find weather at {location}.")]
w = observation.weather
temperature = w.temperature(self.temp_units)
temp_unit = "°C" if self.temp_units == "celsius" else "°F"
temp_str = self._format_temp(temperature, temp_unit)
weather_text = self._format_weather(location, temp_str, w)
return [Document(text=weather_text, metadata={"weather from": location})]
def forecast_tomorrow_at_location(self, location: str) -> List[Document]:
"""
Finds the weather forecast for tomorrow at a location.
Args:
location (str):
The location to find the weather tomorrow at.
Should be a city name and country.
"""
from pyowm.commons.exceptions import NotFoundError
from pyowm.utils import timestamps
try:
forecast = self._mgr.forecast_at_place(location, "3h")
except NotFoundError:
return [Document(text=f"Unable to find weather at {location}.")]
tomorrow = timestamps.tomorrow()
w = forecast.get_weather_at(tomorrow)
temperature = w.temperature(self.temp_units)
temp_unit = "°C" if self.temp_units == "celsius" else "°F"
temp_str = self._format_temp(temperature, temp_unit)
weather_text = self._format_weather(location, temp_str, w, "tomorrow")
return [
Document(
text=weather_text,
metadata={
"weather from": location,
"forecast for": tomorrow.strftime("%Y-%m-%d"),
},
)
]
|
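# Hypothetical usage of the tool spec above; the key is a placeholder, and a
# pyowm installation plus network access are assumed.
tool_spec = OpenWeatherMapToolSpec(key="YOUR_OWM_API_KEY")
docs = tool_spec.weather_at_location("Berlin, DE")
print(docs[0].text)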
import os
import numpy as np
import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from docarray.typing import NdArray
class MyDoc(BaseDoc):
embedding: NdArray
text: str
image: ImageDoc
@pytest.mark.slow
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle', 'json-array']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
@pytest.mark.parametrize('show_progress', [False, True])
def test_array_save_load_binary(protocol, compress, tmp_path, show_progress):
tmp_file = os.path.join(tmp_path, 'test')
da = DocList[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
]
)
da.save_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
da2 = DocList[MyDoc].load_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
assert len(da2) == 2
assert len(da) == len(da2)
for d1, d2 in zip(da, da2):
assert d1.embedding.tolist() == d2.embedding.tolist()
assert d1.text == d2.text
assert d1.image.url == d2.image.url
assert da[1].image.url is None
assert da2[1].image.url is None
@pytest.mark.slow
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle', 'json-array']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
@pytest.mark.parametrize('show_progress', [False, True])
def test_array_save_load_binary_streaming(protocol, compress, tmp_path, show_progress):
tmp_file = os.path.join(tmp_path, 'test')
da = DocList[MyDoc]()
def _extend_da(num_docs=100):
for _ in range(num_docs):
da.extend(
[
MyDoc(
embedding=np.random.rand(3, 2),
text='hello',
image=ImageDoc(url='aux.png'),
),
]
)
_extend_da()
da.save_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
da2 = DocList[MyDoc]()
da_generator = DocList[MyDoc].load_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
for i, doc in enumerate(da_generator):
assert doc.id == da[i].id
assert doc.text == da[i].text
assert doc.image.url == da[i].image.url
da2.append(doc)
assert len(da2) == 100
|
import os
import numpy as np
import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from docarray.typing import NdArray
class MyDoc(BaseDoc):
embedding: NdArray
text: str
image: ImageDoc
@pytest.mark.slow
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
@pytest.mark.parametrize('show_progress', [False, True])
def test_array_save_load_binary(protocol, compress, tmp_path, show_progress):
tmp_file = os.path.join(tmp_path, 'test')
da = DocList[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
]
)
da.save_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
da2 = DocList[MyDoc].load_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
assert len(da2) == 2
assert len(da) == len(da2)
for d1, d2 in zip(da, da2):
assert d1.embedding.tolist() == d2.embedding.tolist()
assert d1.text == d2.text
assert d1.image.url == d2.image.url
assert da[1].image.url is None
assert da2[1].image.url is None
@pytest.mark.slow
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
@pytest.mark.parametrize('show_progress', [False, True])
def test_array_save_load_binary_streaming(protocol, compress, tmp_path, show_progress):
tmp_file = os.path.join(tmp_path, 'test')
da = DocList[MyDoc]()
def _extend_da(num_docs=100):
for _ in range(num_docs):
da.extend(
[
MyDoc(
embedding=np.random.rand(3, 2),
text='hello',
image=ImageDoc(url='aux.png'),
),
]
)
_extend_da()
da.save_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
da2 = DocList[MyDoc]()
da_generator = DocList[MyDoc].load_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
for i, doc in enumerate(da_generator):
assert doc.id == da[i].id
assert doc.text == da[i].text
assert doc.image.url == da[i].image.url
da2.append(doc)
assert len(da2) == 100
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule
from ..builder import HEADS
from .anchor_head import AnchorHead
@HEADS.register_module()
class RetinaHead(AnchorHead):
r"""An anchor-based head used in `RetinaNet
<https://arxiv.org/pdf/1708.02002.pdf>`_.
The head contains two subnetworks. The first classifies anchor boxes and
the second regresses deltas for the anchors.
Example:
>>> import torch
>>> self = RetinaHead(11, 7)
>>> x = torch.rand(1, 7, 32, 32)
>>> cls_score, bbox_pred = self.forward_single(x)
>>> # Each anchor predicts a score for each class except background
>>> cls_per_anchor = cls_score.shape[1] / self.num_anchors
>>> box_per_anchor = bbox_pred.shape[1] / self.num_anchors
>>> assert cls_per_anchor == (self.num_classes)
>>> assert box_per_anchor == 4
"""
def __init__(self,
num_classes,
in_channels,
stacked_convs=4,
conv_cfg=None,
norm_cfg=None,
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
init_cfg=dict(
type='Normal',
layer='Conv2d',
std=0.01,
override=dict(
type='Normal',
name='retina_cls',
std=0.01,
bias_prob=0.01)),
**kwargs):
self.stacked_convs = stacked_convs
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
super(RetinaHead, self).__init__(
num_classes,
in_channels,
anchor_generator=anchor_generator,
init_cfg=init_cfg,
**kwargs)
def _init_layers(self):
"""Initialize layers of the head."""
self.relu = nn.ReLU(inplace=True)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.retina_cls = nn.Conv2d(
self.feat_channels,
self.num_anchors * self.cls_out_channels,
3,
padding=1)
self.retina_reg = nn.Conv2d(
self.feat_channels, self.num_anchors * 4, 3, padding=1)
def forward_single(self, x):
"""Forward feature of a single scale level.
Args:
x (Tensor): Features of a single scale level.
Returns:
tuple:
cls_score (Tensor): Cls scores for a single scale level,
the number of channels is num_anchors * num_classes.
bbox_pred (Tensor): Box energies / deltas for a single scale
level, the number of channels is num_anchors * 4.
"""
cls_feat = x
reg_feat = x
for cls_conv in self.cls_convs:
cls_feat = cls_conv(cls_feat)
for reg_conv in self.reg_convs:
reg_feat = reg_conv(reg_feat)
cls_score = self.retina_cls(cls_feat)
bbox_pred = self.retina_reg(reg_feat)
return cls_score, bbox_pred
|
import torch.nn as nn
from mmcv.cnn import ConvModule
from ..builder import HEADS
from .anchor_head import AnchorHead
@HEADS.register_module()
class RetinaHead(AnchorHead):
r"""An anchor-based head used in `RetinaNet
<https://arxiv.org/pdf/1708.02002.pdf>`_.
The head contains two subnetworks. The first classifies anchor boxes and
the second regresses deltas for the anchors.
Example:
>>> import torch
>>> self = RetinaHead(11, 7)
>>> x = torch.rand(1, 7, 32, 32)
>>> cls_score, bbox_pred = self.forward_single(x)
>>> # Each anchor predicts a score for each class except background
>>> cls_per_anchor = cls_score.shape[1] / self.num_anchors
>>> box_per_anchor = bbox_pred.shape[1] / self.num_anchors
>>> assert cls_per_anchor == (self.num_classes)
>>> assert box_per_anchor == 4
"""
def __init__(self,
num_classes,
in_channels,
stacked_convs=4,
conv_cfg=None,
norm_cfg=None,
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
init_cfg=dict(
type='Normal',
layer='Conv2d',
std=0.01,
override=dict(
type='Normal',
name='retina_cls',
std=0.01,
bias_prob=0.01)),
**kwargs):
self.stacked_convs = stacked_convs
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
super(RetinaHead, self).__init__(
num_classes,
in_channels,
anchor_generator=anchor_generator,
init_cfg=init_cfg,
**kwargs)
def _init_layers(self):
"""Initialize layers of the head."""
self.relu = nn.ReLU(inplace=True)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.retina_cls = nn.Conv2d(
self.feat_channels,
self.num_anchors * self.cls_out_channels,
3,
padding=1)
self.retina_reg = nn.Conv2d(
self.feat_channels, self.num_anchors * 4, 3, padding=1)
def forward_single(self, x):
"""Forward feature of a single scale level.
Args:
x (Tensor): Features of a single scale level.
Returns:
tuple:
cls_score (Tensor): Cls scores for a single scale level,
the number of channels is num_anchors * num_classes.
bbox_pred (Tensor): Box energies / deltas for a single scale
level, the number of channels is num_anchors * 4.
"""
cls_feat = x
reg_feat = x
for cls_conv in self.cls_convs:
cls_feat = cls_conv(cls_feat)
for reg_conv in self.reg_convs:
reg_feat = reg_conv(reg_feat)
cls_score = self.retina_cls(cls_feat)
bbox_pred = self.retina_reg(reg_feat)
return cls_score, bbox_pred
|
import os
import time
import uuid
import pytest
import qdrant_client
from docarray.index import QdrantDocumentIndex
cur_dir = os.path.dirname(os.path.abspath(__file__))
qdrant_yml = os.path.abspath(os.path.join(cur_dir, 'docker-compose.yml'))
@pytest.fixture(scope='session', autouse=True)
def start_storage():
os.system(f"docker-compose -f {qdrant_yml} up -d --remove-orphans")
time.sleep(1)
yield
os.system(f"docker-compose -f {qdrant_yml} down --remove-orphans")
@pytest.fixture(scope='function')
def tmp_collection_name():
return uuid.uuid4().hex
@pytest.fixture
def qdrant() -> qdrant_client.QdrantClient:
"""This fixture takes care of removing the collection before each test case"""
client = qdrant_client.QdrantClient(path='/tmp/qdrant-local')
for collection in client.get_collections().collections:
client.delete_collection(collection.name)
return client
@pytest.fixture
def qdrant_config(qdrant) -> QdrantDocumentIndex.DBConfig:
return QdrantDocumentIndex.DBConfig(path=qdrant._client.location)
|
import os
import time
import uuid
import pytest
import qdrant_client
from docarray.index import QdrantDocumentIndex
cur_dir = os.path.dirname(os.path.abspath(__file__))
qdrant_yml = os.path.abspath(os.path.join(cur_dir, 'docker-compose.yml'))
@pytest.fixture(scope='session', autouse=True)
def start_storage():
os.system(f"docker-compose -f {qdrant_yml} up -d --remove-orphans")
time.sleep(1)
yield
os.system(f"docker-compose -f {qdrant_yml} down --remove-orphans")
@pytest.fixture(scope='function')
def tmp_collection_name():
return uuid.uuid4().hex
@pytest.fixture
def qdrant() -> qdrant_client.QdrantClient:
"""This fixture takes care of removing the collection before each test case"""
client = qdrant_client.QdrantClient(path='/tmp/qdrant-local')
client.delete_collection(collection_name='documents')
return client
@pytest.fixture
def qdrant_config(qdrant) -> QdrantDocumentIndex.DBConfig:
return QdrantDocumentIndex.DBConfig(path=qdrant._client.location)
|
import os
import warnings
from pathlib import Path
import torch
from torchaudio._internal import module_utils as _mod_utils # noqa: F401
_LIB_DIR = Path(__file__).parent / "lib"
def _get_lib_path(lib: str):
suffix = "pyd" if os.name == "nt" else "so"
path = _LIB_DIR / f"{lib}.{suffix}"
return path
def _load_lib(lib: str) -> bool:
"""Load extension module
Note:
In case `torchaudio` is deployed with `pex` format, the library file
is not in a standard location.
In this case, we expect that `libtorchaudio` is available somewhere
in the search path of the dynamic loading mechanism, so that importing
`_torchaudio` will have the library loader find and load `libtorchaudio`.
This is the reason why the function should not raise an error when the library
file is not found.
Returns:
bool:
True if the library file is found AND the library loaded without failure.
False if the library file is not found (as in the case where torchaudio
is deployed with `pex` format and the shared library file is
in a non-standard location).
If the library file is found but there is an issue loading the library
(such as a missing dependency), then this function raises the exception as-is.
Raises:
Exception:
If the library file is found, but there is an issue loading the library file,
            (when the underlying `ctypes.CDLL` throws an exception), this function will pass
            the exception as-is, instead of catching it and returning bool.
            The expected case is `OSError` thrown by `ctypes.CDLL` when a dynamic dependency
is not found.
This behavior was chosen because the expected failure case is not recoverable.
If a dependency is missing, then users have to install it.
"""
path = _get_lib_path(lib)
if not path.exists():
return False
torch.ops.load_library(path)
torch.classes.load_library(path)
return True
_FFMPEG_INITIALIZED = False
def _init_ffmpeg():
global _FFMPEG_INITIALIZED
if _FFMPEG_INITIALIZED:
return
if not torch.ops.torchaudio.is_ffmpeg_available():
raise RuntimeError(
"torchaudio is not compiled with FFmpeg integration. Please set USE_FFMPEG=1 when compiling torchaudio."
)
try:
_load_lib("libtorchaudio_ffmpeg")
except OSError as err:
raise ImportError("FFmpeg libraries are not found. Please install FFmpeg.") from err
import torchaudio._torchaudio_ffmpeg # noqa
torch.ops.torchaudio.ffmpeg_init()
if torch.ops.torchaudio.ffmpeg_get_log_level() > 8:
torch.ops.torchaudio.ffmpeg_set_log_level(8)
_FFMPEG_INITIALIZED = True
def _init_extension():
if not _mod_utils.is_module_available("torchaudio._torchaudio"):
warnings.warn("torchaudio C++ extension is not available.")
return
_load_lib("libtorchaudio")
# This import is for initializing the methods registered via PyBind11
# This has to happen after the base library is loaded
from torchaudio import _torchaudio # noqa
# Because this part is executed as part of `import torchaudio`, we ignore the
# initialization failure.
# If the FFmpeg integration is not properly initialized, then detailed error
# will be raised when client code attempts to import the dedicated feature.
try:
_init_ffmpeg()
except Exception:
pass
def _check_cuda_version():
version = torch.ops.torchaudio.cuda_version()
if version is not None and torch.version.cuda is not None:
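        # torch.ops.torchaudio.cuda_version() encodes CUDA as an integer,
        # e.g. 11030 for 11.3; the slicing below recovers "major.minor".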
version_str = str(version)
ta_version = f"{version_str[:-3]}.{version_str[-2]}"
t_version = torch.version.cuda
if ta_version != t_version:
raise RuntimeError(
"Detected that PyTorch and TorchAudio were compiled with different CUDA versions. "
f"PyTorch has CUDA version {t_version} whereas TorchAudio has CUDA version {ta_version}. "
"Please install the TorchAudio version that matches your PyTorch version."
)
_init_extension()
_check_cuda_version()
|
import os
import warnings
from pathlib import Path
import torch
from torchaudio._internal import module_utils as _mod_utils # noqa: F401
_LIB_DIR = Path(__file__).parent / "lib"
def _get_lib_path(lib: str):
suffix = "pyd" if os.name == "nt" else "so"
path = _LIB_DIR / f"{lib}.{suffix}"
return path
def _load_lib(lib: str) -> bool:
"""Load extension module
Note:
In case `torchaudio` is deployed with `pex` format, the library file
is not in a standard location.
In this case, we expect that `libtorchaudio` is available somewhere
in the search path of dynamic loading mechanism, so that importing
`_torchaudio` will have library loader find and load `libtorchaudio`.
        This is the reason why the function should not raise an error when the library
        file is not found.
Returns:
bool:
True if the library file is found AND the library loaded without failure.
False if the library file is not found (like in the case where torchaudio
is deployed with pex format, thus the shared library file is
in a non-standard location.).
If the library file is found but there is an issue loading the library,
(such as missing dependency) then this function raises the exception as-is.
Raises:
Exception:
If the library file is found, but there is an issue loading the library file,
            (when the underlying `ctypes.CDLL` throws an exception), this function will pass
            the exception as-is, instead of catching it and returning bool.
            The expected case is `OSError` thrown by `ctypes.CDLL` when a dynamic dependency
is not found.
This behavior was chosen because the expected failure case is not recoverable.
If a dependency is missing, then users have to install it.
"""
path = _get_lib_path(lib)
if not path.exists():
return False
torch.ops.load_library(path)
torch.classes.load_library(path)
return True
_FFMPEG_INITIALIZED = False
def _init_ffmpeg():
global _FFMPEG_INITIALIZED
if _FFMPEG_INITIALIZED:
return
if not torch.ops.torchaudio.is_ffmpeg_available():
raise RuntimeError(
"torchaudio is not compiled with FFmpeg integration. Please set USE_FFMPEG=1 when compiling torchaudio."
)
try:
_load_lib("libtorchaudio_ffmpeg")
except OSError as err:
raise ImportError("FFmpeg libraries are not found. Please install FFmpeg.") from err
import torchaudio._torchaudio_ffmpeg # noqa
torch.ops.torchaudio.ffmpeg_init()
if torch.ops.torchaudio.ffmpeg_get_log_level() > 8:
torch.ops.torchaudio.ffmpeg_set_log_level(8)
_FFMPEG_INITIALIZED = True
def _init_extension():
if not _mod_utils.is_module_available("torchaudio._torchaudio"):
warnings.warn("torchaudio C++ extension is not available.")
return
_load_lib("libtorchaudio")
# This import is for initializing the methods registered via PyBind11
# This has to happen after the base library is loaded
from torchaudio import _torchaudio # noqa
# Because this part is executed as part of `import torchaudio`, we ignore the
# initialization failure.
# If the FFmpeg integration is not properly initialized, then detailed error
# will be raised when client code attempts to import the dedicated feature.
try:
_init_ffmpeg()
except Exception:
pass
_init_extension()
|
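The `_load_lib` docstring spells out a three-way contract: return `False` when the library file is missing, `True` when it loads, and let any load-time exception (typically `OSError` for a missing dynamic dependency) propagate. A minimal sketch of the same contract with plain `ctypes`, independent of torch's loaders:
```python
import ctypes
from pathlib import Path


def load_native_lib(path: Path) -> bool:
    # Missing file: not an error, the caller may have an alternative search path.
    if not path.exists():
        return False
    # Load failure (e.g. unresolved dependency): propagate, it is not recoverable here.
    ctypes.CDLL(str(path))
    return True
```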
import warnings
from typing import Any, List
import torch
from torchvision.transforms import functional as _F
@torch.jit.unused
def to_tensor(inpt: Any) -> torch.Tensor:
"""[BETA] [DEPREACTED] Use to_image() and to_dtype() instead."""
warnings.warn(
"The function `to_tensor(...)` is deprecated and will be removed in a future release. "
"Instead, please use `to_image(...)` followed by `to_dtype(..., dtype=torch.float32, scale=True)`."
)
return _F.to_tensor(inpt)
def get_image_size(inpt: torch.Tensor) -> List[int]:
warnings.warn(
"The function `get_image_size(...)` is deprecated and will be removed in a future release. "
"Instead, please use `get_size(...)` which returns `[h, w]` instead of `[w, h]`."
)
return _F.get_image_size(inpt)
|
import warnings
from typing import Any, List
import torch
from torchvision.transforms import functional as _F
@torch.jit.unused
def to_tensor(inpt: Any) -> torch.Tensor:
warnings.warn(
"The function `to_tensor(...)` is deprecated and will be removed in a future release. "
"Instead, please use `to_image(...)` followed by `to_dtype(..., dtype=torch.float32, scale=True)`."
)
return _F.to_tensor(inpt)
def get_image_size(inpt: torch.Tensor) -> List[int]:
warnings.warn(
"The function `get_image_size(...)` is deprecated and will be removed in a future release. "
"Instead, please use `get_size(...)` which returns `[h, w]` instead of `[w, h]`."
)
return _F.get_image_size(inpt)
|
import torchaudio
_STREAM_READER = [
"StreamReader",
"StreamReaderSourceStream",
"StreamReaderSourceAudioStream",
"StreamReaderSourceVideoStream",
"StreamReaderOutputStream",
]
_STREAM_WRITER = [
"StreamWriter",
]
_LAZILY_IMPORTED = _STREAM_READER + _STREAM_WRITER
def __getattr__(name: str):
if name in _LAZILY_IMPORTED:
if not torchaudio._extension._FFMPEG_INITIALIZED:
torchaudio._extension._init_ffmpeg()
if name in _STREAM_READER:
from . import _stream_reader
item = getattr(_stream_reader, name)
else:
from . import _stream_writer
item = getattr(_stream_writer, name)
globals()[name] = item
return item
raise AttributeError(f"module {__name__} has no attribute {name}")
def __dir__():
return sorted(__all__ + _LAZILY_IMPORTED)
__all__ = []
|
import torchaudio
_STREAM_READER = [
"StreamReader",
"StreamReaderSourceStream",
"StreamReaderSourceAudioStream",
"StreamReaderSourceVideoStream",
"StreamReaderOutputStream",
]
_STREAM_WRITER = [
"StreamWriter",
]
_LAZILY_IMPORTED = _STREAM_READER + _STREAM_WRITER
def __getattr__(name: str):
if name in _LAZILY_IMPORTED:
torchaudio._extension._init_ffmpeg()
if name in _STREAM_READER:
from . import _stream_reader
item = getattr(_stream_reader, name)
else:
from . import _stream_writer
item = getattr(_stream_writer, name)
globals()[name] = item
return item
raise AttributeError(f"module {__name__} has no attribute {name}")
def __dir__():
return sorted(__all__ + _LAZILY_IMPORTED)
__all__ = []
|
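Both versions above lean on the PEP 562 module-level `__getattr__` hook: the heavy import (and FFmpeg initialization) is deferred until a listed name is first accessed, and the resolved object is cached in `globals()` so the hook fires at most once per name. A stripped-down sketch of the pattern, with a hypothetical package layout:
```python
# mypkg/__init__.py (hypothetical layout for illustration)
import importlib

_LAZY = {"HeavyThing": ".heavy"}  # public name -> submodule that defines it


def __getattr__(name: str):
    if name in _LAZY:
        module = importlib.import_module(_LAZY[name], __package__)
        item = getattr(module, name)
        globals()[name] = item  # cache: later lookups bypass __getattr__
        return item
    raise AttributeError(f"module {__name__} has no attribute {name}")
```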
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_smolvlm import *
from .image_processing_smolvlm import *
from .image_processing_smolvlm_fast import *
from .modeling_smolvlm import *
from .processing_smolvlm import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_smolvlm import *
from .image_processing_smolvlm import *
from .modeling_smolvlm import *
from .processing_smolvlm import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
import os
from pathlib import Path
import pytest
from jina.hubble import HubExecutor, hubapi
from jina.hubble.hubapi import list_local
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture
def executor_zip_file():
return Path(__file__).parent / 'dummy_executor.zip'
@pytest.fixture
def test_executor():
return HubExecutor(uuid='hello', name=None, commit_id='test_commit', tag='v0')
@pytest.mark.parametrize('install_deps', [True, False])
def test_install_local(executor_zip_file, test_executor, install_deps):
assert not hubapi.exist_local(test_executor.uuid, test_executor.tag)
hubapi.install_local(executor_zip_file, test_executor, install_deps=install_deps)
assert hubapi.exist_local(test_executor.uuid, test_executor.tag)
assert any(
str(path).endswith(
f'{os.path.join(test_executor.uuid, test_executor.tag)}.dist-info'
)
for path in list_local()
)
hubapi.uninstall_local(test_executor.uuid)
assert not hubapi.exist_local(test_executor.uuid, test_executor.tag)
def test_load_dump_secret():
import tempfile
uuid8 = 'hello'
secret = 'world'
task_id = 'UUID'
with tempfile.TemporaryDirectory() as tmpdirname:
hubapi.dump_secret(Path(tmpdirname), uuid8, secret, task_id)
new_uuid8, new_secret, new_task_id = hubapi.load_secret(Path(tmpdirname))
assert new_uuid8 == uuid8
assert new_secret == secret
assert task_id == new_task_id
def test_load_dump_secret_existing_encryption_key():
import tempfile
uuid8 = 'hello'
secret = 'world'
task_id = 'UUID'
with tempfile.TemporaryDirectory() as tmpdirname:
# creates an encryption key
hubapi.dump_secret(Path(tmpdirname), 'dummy', 'dummy', 'dummy')
# dump secret using existing encryption key
hubapi.dump_secret(Path(tmpdirname), uuid8, secret, task_id)
new_uuid8, new_secret, new_task_id = hubapi.load_secret(Path(tmpdirname))
assert new_uuid8 == uuid8
assert new_secret == secret
assert task_id == new_task_id
|
import os
from pathlib import Path
import pytest
from jina.hubble import HubExecutor, hubapi
from jina.hubble.hubapi import list_local
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture
def executor_zip_file():
return Path(__file__).parent / 'dummy_executor.zip'
@pytest.fixture
def test_executor():
return HubExecutor(uuid='hello', name=None, commit_id='test_commit', tag='v0')
@pytest.mark.parametrize('install_deps', [True, False])
def test_install_local(executor_zip_file, test_executor, install_deps):
assert not hubapi.exist_local(test_executor.uuid, test_executor.tag)
hubapi.install_local(executor_zip_file, test_executor, install_deps=install_deps)
assert hubapi.exist_local(test_executor.uuid, test_executor.tag)
assert any(
str(path).endswith(
f'{os.path.join(test_executor.uuid, test_executor.tag)}.dist-info'
)
for path in list_local()
)
hubapi.uninstall_local(test_executor.uuid)
assert not hubapi.exist_local(test_executor.uuid, test_executor.tag)
def test_load_dump_secret():
import tempfile
uuid8 = 'hello'
secret = 'world'
with tempfile.TemporaryDirectory() as tmpdirname:
hubapi.dump_secret(Path(tmpdirname), uuid8, secret)
new_uuid8, new_secret = hubapi.load_secret(Path(tmpdirname))
assert new_uuid8 == uuid8
assert new_secret == secret
def test_load_dump_secret_existing_encryption_key():
import tempfile
uuid8 = 'hello'
secret = 'world'
with tempfile.TemporaryDirectory() as tmpdirname:
# creates an encryption key
hubapi.dump_secret(Path(tmpdirname), 'dummy', 'dummy')
# dump secret using existing encryption key
hubapi.dump_secret(Path(tmpdirname), uuid8, secret)
new_uuid8, new_secret = hubapi.load_secret(Path(tmpdirname))
assert new_uuid8 == uuid8
assert new_secret == secret
|
"""
This script contains an example of how to perform semantic search with Qdrant.
You need Qdrant up and running locally:
https://qdrant.tech/documentation/quickstart/
Further, you need the Python Qdrant Client installed: https://python-client.qdrant.tech/, e.g.:
```
pip install qdrant-client
```
This script was created for `qdrant-client` v1.0+.
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.search_engines import semantic_search_qdrant
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train")
num_docs = 10_000
corpus = dataset["answer"][:num_docs]
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
sparse_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# 4. Encode the corpus
corpus_embeddings = sparse_model.encode_document(
corpus, convert_to_sparse_tensor=True, batch_size=16, show_progress_bar=True
)
# Initially, we don't have a qdrant index yet
corpus_index = None
while True:
# 5. Encode the queries using the full precision
start_time = time.time()
query_embeddings = sparse_model.encode_query(queries, convert_to_sparse_tensor=True)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
# 6. Perform semantic search using qdrant
results, search_time, corpus_index = semantic_search_qdrant(
query_embeddings,
corpus_index=corpus_index,
corpus_embeddings=corpus_embeddings if corpus_index is None else None,
top_k=5,
output_index=True,
)
# 7. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 8. Prompt for more queries
queries = [input("Please enter a question: ")]
|
"""
This script contains an example of how to perform semantic search with Qdrant.
You need Qdrant up and running locally:
https://qdrant.tech/documentation/quickstart/
Further, you need the Python Qdrant Client installed: https://python-client.qdrant.tech/, e.g.:
```
pip install qdrant-client
```
This script was created for `qdrant-client` v1.0+.
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.search_engines import semantic_search_qdrant
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train")
num_docs = 10_000
corpus = dataset["answer"][:num_docs]
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
sparse_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# 4. Encode the corpus
corpus_embeddings = sparse_model.encode(corpus, convert_to_sparse_tensor=True, batch_size=16, show_progress_bar=True)
# Initially, we don't have a qdrant index yet
corpus_index = None
while True:
# 5. Encode the queries using the full precision
start_time = time.time()
query_embeddings = sparse_model.encode(queries, convert_to_sparse_tensor=True)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
# 6. Perform semantic search using qdrant
results, search_time, corpus_index = semantic_search_qdrant(
query_embeddings,
corpus_index=corpus_index,
corpus_embeddings=corpus_embeddings if corpus_index is None else None,
top_k=5,
output_index=True,
)
# 7. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 8. Prompt for more queries
queries = [input("Please enter a question: ")]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_models.azureml_endpoint import (
AzureMLChatOnlineEndpoint,
LlamaContentFormatter,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"LlamaContentFormatter": "langchain_community.chat_models.azureml_endpoint",
"AzureMLChatOnlineEndpoint": "langchain_community.chat_models.azureml_endpoint",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"AzureMLChatOnlineEndpoint",
"LlamaContentFormatter",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_models.azureml_endpoint import (
AzureMLChatOnlineEndpoint,
LlamaContentFormatter,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"LlamaContentFormatter": "langchain_community.chat_models.azureml_endpoint",
"AzureMLChatOnlineEndpoint": "langchain_community.chat_models.azureml_endpoint",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"LlamaContentFormatter",
"AzureMLChatOnlineEndpoint",
]
|
import pytest
from llama_index.core.base.llms.types import ChatMessage, ImageBlock, AudioBlock
from llama_index.core.memory.memory import Memory
from llama_index.core.storage.chat_store.sql import MessageStatus
@pytest.fixture()
def memory():
"""Create a basic memory instance for testing."""
return Memory(
token_limit=1000,
token_flush_size=700,
chat_history_token_ratio=0.9,
session_id="test_user",
)
@pytest.mark.asyncio
async def test_initialization(memory):
"""Test that memory initializes correctly."""
assert memory.token_limit == 1000
assert memory.token_flush_size == 700
assert memory.session_id == "test_user"
@pytest.mark.asyncio
async def test_estimate_token_count_text(memory):
"""Test token counting for text."""
message = ChatMessage(role="user", content="Test message")
count = memory._estimate_token_count(message)
assert count == len(memory.tokenizer_fn("Test message"))
@pytest.mark.asyncio
async def test_estimate_token_count_image(memory):
"""Test token counting for images."""
block = ImageBlock(url="http://example.com/image.jpg")
message = ChatMessage(role="user", blocks=[block])
count = memory._estimate_token_count(message)
assert count == memory.image_token_size_estimate
@pytest.mark.asyncio
async def test_estimate_token_count_audio(memory):
"""Test token counting for audio."""
block = AudioBlock(url="http://example.com/audio.mp3")
message = ChatMessage(role="user", blocks=[block])
count = memory._estimate_token_count(message)
assert count == memory.audio_token_size_estimate
@pytest.mark.asyncio
async def test_manage_queue_under_limit(memory):
"""Test queue management when under token limit."""
# Set up a case where we're under the token limit
chat_messages = [ChatMessage(role="user", content="Short message")]
await memory.aput_messages(chat_messages)
cur_messages = await memory.aget()
assert len(cur_messages) == 1
assert cur_messages[0].content == "Short message"
@pytest.mark.asyncio
async def test_manage_queue_over_limit(memory):
"""Test queue management when over token limit."""
# Set up a case where we're over the token limit
chat_messages = [
ChatMessage(role="user", content="x " * 500),
ChatMessage(role="assistant", content="y " * 500),
ChatMessage(role="user", content="z " * 500),
]
# This will exceed the token limit and flush 700 tokens (two messages)
await memory.aput_messages(chat_messages)
cur_messages = await memory.aget()
assert len(cur_messages) == 1
assert "z " in cur_messages[0].content
@pytest.mark.asyncio
async def test_aput(memory):
"""Test adding a message."""
message = ChatMessage(role="user", content="New message")
await memory.aput(message)
# Should add the message to the store
messages = await memory.aget()
assert len(messages) == 1
assert messages[0].content == "New message"
@pytest.mark.asyncio
async def test_aput_messages(memory):
"""Test adding multiple messages."""
messages = [
ChatMessage(role="user", content="Message 1"),
ChatMessage(role="assistant", content="Response 1"),
]
await memory.aput_messages(messages)
# Should add the messages to the store
messages = await memory.aget()
assert len(messages) == 2
assert messages[0].content == "Message 1"
assert messages[1].content == "Response 1"
@pytest.mark.asyncio
async def test_aset(memory):
"""Test setting the chat history."""
messages = [
ChatMessage(role="user", content="Message 1"),
ChatMessage(role="assistant", content="Response 1"),
]
await memory.aset(messages)
# Should set the messages in the store
messages = await memory.aget()
assert len(messages) == 2
assert messages[0].content == "Message 1"
assert messages[1].content == "Response 1"
@pytest.mark.asyncio
async def test_aget_all(memory):
"""Test getting all messages."""
await memory.aput_messages(
[
ChatMessage(role="user", content="Message 1"),
ChatMessage(role="assistant", content="Response 1"),
]
)
messages = await memory.aget_all(status=MessageStatus.ACTIVE)
# Should get all messages from the store
assert len(messages) == 2
assert messages[0].content == "Message 1"
assert messages[1].content == "Response 1"
@pytest.mark.asyncio
async def test_areset(memory):
"""Test resetting the memory."""
await memory.aput(ChatMessage(role="user", content="New message"))
await memory.areset(status=MessageStatus.ACTIVE)
# Should delete messages from the store
messages = await memory.aget()
assert len(messages) == 0
|
import pytest
from llama_index.core.base.llms.types import ChatMessage, ImageBlock, AudioBlock
from llama_index.core.memory.memory import Memory
from llama_index.core.storage.chat_store.sql import MessageStatus
@pytest.fixture()
def memory():
"""Create a basic memory instance for testing."""
return Memory(
token_limit=1000,
token_flush_size=700,
chat_history_token_ratio=0.9,
session_id="test_user",
)
@pytest.mark.asyncio
async def test_initialization(memory):
"""Test that memory initializes correctly."""
assert memory.token_limit == 1000
assert memory.token_flush_size == 700
assert memory.session_id == "test_user"
@pytest.mark.asyncio
async def test_estimate_token_count_text(memory):
"""Test token counting for text."""
message = ChatMessage(role="user", content="Test message")
count = memory._estimate_token_count(message)
assert count == len(memory.tokenizer_fn("Test message"))
@pytest.mark.asyncio
async def test_estimate_token_count_image(memory):
"""Test token counting for images."""
block = ImageBlock(url="http://example.com/image.jpg")
message = ChatMessage(role="user", blocks=[block])
count = memory._estimate_token_count(message)
assert count == memory.image_token_size_estimate
@pytest.mark.asyncio
async def test_estimate_token_count_audio(memory):
"""Test token counting for audio."""
block = AudioBlock(url="http://example.com/audio.mp3")
message = ChatMessage(role="user", blocks=[block])
count = memory._estimate_token_count(message)
assert count == memory.audio_token_size_estimate
@pytest.mark.asyncio
async def test_manage_queue_under_limit(memory):
"""Test queue management when under token limit."""
# Set up a case where we're under the token limit
chat_messages = [
ChatMessage(role="user", content="Short message")
]
await memory.aput_messages(chat_messages)
cur_messages = await memory.aget()
assert len(cur_messages) == 1
assert cur_messages[0].content == "Short message"
@pytest.mark.asyncio
async def test_manage_queue_over_limit(memory):
"""Test queue management when over token limit."""
# Set up a case where we're over the token limit
chat_messages = [
ChatMessage(role="user", content="x " * 500),
ChatMessage(role="assistant", content="y " * 500),
ChatMessage(role="user", content="z " * 500),
]
# This will exceed the token limit and flush 700 tokens (two messages)
await memory.aput_messages(chat_messages)
cur_messages = await memory.aget()
assert len(cur_messages) == 1
assert "z " in cur_messages[0].content
@pytest.mark.asyncio
async def test_aput(memory):
"""Test adding a message."""
message = ChatMessage(role="user", content="New message")
await memory.aput(message)
# Should add the message to the store
messages = await memory.aget()
assert len(messages) == 1
assert messages[0].content == "New message"
@pytest.mark.asyncio
async def test_aput_messages(memory):
"""Test adding multiple messages."""
messages = [
ChatMessage(role="user", content="Message 1"),
ChatMessage(role="assistant", content="Response 1"),
]
await memory.aput_messages(messages)
# Should add the messages to the store
messages = await memory.aget()
assert len(messages) == 2
assert messages[0].content == "Message 1"
assert messages[1].content == "Response 1"
@pytest.mark.asyncio
async def test_aset(memory):
"""Test setting the chat history."""
messages = [
ChatMessage(role="user", content="Message 1"),
ChatMessage(role="assistant", content="Response 1"),
]
await memory.aset(messages)
# Should set the messages in the store
messages = await memory.aget()
assert len(messages) == 2
assert messages[0].content == "Message 1"
assert messages[1].content == "Response 1"
@pytest.mark.asyncio
async def test_aget_all(memory):
"""Test getting all messages."""
await memory.aput_messages([
ChatMessage(role="user", content="Message 1"),
ChatMessage(role="assistant", content="Response 1"),
])
messages = await memory.aget_all(status=MessageStatus.ACTIVE)
# Should get all messages from the store
assert len(messages) == 2
assert messages[0].content == "Message 1"
assert messages[1].content == "Response 1"
@pytest.mark.asyncio
async def test_areset(memory):
"""Test resetting the memory."""
await memory.aput(ChatMessage(role="user", content="New message"))
await memory.areset(status=MessageStatus.ACTIVE)
# Should delete messages from the store
messages = await memory.aget()
assert len(messages) == 0
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch
import torch.nn as nn
from torch import Tensor
from mmdet.registry import MODELS
from .utils import weighted_loss
@weighted_loss
def smooth_l1_loss(pred: Tensor, target: Tensor, beta: float = 1.0) -> Tensor:
"""Smooth L1 loss.
Args:
pred (Tensor): The prediction.
target (Tensor): The learning target of the prediction.
beta (float, optional): The threshold in the piecewise function.
Defaults to 1.0.
Returns:
Tensor: Calculated loss
"""
assert beta > 0
if target.numel() == 0:
return pred.sum() * 0
assert pred.size() == target.size()
diff = torch.abs(pred - target)
loss = torch.where(diff < beta, 0.5 * diff * diff / beta,
diff - 0.5 * beta)
return loss
@weighted_loss
def l1_loss(pred: Tensor, target: Tensor) -> Tensor:
"""L1 loss.
Args:
pred (Tensor): The prediction.
target (Tensor): The learning target of the prediction.
Returns:
Tensor: Calculated loss
"""
if target.numel() == 0:
return pred.sum() * 0
assert pred.size() == target.size()
loss = torch.abs(pred - target)
return loss
@MODELS.register_module()
class SmoothL1Loss(nn.Module):
"""Smooth L1 loss.
Args:
beta (float, optional): The threshold in the piecewise function.
Defaults to 1.0.
reduction (str, optional): The method to reduce the loss.
Options are "none", "mean" and "sum". Defaults to "mean".
loss_weight (float, optional): The weight of loss.
"""
def __init__(self,
beta: float = 1.0,
reduction: str = 'mean',
loss_weight: float = 1.0) -> None:
super().__init__()
self.beta = beta
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred: Tensor,
target: Tensor,
weight: Optional[Tensor] = None,
avg_factor: Optional[int] = None,
reduction_override: Optional[str] = None,
**kwargs) -> Tensor:
"""Forward function.
Args:
pred (Tensor): The prediction.
target (Tensor): The learning target of the prediction.
weight (Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
Returns:
Tensor: Calculated loss
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_bbox = self.loss_weight * smooth_l1_loss(
pred,
target,
weight,
beta=self.beta,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss_bbox
@MODELS.register_module()
class L1Loss(nn.Module):
"""L1 loss.
Args:
reduction (str, optional): The method to reduce the loss.
Options are "none", "mean" and "sum".
loss_weight (float, optional): The weight of loss.
"""
def __init__(self,
reduction: str = 'mean',
loss_weight: float = 1.0) -> None:
super().__init__()
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred: Tensor,
target: Tensor,
weight: Optional[Tensor] = None,
avg_factor: Optional[int] = None,
reduction_override: Optional[str] = None) -> Tensor:
"""Forward function.
Args:
pred (Tensor): The prediction.
target (Tensor): The learning target of the prediction.
weight (Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
Returns:
Tensor: Calculated loss
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_bbox = self.loss_weight * l1_loss(
pred, target, weight, reduction=reduction, avg_factor=avg_factor)
return loss_bbox
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmdet.registry import MODELS
from .utils import weighted_loss
@weighted_loss
def smooth_l1_loss(pred, target, beta=1.0):
"""Smooth L1 loss.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
beta (float, optional): The threshold in the piecewise function.
Defaults to 1.0.
Returns:
torch.Tensor: Calculated loss
"""
assert beta > 0
if target.numel() == 0:
return pred.sum() * 0
assert pred.size() == target.size()
diff = torch.abs(pred - target)
loss = torch.where(diff < beta, 0.5 * diff * diff / beta,
diff - 0.5 * beta)
return loss
@weighted_loss
def l1_loss(pred, target):
"""L1 loss.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
Returns:
torch.Tensor: Calculated loss
"""
if target.numel() == 0:
return pred.sum() * 0
assert pred.size() == target.size()
loss = torch.abs(pred - target)
return loss
@MODELS.register_module()
class SmoothL1Loss(nn.Module):
"""Smooth L1 loss.
Args:
beta (float, optional): The threshold in the piecewise function.
Defaults to 1.0.
reduction (str, optional): The method to reduce the loss.
Options are "none", "mean" and "sum". Defaults to "mean".
loss_weight (float, optional): The weight of loss.
"""
def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0):
super(SmoothL1Loss, self).__init__()
self.beta = beta
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
"""Forward function.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_bbox = self.loss_weight * smooth_l1_loss(
pred,
target,
weight,
beta=self.beta,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss_bbox
@MODELS.register_module()
class L1Loss(nn.Module):
"""L1 loss.
Args:
reduction (str, optional): The method to reduce the loss.
Options are "none", "mean" and "sum".
loss_weight (float, optional): The weight of loss.
"""
def __init__(self, reduction='mean', loss_weight=1.0):
super(L1Loss, self).__init__()
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_bbox = self.loss_weight * l1_loss(
pred, target, weight, reduction=reduction, avg_factor=avg_factor)
return loss_bbox
|
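As a quick sanity check of the piecewise definition documented above (quadratic for |pred - target| < beta, linear beyond), here is a standalone sketch that mirrors the math without the `weighted_loss` machinery:
```python
import torch


def smooth_l1(pred: torch.Tensor, target: torch.Tensor, beta: float = 1.0) -> torch.Tensor:
    diff = torch.abs(pred - target)
    # Quadratic branch inside beta, linear branch outside; the two meet at diff == beta.
    return torch.where(diff < beta, 0.5 * diff * diff / beta, diff - 0.5 * beta)


pred = torch.tensor([0.0, 0.5, 3.0])
target = torch.zeros(3)
print(smooth_l1(pred, target))  # tensor([0.0000, 0.1250, 2.5000])
```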
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.imagenet_utils import (
decode_predictions as decode_predictions,
)
from keras.src.applications.imagenet_utils import (
preprocess_input as preprocess_input,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.imagenet_utils import decode_predictions
from keras.src.applications.imagenet_utils import preprocess_input
|
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional # usort: skip
from ._transform import Transform # usort: skip
from ._augment import CutMix, JPEG, MixUp, RandomErasing
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import (
ColorJitter,
Grayscale,
RandomAdjustSharpness,
RandomAutocontrast,
RandomChannelPermutation,
RandomEqualize,
RandomGrayscale,
RandomInvert,
RandomPhotometricDistort,
RandomPosterize,
RandomSolarize,
RGB,
)
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
CenterCrop,
ElasticTransform,
FiveCrop,
Pad,
RandomAffine,
RandomCrop,
RandomHorizontalFlip,
RandomIoUCrop,
RandomPerspective,
RandomResize,
RandomResizedCrop,
RandomRotation,
RandomShortestSize,
RandomVerticalFlip,
RandomZoomOut,
Resize,
ScaleJitter,
TenCrop,
)
from ._meta import ClampBoundingBoxes, ConvertBoundingBoxFormat
from ._misc import (
ConvertImageDtype,
GaussianBlur,
GaussianNoise,
Identity,
Lambda,
LinearTransformation,
Normalize,
SanitizeBoundingBoxes,
ToDtype,
)
from ._temporal import UniformTemporalSubsample
from ._type_conversion import PILToTensor, ToImage, ToPILImage, ToPureTensor
from ._deprecated import ToTensor # usort: skip
|
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional # usort: skip
from ._transform import Transform # usort: skip
from ._augment import CutMix, JPEG, MixUp, RandomErasing
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import (
ColorJitter,
Grayscale,
RandomAdjustSharpness,
RandomAutocontrast,
RandomChannelPermutation,
RandomEqualize,
RandomGrayscale,
RandomInvert,
RandomPhotometricDistort,
RandomPosterize,
RandomSolarize,
RGB,
)
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
CenterCrop,
ElasticTransform,
FiveCrop,
Pad,
RandomAffine,
RandomCrop,
RandomHorizontalFlip,
RandomIoUCrop,
RandomPerspective,
RandomResize,
RandomResizedCrop,
RandomRotation,
RandomShortestSize,
RandomVerticalFlip,
RandomZoomOut,
Resize,
ScaleJitter,
TenCrop,
)
from ._meta import ClampBoundingBoxes, ConvertBoundingBoxFormat
from ._misc import (
ConvertImageDtype,
GaussianBlur,
Identity,
Lambda,
LinearTransformation,
Normalize,
SanitizeBoundingBoxes,
ToDtype,
)
from ._temporal import UniformTemporalSubsample
from ._type_conversion import PILToTensor, ToImage, ToPILImage, ToPureTensor
from ._deprecated import ToTensor # usort: skip
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
from jina import Flow
from pdf_segmenter import PDFSegmenter
from PIL import Image
def test_flow(test_dir, doc_generator_img_text, expected_text):
flow = Flow().add(uses=PDFSegmenter)
doc_array = doc_generator_img_text
for doc in doc_array:
with flow:
results = flow.post(on='/test', inputs=doc, return_results=True)
assert len(results[0].docs) == 1
chunks = results[0].docs[0].chunks
assert len(chunks) == 3
for idx, c in enumerate(chunks[:2]):
with Image.open(
os.path.join(test_dir, f'data/test_img_{idx}.jpg')
) as img:
blob = chunks[idx].blob
assert chunks[idx].mime_type == 'image/*'
                assert (blob.shape[1], blob.shape[0]) == img.size  # PIL size is (width, height)
if idx == 0:
assert blob.shape == (660, 1024, 3)
if idx == 1:
assert blob.shape == (626, 1191, 3)
# Check text
assert chunks[2].text == expected_text
assert chunks[2].mime_type == 'text/plain'
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
from jina import Flow
from PIL import Image
from ...pdf_segmenter import PDFSegmenter
def test_flow(test_dir, doc_generator_img_text, expected_text):
flow = Flow().add(uses=PDFSegmenter)
doc_array = doc_generator_img_text
for doc in doc_array:
with flow:
results = flow.post(on='/test', inputs=doc, return_results=True)
assert len(results[0].docs) == 1
chunks = results[0].docs[0].chunks
assert len(chunks) == 3
for idx, c in enumerate(chunks[:2]):
with Image.open(
os.path.join(test_dir, f'data/test_img_{idx}.jpg')
) as img:
blob = chunks[idx].blob
assert chunks[idx].mime_type == 'image/*'
                assert (blob.shape[1], blob.shape[0]) == img.size  # PIL size is (width, height)
if idx == 0:
assert blob.shape == (660, 1024, 3)
if idx == 1:
assert blob.shape == (626, 1191, 3)
# Check text
assert chunks[2].text == expected_text
assert chunks[2].mime_type == 'text/plain'
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import subprocess
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'MMDetection'
copyright = '2018-2021, OpenMMLab'
author = 'MMDetection Authors'
version_file = '../../mmdet/version.py'
def get_version():
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__']
# The full version, including alpha/beta/rc tags
release = get_version()
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'myst_parser',
'sphinx_markdown_tables',
'sphinx_copybutton',
]
autodoc_mock_imports = [
'matplotlib', 'pycocotools', 'terminaltables', 'mmdet.version', 'mmcv.ops'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = {
'.rst': 'restructuredtext',
'.md': 'markdown',
}
# The master toctree document.
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'sphinx_rtd_theme'
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmdetection'
},
],
# Specify the language of shared menu
'menu_lang':
'en'
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['css/readthedocs.css']
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
def builder_inited_handler(app):
subprocess.run(['./stat.py'])
def setup(app):
app.connect('builder-inited', builder_inited_handler)
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import subprocess
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'MMDetection'
copyright = '2018-2021, OpenMMLab'
author = 'MMDetection Authors'
version_file = '../../mmdet/version.py'
def get_version():
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__']
# The full version, including alpha/beta/rc tags
release = get_version()
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'recommonmark',
'sphinx_markdown_tables',
'sphinx_copybutton',
]
autodoc_mock_imports = [
'matplotlib', 'pycocotools', 'terminaltables', 'mmdet.version', 'mmcv.ops'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = {
'.rst': 'restructuredtext',
'.md': 'markdown',
}
# The master toctree document.
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'sphinx_rtd_theme'
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmdetection'
},
],
# Specify the language of shared menu
'menu_lang':
'en'
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['css/readthedocs.css']
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
def builder_inited_handler(app):
subprocess.run(['./stat.py'])
def setup(app):
app.connect('builder-inited', builder_inited_handler)
|
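Both conf.py versions read the package version by exec-ing `mmdet/version.py` and fishing `__version__` out of the function's locals, which relies on CPython-specific behavior of `exec` inside a function. A slightly more robust sketch execs into an explicit namespace:
```python
def get_version(version_file: str) -> str:
    namespace = {}
    with open(version_file) as f:
        # exec into an explicit dict instead of the function's locals()
        exec(compile(f.read(), version_file, "exec"), namespace)
    return namespace["__version__"]
```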
import orjson
from pydantic.json import ENCODERS_BY_TYPE
def _default_orjson(obj):
"""
default option for orjson dumps.
:param obj:
:return: return a json compatible object
"""
from docarray.base_doc import BaseNode
if isinstance(obj, BaseNode):
return obj._docarray_to_json_compatible()
else:
for cls_, encoder in ENCODERS_BY_TYPE.items():
if isinstance(obj, cls_):
return encoder(obj)
return obj
def orjson_dumps(v, *, default=None) -> bytes:
# dumps to bytes using orjson
return orjson.dumps(v, default=_default_orjson, option=orjson.OPT_SERIALIZE_NUMPY)
def orjson_dumps_and_decode(v, *, default=None) -> str:
    # dumps with orjson, then decodes the bytes to str
return orjson_dumps(v, default=default).decode()
|
import orjson
from pydantic.json import ENCODERS_BY_TYPE
from docarray.typing.abstract_type import AbstractType
def _default_orjson(obj):
"""
default option for orjson dumps.
:param obj:
:return: return a json compatible object
"""
if isinstance(obj, AbstractType):
return obj._docarray_to_json_compatible()
else:
for cls_, encoder in ENCODERS_BY_TYPE.items():
if isinstance(obj, cls_):
return encoder(obj)
return obj
def orjson_dumps(v, *, default=None) -> bytes:
# dumps to bytes using orjson
return orjson.dumps(v, default=_default_orjson, option=orjson.OPT_SERIALIZE_NUMPY)
def orjson_dumps_and_decode(v, *, default=None) -> str:
    # dumps with orjson, then decodes the bytes to str
return orjson_dumps(v, default=default).decode()
|
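The `OPT_SERIALIZE_NUMPY` option used above lets orjson serialize numpy arrays natively, while the `default` hook covers docarray types and anything registered in pydantic's `ENCODERS_BY_TYPE`. A minimal usage sketch of the numpy path:
```python
import numpy as np
import orjson

payload = {"scores": np.array([0.1, 0.9]), "id": 7}
# Without OPT_SERIALIZE_NUMPY, orjson would call the default hook (or raise).
raw = orjson.dumps(payload, option=orjson.OPT_SERIALIZE_NUMPY)
print(raw)  # b'{"scores":[0.1,0.9],"id":7}'
```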
import numpy as np
import pytest
from sklearn._loss import HalfPoissonLoss
from sklearn.neural_network._base import binary_log_loss, log_loss, poisson_loss
def test_binary_log_loss_1_prob_finite():
    # y_proba equal to one should result in a finite log loss
y_true = np.array([[0, 0, 1]]).T
y_prob = np.array([[0.9, 1.0, 1.0]]).T
loss = binary_log_loss(y_true, y_prob)
assert np.isfinite(loss)
@pytest.mark.parametrize(
"y_true, y_prob",
[
(
np.array([[1, 0, 0], [0, 1, 0]]),
np.array([[0.0, 1.0, 0.0], [0.9, 0.05, 0.05]]),
),
(np.array([[0, 0, 1]]).T, np.array([[0.9, 1.0, 1.0]]).T),
],
)
def test_log_loss_1_prob_finite(y_true, y_prob):
    # y_proba equal to 1 should result in a finite log loss
loss = log_loss(y_true, y_prob)
assert np.isfinite(loss)
def test_poisson_loss(global_random_seed):
"""Test Poisson loss against well tested HalfPoissonLoss."""
n = 1000
rng = np.random.default_rng(global_random_seed)
y_true = rng.integers(low=0, high=10, size=n).astype(float)
y_raw = rng.standard_normal(n)
y_pred = np.exp(y_raw)
sw = rng.uniform(low=0.1, high=10, size=n)
assert 0 in y_true
loss = poisson_loss(y_true=y_true, y_pred=y_pred, sample_weight=sw)
pl = HalfPoissonLoss()
loss_ref = (
pl(y_true=y_true, raw_prediction=y_raw, sample_weight=sw)
+ pl.constant_to_optimal_zero(y_true=y_true, sample_weight=sw).mean()
/ sw.mean()
)
assert loss == pytest.approx(loss_ref, rel=1e-12)
|
import numpy as np
import pytest
from sklearn.neural_network._base import binary_log_loss, log_loss
def test_binary_log_loss_1_prob_finite():
    # y_proba equal to one should result in a finite log loss
y_true = np.array([[0, 0, 1]]).T
y_prob = np.array([[0.9, 1.0, 1.0]]).T
loss = binary_log_loss(y_true, y_prob)
assert np.isfinite(loss)
@pytest.mark.parametrize(
"y_true, y_prob",
[
(
np.array([[1, 0, 0], [0, 1, 0]]),
np.array([[0.0, 1.0, 0.0], [0.9, 0.05, 0.05]]),
),
(np.array([[0, 0, 1]]).T, np.array([[0.9, 1.0, 1.0]]).T),
],
)
def test_log_loss_1_prob_finite(y_true, y_prob):
    # y_proba equal to 1 should result in a finite log loss
loss = log_loss(y_true, y_prob)
assert np.isfinite(loss)
|
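The reference computation in the first version offsets `HalfPoissonLoss` by its `constant_to_optimal_zero` term so the minimum attainable loss is zero. Folding the two terms together gives the usual half Poisson deviance; a standalone sketch under that standard definition (not sklearn's internal helpers):
```python
import numpy as np
from scipy.special import xlogy


def half_poisson_deviance(y_true, y_pred, sample_weight=None):
    # Per-sample half deviance: y_pred - y_true + y_true * log(y_true / y_pred),
    # with xlogy handling the y_true == 0 case (0 * log(0) == 0).
    dev = xlogy(y_true, y_true / y_pred) - y_true + y_pred
    return np.average(dev, weights=sample_weight)
```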
from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
from keras.src.saving import serialization_lib
@keras_export("keras.layers.Pipeline")
class Pipeline(Layer):
"""Applies a series of layers to an input.
This class is useful to build a preprocessing pipeline,
in particular an image data augmentation pipeline.
Compared to a `Sequential` model, `Pipeline` features
a few important differences:
- It's not a `Model`, just a plain layer.
- When the layers in the pipeline are compatible
with `tf.data`, the pipeline will also
remain `tf.data` compatible. That is to say,
the pipeline will not attempt to convert
its inputs to backend-native tensors
when in a tf.data context (unlike a `Sequential`
model).
Example:
```python
from keras import layers
preprocessing_pipeline = layers.Pipeline([
layers.AutoContrast(),
layers.RandomZoom(0.2),
layers.RandomRotation(0.2),
])
# `ds` is a tf.data.Dataset
preprocessed_ds = ds.map(
preprocessing_pipeline,
num_parallel_calls=4,
)
```
"""
def __init__(self, layers, name=None):
super().__init__(name=name)
self._pipeline_layers = layers
self._convert_input_args = False
self._allow_non_tensor_positional_args = True
@property
def layers(self):
return self._pipeline_layers
def call(self, inputs, training=True, mask=None):
for layer in self._pipeline_layers:
kwargs = {}
if layer._call_has_mask_arg:
kwargs["mask"] = mask
if layer._call_has_training_arg and training is not None:
kwargs["training"] = training
outputs = layer(inputs, **kwargs)
inputs = outputs
def _get_mask_from_keras_tensor(kt):
return getattr(kt, "_keras_mask", None)
mask = tree.map_structure(_get_mask_from_keras_tensor, outputs)
return outputs
def get_config(self):
config = {
"layers": serialization_lib.serialize_keras_object(
self._pipeline_layers
),
"name": self.name,
}
return config
|
from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
from keras.src.saving import serialization_lib
@keras_export("keras.layers.Pipeline")
class Pipeline(Layer):
"""Applies a series of layers to an input.
This class is useful to build a preprocessing pipeline.
Compared to a `Sequential` model, `Pipeline` features
a few important differences:
- It's not a `Model`, just a plain layer.
- When the layers in the pipeline are compatible
with `tf.data`, the pipeline will also
remain `tf.data` compatible. That is to say,
the pipeline will not attempt to convert
its inputs to backend-native tensors
when in a tf.data context (unlike a `Sequential`
model).
Example:
```python
from keras import layers
preprocessing_pipeline = layers.Pipeline([
layers.AutoContrast(),
layers.RandomZoom(0.2),
layers.RandomRotation(0.2),
])
# `ds` is a tf.data.Dataset
preprocessed_ds = ds.map(
preprocessing_pipeline,
num_parallel_calls=4,
)
```
"""
def __init__(self, layers, name=None):
super().__init__(name=name)
self._pipeline_layers = layers
self._convert_input_args = False
self._allow_non_tensor_positional_args = True
@property
def layers(self):
return self._pipeline_layers
def call(self, inputs, training=True, mask=None):
for layer in self._pipeline_layers:
kwargs = {}
if layer._call_has_mask_arg:
kwargs["mask"] = mask
if layer._call_has_training_arg and training is not None:
kwargs["training"] = training
outputs = layer(inputs, **kwargs)
inputs = outputs
def _get_mask_from_keras_tensor(kt):
return getattr(kt, "_keras_mask", None)
mask = tree.map_structure(_get_mask_from_keras_tensor, outputs)
return outputs
def get_config(self):
config = {
"layers": serialization_lib.serialize_keras_object(
self._pipeline_layers
),
"name": self.name,
}
return config
|
# Copyright (c) OpenMMLab. All rights reserved.
from .ade20k import (ADE20KInstanceDataset, ADE20KPanopticDataset,
ADE20KSegDataset)
from .base_det_dataset import BaseDetDataset
from .base_semseg_dataset import BaseSegDataset
from .base_video_dataset import BaseVideoDataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_caption import CocoCaptionDataset
from .coco_panoptic import CocoPanopticDataset
from .coco_semantic import CocoSegDataset
from .crowdhuman import CrowdHumanDataset
from .dataset_wrappers import MultiImageMixDataset
from .deepfashion import DeepFashionDataset
from .dsdl import DSDLDetDataset
from .isaid import iSAIDDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .mot_challenge_dataset import MOTChallengeDataset
from .objects365 import Objects365V1Dataset, Objects365V2Dataset
from .openimages import OpenImagesChallengeDataset, OpenImagesDataset
from .refcoco import RefCocoDataset
from .reid_dataset import ReIDDataset
from .samplers import (AspectRatioBatchSampler, ClassAwareSampler,
GroupMultiSourceSampler, MultiSourceSampler,
TrackAspectRatioBatchSampler, TrackImgSampler)
from .utils import get_loading_pipeline
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
from .youtube_vis_dataset import YouTubeVISDataset
__all__ = [
'XMLDataset', 'CocoDataset', 'DeepFashionDataset', 'VOCDataset',
'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset', 'LVISV1Dataset',
'WIDERFaceDataset', 'get_loading_pipeline', 'CocoPanopticDataset',
'MultiImageMixDataset', 'OpenImagesDataset', 'OpenImagesChallengeDataset',
'AspectRatioBatchSampler', 'ClassAwareSampler', 'MultiSourceSampler',
'GroupMultiSourceSampler', 'BaseDetDataset', 'CrowdHumanDataset',
'Objects365V1Dataset', 'Objects365V2Dataset', 'DSDLDetDataset',
'BaseVideoDataset', 'MOTChallengeDataset', 'TrackImgSampler',
'ReIDDataset', 'YouTubeVISDataset', 'TrackAspectRatioBatchSampler',
'ADE20KPanopticDataset', 'CocoCaptionDataset', 'RefCocoDataset',
'BaseSegDataset', 'ADE20KSegDataset', 'CocoSegDataset',
'ADE20KInstanceDataset', 'iSAIDDataset'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .ade20k import (ADE20KInstanceDataset, ADE20KPanopticDataset,
ADE20KSegDataset)
from .base_det_dataset import BaseDetDataset
from .base_semseg_dataset import BaseSegDataset
from .base_video_dataset import BaseVideoDataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_caption import CocoCaptionDataset
from .coco_panoptic import CocoPanopticDataset
from .coco_semantic import CocoSegDataset
from .crowdhuman import CrowdHumanDataset
from .dataset_wrappers import MultiImageMixDataset
from .deepfashion import DeepFashionDataset
from .dsdl import DSDLDetDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .mot_challenge_dataset import MOTChallengeDataset
from .objects365 import Objects365V1Dataset, Objects365V2Dataset
from .openimages import OpenImagesChallengeDataset, OpenImagesDataset
from .refcoco import RefCocoDataset
from .reid_dataset import ReIDDataset
from .samplers import (AspectRatioBatchSampler, ClassAwareSampler,
GroupMultiSourceSampler, MultiSourceSampler,
TrackAspectRatioBatchSampler, TrackImgSampler)
from .utils import get_loading_pipeline
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
from .youtube_vis_dataset import YouTubeVISDataset
__all__ = [
'XMLDataset', 'CocoDataset', 'DeepFashionDataset', 'VOCDataset',
'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset', 'LVISV1Dataset',
'WIDERFaceDataset', 'get_loading_pipeline', 'CocoPanopticDataset',
'MultiImageMixDataset', 'OpenImagesDataset', 'OpenImagesChallengeDataset',
'AspectRatioBatchSampler', 'ClassAwareSampler', 'MultiSourceSampler',
'GroupMultiSourceSampler', 'BaseDetDataset', 'CrowdHumanDataset',
'Objects365V1Dataset', 'Objects365V2Dataset', 'DSDLDetDataset',
'BaseVideoDataset', 'MOTChallengeDataset', 'TrackImgSampler',
'ReIDDataset', 'YouTubeVISDataset', 'TrackAspectRatioBatchSampler',
'ADE20KPanopticDataset', 'CocoCaptionDataset', 'RefCocoDataset',
'BaseSegDataset', 'ADE20KSegDataset', 'CocoSegDataset',
'ADE20KInstanceDataset'
]
|
from ._conformer_wav2vec2 import (
conformer_wav2vec2_base,
conformer_wav2vec2_model,
conformer_wav2vec2_pretrain_base,
conformer_wav2vec2_pretrain_large,
conformer_wav2vec2_pretrain_model,
ConformerWav2Vec2PretrainModel,
)
from ._emformer_hubert import emformer_hubert_base, emformer_hubert_model
from .conv_emformer import ConvEmformer
from .hifi_gan import hifigan_vocoder, hifigan_vocoder_v1, hifigan_vocoder_v2, hifigan_vocoder_v3, HiFiGANVocoder
from .rnnt import conformer_rnnt_base, conformer_rnnt_biasing, conformer_rnnt_biasing_base, conformer_rnnt_model
from .rnnt_decoder import Hypothesis, RNNTBeamSearchBiasing
__all__ = [
"conformer_rnnt_base",
"conformer_rnnt_model",
"conformer_rnnt_biasing",
"conformer_rnnt_biasing_base",
"conv_tasnet_base",
"ConvEmformer",
"conformer_wav2vec2_model",
"conformer_wav2vec2_base",
"conformer_wav2vec2_pretrain_model",
"conformer_wav2vec2_pretrain_base",
"conformer_wav2vec2_pretrain_large",
"ConformerWav2Vec2PretrainModel",
"emformer_hubert_base",
"emformer_hubert_model",
"Hypothesis",
"RNNTBeamSearchBiasing",
"HiFiGANVocoder",
"hifigan_vocoder_v1",
"hifigan_vocoder_v2",
"hifigan_vocoder_v3",
"hifigan_vocoder",
]
|
from ._conformer_wav2vec2 import (
conformer_wav2vec2_base,
conformer_wav2vec2_model,
conformer_wav2vec2_pretrain_base,
conformer_wav2vec2_pretrain_large,
conformer_wav2vec2_pretrain_model,
ConformerWav2Vec2PretrainModel,
)
from ._emformer_hubert import emformer_hubert_base, emformer_hubert_model
from .conv_emformer import ConvEmformer
from .hifi_gan import hifigan_vocoder, hifigan_vocoder_v1, hifigan_vocoder_v2, hifigan_vocoder_v3, HiFiGANVocoder
from .rnnt import conformer_rnnt_base, conformer_rnnt_biasing, conformer_rnnt_biasing_base, conformer_rnnt_model
from .rnnt_decoder import Hypothesis, RNNTBeamSearchBiasing
from .squim import (
squim_objective_base,
squim_objective_model,
squim_subjective_base,
squim_subjective_model,
SquimObjective,
SquimSubjective,
)
__all__ = [
"conformer_rnnt_base",
"conformer_rnnt_model",
"conformer_rnnt_biasing",
"conformer_rnnt_biasing_base",
"conv_tasnet_base",
"ConvEmformer",
"conformer_wav2vec2_model",
"conformer_wav2vec2_base",
"conformer_wav2vec2_pretrain_model",
"conformer_wav2vec2_pretrain_base",
"conformer_wav2vec2_pretrain_large",
"ConformerWav2Vec2PretrainModel",
"emformer_hubert_base",
"emformer_hubert_model",
"Hypothesis",
"RNNTBeamSearchBiasing",
"HiFiGANVocoder",
"hifigan_vocoder_v1",
"hifigan_vocoder_v2",
"hifigan_vocoder_v3",
"hifigan_vocoder",
"squim_objective_base",
"squim_objective_model",
"squim_subjective_base",
"squim_subjective_model",
"SquimObjective",
"SquimSubjective",
]
|
from __future__ import annotations
from collections.abc import Iterable
from enum import Enum
from typing import Any
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.util import pairwise_cos_sim, pairwise_euclidean_sim, pairwise_manhattan_sim
class TripletDistanceMetric(Enum):
"""The metric for the triplet loss"""
COSINE = lambda x, y: 1 - pairwise_cos_sim(x, y)
EUCLIDEAN = lambda x, y: pairwise_euclidean_sim(x, y)
MANHATTAN = lambda x, y: pairwise_manhattan_sim(x, y)
class TripletLoss(nn.Module):
def __init__(
self, model: SentenceTransformer, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5
) -> None:
"""
This class implements triplet loss. Given a triplet of (anchor, positive, negative),
the loss minimizes the distance between anchor and positive while it maximizes the distance
between anchor and negative. It computes the following loss function:
``loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0)``.
Margin is an important hyperparameter and needs to be tuned accordingly.
Args:
model: SentenceTransformerModel
distance_metric: Function to compute distance between two
embeddings. The class TripletDistanceMetric contains
common distance metrics that can be used.
triplet_margin: The negative should be at least this much
further away from the anchor than the positive.
References:
- For further details, see: https://en.wikipedia.org/wiki/Triplet_loss
Requirements:
1. (anchor, positive, negative) triplets
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
"negative": ["It's quite rainy, sadly.", "She walked to the store."],
})
loss = losses.TripletLoss(model=model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.distance_metric = distance_metric
self.triplet_margin = triplet_margin
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
return self.compute_loss_from_embeddings(embeddings, labels)
def compute_loss_from_embeddings(self, embeddings: list[Tensor], labels: Tensor) -> Tensor:
"""
Compute the Triplet loss from embeddings.
Args:
embeddings: List of (anchor, positive, negative) embeddings
Returns:
Loss value
"""
rep_anchor, rep_pos, rep_neg = embeddings
distance_pos = self.distance_metric(rep_anchor, rep_pos)
distance_neg = self.distance_metric(rep_anchor, rep_neg)
losses = F.relu(distance_pos - distance_neg + self.triplet_margin)
return losses.mean()
def get_config_dict(self) -> dict[str, Any]:
distance_metric_name = self.distance_metric.__name__
for name, value in vars(TripletDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = f"TripletDistanceMetric.{name}"
break
return {"distance_metric": distance_metric_name, "triplet_margin": self.triplet_margin}
@property
def citation(self) -> str:
return """
@misc{hermans2017defense,
title={In Defense of the Triplet Loss for Person Re-Identification},
author={Alexander Hermans and Lucas Beyer and Bastian Leibe},
year={2017},
eprint={1703.07737},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
"""
|
from __future__ import annotations
from collections.abc import Iterable
from enum import Enum
from typing import Any
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
class TripletDistanceMetric(Enum):
"""The metric for the triplet loss"""
COSINE = lambda x, y: 1 - F.cosine_similarity(x, y)
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
class TripletLoss(nn.Module):
def __init__(
self, model: SentenceTransformer, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5
) -> None:
"""
This class implements triplet loss. Given a triplet of (anchor, positive, negative),
the loss minimizes the distance between anchor and positive while it maximizes the distance
between anchor and negative. It computes the following loss function:
``loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0)``.
Margin is an important hyperparameter and needs to be tuned accordingly.
Args:
model: SentenceTransformerModel
distance_metric: Function to compute distance between two
embeddings. The class TripletDistanceMetric contains
common distance metrics that can be used.
triplet_margin: The negative should be at least this much
further away from the anchor than the positive.
References:
- For further details, see: https://en.wikipedia.org/wiki/Triplet_loss
Requirements:
1. (anchor, positive, negative) triplets
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
"negative": ["It's quite rainy, sadly.", "She walked to the store."],
})
loss = losses.TripletLoss(model=model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.distance_metric = distance_metric
self.triplet_margin = triplet_margin
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
rep_anchor, rep_pos, rep_neg = reps
distance_pos = self.distance_metric(rep_anchor, rep_pos)
distance_neg = self.distance_metric(rep_anchor, rep_neg)
losses = F.relu(distance_pos - distance_neg + self.triplet_margin)
return losses.mean()
def get_config_dict(self) -> dict[str, Any]:
distance_metric_name = self.distance_metric.__name__
for name, value in vars(TripletDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = f"TripletDistanceMetric.{name}"
break
return {"distance_metric": distance_metric_name, "triplet_margin": self.triplet_margin}
@property
def citation(self) -> str:
return """
@misc{hermans2017defense,
title={In Defense of the Triplet Loss for Person Re-Identification},
author={Alexander Hermans and Lucas Beyer and Bastian Leibe},
year={2017},
eprint={1703.07737},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
"""
|
import subprocess
import pytest
from jina import Document, DocumentArray, Flow
from ...clip_text import CLIPTextEncoder
_EMBEDDING_DIM = 512
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') for _ in range(50)]
)
with Flow(return_results=True).add(uses=CLIPTextEncoder) as flow:
resp = flow.post(
on='/index',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image}',
'--volumes=.cache:/workspace/.cache',
],
timeout=30,
check=True,
)
|
from jina import DocumentArray, Flow
from ...clip_text import CLIPTextEncoder
def test_no_documents():
test_docs = DocumentArray()
f = Flow().add(uses=CLIPTextEncoder)
with f:
f.search(test_docs, {})
assert len(test_docs) == 0 # SUCCESS
|
_base_ = [
'../common/ms_3x_coco-instance.py',
'../_base_/models/cascade-mask-rcnn_r50_fpn.py'
]
model = dict(
# use caffe img_norm
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False),
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
|
_base_ = [
'../common/mstrain_3x_coco_instance.py',
'../_base_/models/cascade_mask_rcnn_r50_fpn.py'
]
model = dict(
# use caffe img_norm
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False),
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.office365.toolkit import O365Toolkit
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"O365Toolkit": "langchain_community.agent_toolkits.office365.toolkit",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"O365Toolkit",
]
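For reference, a stripped-down stand-in for what the `create_importer`-backed `__getattr__` above does at import time (illustrative only; the real helper also handles optional dependencies):
```python
import importlib
import warnings

_LOOKUP = {"O365Toolkit": "langchain_community.agent_toolkits.office365.toolkit"}

def __getattr__(name: str):
    """PEP 562 module-level lookup: warn, then forward to the new location."""
    if name in _LOOKUP:
        warnings.warn(
            f"Importing {name} from this module is deprecated; "
            f"import it from {_LOOKUP[name]} instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        module = importlib.import_module(_LOOKUP[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```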
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.office365.toolkit import O365Toolkit
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"O365Toolkit": "langchain_community.agent_toolkits.office365.toolkit"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"O365Toolkit",
]
|
from tqdm import tqdm
from typing import Any, List
from llama_index.core.async_utils import asyncio_run, run_jobs
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.indices.property_graph.sub_retrievers.base import (
BasePGRetriever,
)
from llama_index.core.schema import NodeWithScore, QueryBundle
class PGRetriever(BaseRetriever):
"""
A retriever that uses multiple sub-retrievers to retrieve nodes from a property graph.
Args:
sub_retrievers (List[BasePGRetriever]):
The sub-retrievers to use.
num_workers (int, optional):
The number of workers to use for async retrieval. Defaults to 4.
use_async (bool, optional):
Whether to use async retrieval. Defaults to True.
show_progress (bool, optional):
Whether to show progress bars. Defaults to False.
"""
def __init__(
self,
sub_retrievers: List[BasePGRetriever],
num_workers: int = 4,
use_async: bool = True,
show_progress: bool = False,
**kwargs: Any,
) -> None:
self.sub_retrievers = sub_retrievers
self.use_async = use_async
self.num_workers = num_workers
self.show_progress = show_progress
def _deduplicate(self, nodes: List[NodeWithScore]) -> List[NodeWithScore]:
seen = set()
deduped = []
for node in nodes:
if node.text not in seen:
deduped.append(node)
seen.add(node.text)
return deduped
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
results = []
if self.use_async:
return asyncio_run(self._aretrieve(query_bundle))
for sub_retriever in tqdm(self.sub_retrievers, disable=not self.show_progress):
results.extend(sub_retriever.retrieve(query_bundle))
return self._deduplicate(results)
async def _aretrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
tasks = []
for sub_retriever in self.sub_retrievers:
tasks.append(sub_retriever.aretrieve(query_bundle))
async_results = await run_jobs(
tasks, workers=self.num_workers, show_progress=self.show_progress
)
# flatten the results
return self._deduplicate([node for nodes in async_results for node in nodes])
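A minimal usage sketch for the class above, assuming a populated `PropertyGraphIndex` named `index`; the sub-retriever names and signatures follow the llama-index property-graph module but should be treated as assumptions:
```python
# Hypothetical usage: combine two sub-retrievers over one property graph.
from llama_index.core.indices.property_graph import (
    LLMSynonymRetriever,
    VectorContextRetriever,
)

sub_retrievers = [
    VectorContextRetriever(index.property_graph_store),
    LLMSynonymRetriever(index.property_graph_store),
]
retriever = PGRetriever(sub_retrievers, num_workers=4, use_async=True)
nodes = retriever.retrieve("Who founded the company?")  # deduplicated results
```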
|
from tqdm import tqdm
from typing import Any, List
from llama_index.core.async_utils import asyncio_run, run_jobs
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.indices.property_graph.sub_retrievers.base import (
BasePGRetriever,
)
from llama_index.core.schema import NodeWithScore, QueryBundle
class PGRetriever(BaseRetriever):
"""A retriever that uses multiple sub-retrievers to retrieve nodes from a property graph.
Args:
sub_retrievers (List[BasePGRetriever]):
The sub-retrievers to use.
num_workers (int, optional):
The number of workers to use for async retrieval. Defaults to 4.
use_async (bool, optional):
Whether to use async retrieval. Defaults to True.
show_progress (bool, optional):
Whether to show progress bars. Defaults to False.
"""
def __init__(
self,
sub_retrievers: List[BasePGRetriever],
num_workers: int = 4,
use_async: bool = True,
show_progress: bool = False,
**kwargs: Any,
) -> None:
self.sub_retrievers = sub_retrievers
self.use_async = use_async
self.num_workers = num_workers
self.show_progress = show_progress
def _deduplicate(self, nodes: List[NodeWithScore]) -> List[NodeWithScore]:
seen = set()
deduped = []
for node in nodes:
if node.text not in seen:
deduped.append(node)
seen.add(node.text)
return deduped
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
results = []
if self.use_async:
return asyncio_run(self._aretrieve(query_bundle))
for sub_retriever in tqdm(self.sub_retrievers, disable=not self.show_progress):
results.extend(sub_retriever.retrieve(query_bundle))
return self._deduplicate(results)
async def _aretrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
tasks = []
for sub_retriever in self.sub_retrievers:
tasks.append(sub_retriever.aretrieve(query_bundle))
async_results = await run_jobs(
tasks, workers=self.num_workers, show_progress=self.show_progress
)
# flatten the results
return self._deduplicate([node for nodes in async_results for node in nodes])
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class NASFCOS(SingleStageDetector):
"""Implementation of `NAS-FCOS: Fast Neural Architecture Search for Object
Detection. <https://arxiv.org/abs/1906.04423>`_
Args:
backbone (:obj:`ConfigDict` or dict): The backbone config.
neck (:obj:`ConfigDict` or dict): The neck config.
bbox_head (:obj:`ConfigDict` or dict): The bbox head config.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
of NASFCOS. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of NASFCOS. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
list[dict], optional): Initialization config dict.
Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class NASFCOS(SingleStageDetector):
"""Implementation of `NAS-FCOS: Fast Neural Architecture Search for Object
Detection. <https://arxiv.org/abs/1906.04423>`_
Args:
backbone (:obj:`ConfigDict` or dict): The backbone config.
neck (:obj:`ConfigDict` or dict): The neck config.
bbox_head (:obj:`ConfigDict` or dict): The bbox head config.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
of NASFCOS. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of NASFCOS. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
list[dict], optional): Initialization config dict.
Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from torch.nn.modules import GroupNorm
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.models.backbones.res2net import Bottle2neck
from mmdet.models.backbones.resnet import BasicBlock, Bottleneck
from mmdet.models.backbones.resnext import Bottleneck as BottleneckX
from mmdet.models.utils import SimplifiedBasicBlock
def is_block(modules):
"""Check if is ResNet building block."""
if isinstance(modules, (BasicBlock, Bottleneck, BottleneckX, Bottle2neck,
SimplifiedBasicBlock)):
return True
return False
def is_norm(modules):
"""Check if is one of the norms."""
if isinstance(modules, (GroupNorm, _BatchNorm)):
return True
return False
def check_norm_state(modules, train_state):
"""Check if norm layer is in correct train state."""
for mod in modules:
if isinstance(mod, _BatchNorm):
if mod.training != train_state:
return False
return True
|
from torch.nn.modules import GroupNorm
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.models.backbones.res2net import Bottle2neck
from mmdet.models.backbones.resnet import BasicBlock, Bottleneck
from mmdet.models.backbones.resnext import Bottleneck as BottleneckX
from mmdet.models.utils import SimplifiedBasicBlock
def is_block(modules):
"""Check if is ResNet building block."""
if isinstance(modules, (BasicBlock, Bottleneck, BottleneckX, Bottle2neck,
SimplifiedBasicBlock)):
return True
return False
def is_norm(modules):
"""Check if is one of the norms."""
if isinstance(modules, (GroupNorm, _BatchNorm)):
return True
return False
def check_norm_state(modules, train_state):
"""Check if norm layer is in correct train state."""
for mod in modules:
if isinstance(mod, _BatchNorm):
if mod.training != train_state:
return False
return True
|
from typing import (
Union,
Optional,
TYPE_CHECKING,
List,
Dict,
)
if TYPE_CHECKING:
import numpy as np
from .... import DocumentArray
class FindMixin:
def _find(
self,
query: 'np.ndarray',
limit: Optional[Union[int, float]] = 20,
only_id: bool = False,
filter: Optional[Dict] = None,
**kwargs,
) -> List['DocumentArray']:
"""Returns approximate nearest neighbors given an input query.
:param query: the query documents to search.
:param limit: the number of results to get for each query document in search.
:param only_id: if set, the returned matches will only contain ``id``
:param filter: filter query used for pre-filtering
:param kwargs: other kwargs.
:return: a list of DocumentArrays containing the closest Document objects for each of the queries in `query`.
"""
from ....math import ndarray
n_rows, _ = ndarray.get_array_rows(query)
if n_rows == 1:
query = query.reshape(1, -1)
_, match_docs = self._annlite._search_documents(
query, limit=limit, filter=filter or {}, include_metadata=not only_id
)
return match_docs
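For context, a short sketch of how this mixin surfaces through the user-facing API with an annlite-backed `DocumentArray` (docarray v1-style storage config; treat the exact config keys as assumptions):
```python
import numpy as np
from docarray import Document, DocumentArray

# annlite-backed storage; `n_dim` must match the embedding dimensionality.
da = DocumentArray(storage='annlite', config={'n_dim': 8})
da.extend(
    Document(embedding=np.random.random(8).astype('float32'))
    for _ in range(100)
)

query = np.random.random(8).astype('float32')
matches = da.find(query, limit=5)  # dispatches to FindMixin._find
```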
|
from typing import (
Union,
Optional,
TYPE_CHECKING,
List,
)
if TYPE_CHECKING:
import numpy as np
from .... import DocumentArray
class FindMixin:
def _find(
self,
query: 'np.ndarray',
limit: Optional[Union[int, float]] = 20,
only_id: bool = False,
**kwargs,
) -> List['DocumentArray']:
"""Returns approximate nearest neighbors given an input query.
:param query: the query documents to search.
:param limit: the number of results to get for each query document in search.
:param only_id: if set, the returned matches will only contain ``id``
:param kwargs: other kwargs.
:return: a list of DocumentArrays containing the closest Document objects for each of the queries in `query`.
"""
from ....math import ndarray
n_rows, _ = ndarray.get_array_rows(query)
if n_rows == 1:
query = query.reshape(1, -1)
_, match_docs = self._annlite._search_documents(
query, limit=limit, include_metadata=not only_id
)
return match_docs
|
from abc import abstractmethod
from typing import TYPE_CHECKING, Any, Dict, Iterable, Optional, Type, TypeVar
from pydantic.fields import ModelField
if TYPE_CHECKING:
from pydantic.typing import SetStr
from docarray.document.mixins.proto import ProtoMixin
T = TypeVar('T', bound='AbstractDocument')
class AbstractDocument(Iterable):
__fields__: Dict[str, ModelField]
@classmethod
@abstractmethod
def _get_nested_document_class(cls, field: str) -> Type['ProtoMixin']:
...
@classmethod
@abstractmethod
def construct(
cls: Type[T], _fields_set: Optional['SetStr'] = None, **values: Any
) -> T:
"""
Construct a document without calling validation.
"""
...
|
from abc import abstractmethod
from typing import TYPE_CHECKING, Dict, Iterable, Type
from pydantic.fields import ModelField
if TYPE_CHECKING:
from docarray.document.mixins.proto import ProtoMixin
class AbstractDocument(Iterable):
__fields__: Dict[str, ModelField]
@classmethod
@abstractmethod
def _get_nested_document_class(cls, field: str) -> Type['ProtoMixin']:
...
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_shrink
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_silu as hard_swish
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_sigmoid
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import silu as swish
from keras.src.activations.activations import soft_shrink
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import squareplus
from keras.src.activations.activations import tanh
from keras.src.activations.activations import tanh_shrink
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_shrink
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_silu as hard_swish
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_sigmoid
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import silu as swish
from keras.src.activations.activations import soft_shrink
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import tanh
from keras.src.activations.activations import tanh_shrink
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_perceiver import *
from .feature_extraction_perceiver import *
from .image_processing_perceiver import *
from .image_processing_perceiver_fast import *
from .modeling_perceiver import *
from .tokenization_perceiver import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_perceiver import *
from .feature_extraction_perceiver import *
from .image_processing_perceiver import *
from .modeling_perceiver import *
from .tokenization_perceiver import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
from __future__ import annotations
import os
import platform
import tempfile
import pytest
from sentence_transformers import CrossEncoder, SentenceTransformer
from sentence_transformers.models import Pooling, Transformer
from sentence_transformers.util import is_datasets_available
if is_datasets_available():
from datasets import DatasetDict, load_dataset
@pytest.fixture()
def stsb_bert_tiny_model() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
@pytest.fixture(scope="session")
def stsb_bert_tiny_model_reused() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
@pytest.fixture()
def paraphrase_distilroberta_base_v1_model() -> SentenceTransformer:
return SentenceTransformer("paraphrase-distilroberta-base-v1")
@pytest.fixture()
def distilroberta_base_ce_model() -> CrossEncoder:
return CrossEncoder("distilroberta-base", num_labels=1)
@pytest.fixture()
def clip_vit_b_32_model() -> SentenceTransformer:
return SentenceTransformer("clip-ViT-B-32")
@pytest.fixture()
def distilbert_base_uncased_model() -> SentenceTransformer:
word_embedding_model = Transformer("distilbert-base-uncased")
pooling_model = Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
return model
@pytest.fixture(scope="session")
def stsb_dataset_dict() -> DatasetDict:
return load_dataset("mteb/stsbenchmark-sts")
@pytest.fixture()
def cache_dir():
"""
In the CI environment, we use a temporary directory as `cache_dir`
to avoid keeping the downloaded models on disk after the test.
This is only required for Ubuntu, as we otherwise have disk space issues there.
"""
if os.environ.get("CI", None) and platform.system() == "Linux":
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield None
|
from __future__ import annotations
import os
import platform
import tempfile
import pytest
from sentence_transformers import CrossEncoder, SentenceTransformer
from sentence_transformers.models import Pooling, Transformer
from sentence_transformers.util import is_datasets_available
if is_datasets_available():
from datasets import DatasetDict, load_dataset
@pytest.fixture()
def stsb_bert_tiny_model() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
@pytest.fixture(scope="session")
def stsb_bert_tiny_model_reused() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
@pytest.fixture()
def paraphrase_distilroberta_base_v1_model() -> SentenceTransformer:
return SentenceTransformer("paraphrase-distilroberta-base-v1")
@pytest.fixture()
def distilroberta_base_ce_model() -> CrossEncoder:
return CrossEncoder("distilroberta-base", num_labels=1)
@pytest.fixture()
def clip_vit_b_32_model() -> SentenceTransformer:
return SentenceTransformer("clip-ViT-B-32")
@pytest.fixture()
def distilbert_base_uncased_model() -> SentenceTransformer:
word_embedding_model = Transformer("distilbert-base-uncased")
pooling_model = Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
return model
@pytest.fixture(scope="session")
def stsb_dataset_dict() -> "DatasetDict":
return load_dataset("mteb/stsbenchmark-sts")
@pytest.fixture()
def cache_dir():
"""
In the CI environment, we use a temporary directory as `cache_dir`
to avoid keeping the downloaded models on disk after the test.
This is only required for Ubuntu, as we otherwise have disk space issues there.
"""
if os.environ.get("CI", None) and platform.system() == "Linux":
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield None
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import celu
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import dot_product_attention
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import glu
from keras.src.ops.nn import hard_shrink
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import hard_tanh
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import soft_shrink
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
from keras.src.ops.nn import sparse_plus
from keras.src.ops.nn import squareplus
from keras.src.ops.nn import tanh_shrink
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import celu
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import dot_product_attention
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import glu
from keras.src.ops.nn import hard_shrink
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import hard_tanh
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import soft_shrink
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
from keras.src.ops.nn import squareplus
from keras.src.ops.nn import tanh_shrink
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import subprocess
import numpy as np
import pytest
from jina import Document, DocumentArray, Flow
from jina.executors.metas import get_default_metas
from jina_commons.indexers.dump import export_dump_streaming
from ...faiss_searcher import FaissSearcher
def _get_docs_from_vecs(queries):
docs = DocumentArray()
for q in queries:
doc = Document(embedding=q)
docs.append(doc)
return docs
@pytest.fixture(scope='function', autouse=True)
def metas(tmpdir):
os.environ['TEST_WORKSPACE'] = str(tmpdir)
metas = get_default_metas()
metas['workspace'] = os.environ['TEST_WORKSPACE']
metas['name'] = 'faiss_idx'
yield metas
del os.environ['TEST_WORKSPACE']
def test_save(metas, tmpdir):
vec_idx = np.random.randint(0, high=512, size=[512]).astype(str)
vec = np.array(np.random.random([512, 10]), dtype=np.float32)
query = np.array(np.random.random([10, 10]), dtype=np.float32)
query_docs = _get_docs_from_vecs(query)
export_dump_streaming(
os.path.join(tmpdir, 'dump'),
1,
len(vec_idx),
zip(vec_idx, vec, [b'' for _ in range(len(vec))]),
)
dump_path = os.path.join(tmpdir, 'dump')
f = Flow().add(
uses=FaissSearcher,
timeout_ready=-1,
uses_with={
'index_key': 'Flat',
'dump_path': dump_path,
},
uses_meta=metas,
)
with f:
f.post(on='/save')
new_f = Flow().add(
uses=FaissSearcher,
timeout_ready=-1,
uses_with={
'index_key': 'Flat',
},
uses_meta=metas,
)
with new_f:
result = new_f.post(
on='/search', data=query_docs, return_results=True, parameters={'limit': 4}
)[0].docs
assert len(result[0].matches) == 4
for d in result:
assert (
d.matches[0].scores['cosine'].value
<= d.matches[1].scores['cosine'].value
)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import subprocess
import numpy as np
import pytest
from jina import Document, DocumentArray, Flow
from jina.executors.metas import get_default_metas
from jina_commons.indexers.dump import export_dump_streaming
from ...faiss_searcher import FaissSearcher
def _get_docs_from_vecs(queries):
docs = DocumentArray()
for q in queries:
doc = Document(embedding=q)
docs.append(doc)
return docs
@pytest.fixture(scope='function', autouse=True)
def metas(tmpdir):
os.environ['TEST_WORKSPACE'] = str(tmpdir)
metas = get_default_metas()
metas['workspace'] = os.environ['TEST_WORKSPACE']
metas['name'] = 'faiss_idx'
yield metas
del os.environ['TEST_WORKSPACE']
def test_save(metas, tmpdir):
vec_idx = np.random.randint(0, high=512, size=[512]).astype(str)
vec = np.array(np.random.random([512, 10]), dtype=np.float32)
query = np.array(np.random.random([10, 10]), dtype=np.float32)
query_docs = _get_docs_from_vecs(query)
export_dump_streaming(
os.path.join(tmpdir, 'dump'),
1,
len(vec_idx),
zip(vec_idx, vec, [b'' for _ in range(len(vec))]),
)
dump_path = os.path.join(tmpdir, 'dump')
f = Flow().add(
uses=FaissSearcher,
timeout_ready=-1,
uses_with={
'index_key': 'Flat',
'dump_path': dump_path,
},
uses_meta=metas,
)
with f:
f.post(on='/save')
new_f = Flow().add(
uses=FaissSearcher,
timeout_ready=-1,
uses_with={
'index_key': 'Flat',
},
uses_meta=metas,
)
with new_f:
result = new_f.post(
on='/search', data=query_docs, return_results=True, parameters={'top_k': 4}
)[0].docs
assert len(result[0].matches) == 4
for d in result:
assert (
d.matches[0].scores['cosine'].value
<= d.matches[1].scores['cosine'].value
)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .bbox_nms import fast_nms, multiclass_nms
from .merge_augs import (merge_aug_bboxes, merge_aug_masks,
merge_aug_proposals, merge_aug_scores)
__all__ = [
'multiclass_nms', 'merge_aug_proposals', 'merge_aug_bboxes',
'merge_aug_scores', 'merge_aug_masks', 'fast_nms'
]
|
from .bbox_nms import fast_nms, multiclass_nms
from .merge_augs import (merge_aug_bboxes, merge_aug_masks,
merge_aug_proposals, merge_aug_scores)
__all__ = [
'multiclass_nms', 'merge_aug_proposals', 'merge_aug_bboxes',
'merge_aug_scores', 'merge_aug_masks', 'fast_nms'
]
|
"""Integration test for Wolfram Alpha API Wrapper."""
from langchain_community.utilities.wolfram_alpha import WolframAlphaAPIWrapper
def test_call() -> None:
"""Test that call gives the correct answer."""
search = WolframAlphaAPIWrapper()
output = search.run("what is 2x+18=x+5?")
assert "x = -13" in output
|
"""Integration test for Wolfram Alpha API Wrapper."""
from langchain_community.utilities.wolfram_alpha import WolframAlphaAPIWrapper
def test_call() -> None:
"""Test that call gives the correct answer."""
search = WolframAlphaAPIWrapper() # type: ignore[call-arg]
output = search.run("what is 2x+18=x+5?")
assert "x = -13" in output
|
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .cornernet import CornerNet
from .deformable_detr import DeformableDETR
from .detr import DETR
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .nasfcos import NASFCOS
from .paa import PAA
from .panoptic_fpn import PanopticFPN
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
from .point_rend import PointRend
from .queryinst import QueryInst
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .scnet import SCNet
from .single_stage import SingleStageDetector
from .solo import SOLO
from .sparse_rcnn import SparseRCNN
from .trident_faster_rcnn import TridentFasterRCNN
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
from .yolof import YOLOF
from .yolox import YOLOX
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',
'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',
'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',
'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',
'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO',
'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',
'TwoStagePanopticSegmentor', 'PanopticFPN', 'QueryInst'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .cornernet import CornerNet
from .deformable_detr import DeformableDETR
from .detr import DETR
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .nasfcos import NASFCOS
from .paa import PAA
from .panoptic_fpn import PanopticFPN
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
from .point_rend import PointRend
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .scnet import SCNet
from .single_stage import SingleStageDetector
from .solo import SOLO
from .sparse_rcnn import SparseRCNN
from .trident_faster_rcnn import TridentFasterRCNN
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
from .yolof import YOLOF
from .yolox import YOLOX
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',
'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',
'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',
'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',
'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO',
'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',
'TwoStagePanopticSegmentor', 'PanopticFPN'
]
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.legacy import saving as saving
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api.legacy import saving
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from mmdet.models import YOLOX
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, get_detector_cfg
class TestYOLOX(TestCase):
def test_preprocess_data(self):
model = get_detector_cfg('yolox/yolox_tiny_8x8_300e_coco.py')
model.random_size_interval = 1
model.random_size_range = (10, 10)
model.input_size = (128, 128)
model = MODELS.build(model)
model.train()
self.assertTrue(isinstance(model, YOLOX))
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 128, 128]])
batch_inputs, batch_data_samples = model.preprocess_data(packed_inputs)
self.assertEqual(batch_inputs.shape, (2, 3, 128, 128))
# resize after one iter
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 128, 128]])
batch_inputs, batch_data_samples = model.preprocess_data(packed_inputs)
self.assertEqual(batch_inputs.shape, (2, 3, 320, 320))
model.eval()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 128, 128]])
batch_inputs, batch_data_samples = model.preprocess_data(packed_inputs)
self.assertEqual(batch_inputs.shape, (2, 3, 128, 128))
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from mmdet.models import YOLOX
from mmdet.registry import MODELS
from .utils import demo_mm_inputs, get_detector_cfg
class TestYOLOX(TestCase):
def test_preprocess_data(self):
model = get_detector_cfg('yolox/yolox_tiny_8x8_300e_coco.py')
model.random_size_interval = 1
model.random_size_range = (10, 10)
model.input_size = (128, 128)
model = MODELS.build(model)
model.train()
self.assertTrue(isinstance(model, YOLOX))
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 128, 128]])
batch_inputs, batch_data_samples = model.preprocess_data(packed_inputs)
self.assertEqual(batch_inputs.shape, (2, 3, 128, 128))
# resize after one iter
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 128, 128]])
batch_inputs, batch_data_samples = model.preprocess_data(packed_inputs)
self.assertEqual(batch_inputs.shape, (2, 3, 320, 320))
model.eval()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 128, 128]])
batch_inputs, batch_data_samples = model.preprocess_data(packed_inputs)
self.assertEqual(batch_inputs.shape, (2, 3, 128, 128))
|
from typing import Any, Sequence
from llama_index.core.base.llms.generic_utils import (
completion_response_to_chat_response,
stream_completion_response_to_chat_response,
)
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseAsyncGen,
ChatResponseGen,
CompletionResponse,
CompletionResponseAsyncGen,
)
from llama_index.core.llms.callbacks import (
llm_chat_callback,
llm_completion_callback,
)
from llama_index.core.llms.llm import LLM
class CustomLLM(LLM):
"""Simple abstract base class for custom LLMs.
Subclasses must implement the `__init__`, `_complete`,
`_stream_complete`, and `metadata` methods.
"""
def __init__(self, *args: Any, **kwargs: Any):
super().__init__(*args, **kwargs)
@llm_chat_callback()
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
assert self.messages_to_prompt is not None
prompt = self.messages_to_prompt(messages)
completion_response = self.complete(prompt, formatted=True, **kwargs)
return completion_response_to_chat_response(completion_response)
@llm_chat_callback()
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
assert self.messages_to_prompt is not None
prompt = self.messages_to_prompt(messages)
completion_response_gen = self.stream_complete(prompt, formatted=True, **kwargs)
return stream_completion_response_to_chat_response(completion_response_gen)
@llm_chat_callback()
async def achat(
self,
messages: Sequence[ChatMessage],
**kwargs: Any,
) -> ChatResponse:
return self.chat(messages, **kwargs)
@llm_chat_callback()
async def astream_chat(
self,
messages: Sequence[ChatMessage],
**kwargs: Any,
) -> ChatResponseAsyncGen:
async def gen() -> ChatResponseAsyncGen:
for message in self.stream_chat(messages, **kwargs):
yield message
# NOTE: convert generator to async generator
return gen()
@llm_completion_callback()
async def acomplete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
return self.complete(prompt, formatted=formatted, **kwargs)
@llm_completion_callback()
async def astream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseAsyncGen:
async def gen() -> CompletionResponseAsyncGen:
for message in self.stream_complete(prompt, formatted=formatted, **kwargs):
yield message
# NOTE: convert generator to async generator
return gen()
@classmethod
def class_name(cls) -> str:
return "custom_llm"
|
from typing import Any, Sequence
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseAsyncGen,
ChatResponseGen,
CompletionResponse,
CompletionResponseAsyncGen,
)
from llama_index.core.llms.callbacks import (
llm_chat_callback,
llm_completion_callback,
)
from llama_index.core.base.llms.generic_utils import (
completion_response_to_chat_response,
stream_completion_response_to_chat_response,
)
from llama_index.core.llms.llm import LLM
class CustomLLM(LLM):
"""Simple abstract base class for custom LLMs.
Subclasses must implement the `__init__`, `_complete`,
`_stream_complete`, and `metadata` methods.
"""
@llm_chat_callback()
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
assert self.messages_to_prompt is not None
prompt = self.messages_to_prompt(messages)
completion_response = self.complete(prompt, formatted=True, **kwargs)
return completion_response_to_chat_response(completion_response)
@llm_chat_callback()
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
assert self.messages_to_prompt is not None
prompt = self.messages_to_prompt(messages)
completion_response_gen = self.stream_complete(prompt, formatted=True, **kwargs)
return stream_completion_response_to_chat_response(completion_response_gen)
@llm_chat_callback()
async def achat(
self,
messages: Sequence[ChatMessage],
**kwargs: Any,
) -> ChatResponse:
return self.chat(messages, **kwargs)
@llm_chat_callback()
async def astream_chat(
self,
messages: Sequence[ChatMessage],
**kwargs: Any,
) -> ChatResponseAsyncGen:
async def gen() -> ChatResponseAsyncGen:
for message in self.stream_chat(messages, **kwargs):
yield message
# NOTE: convert generator to async generator
return gen()
@llm_completion_callback()
async def acomplete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
return self.complete(prompt, formatted=formatted, **kwargs)
@llm_completion_callback()
async def astream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseAsyncGen:
async def gen() -> CompletionResponseAsyncGen:
for message in self.stream_complete(prompt, formatted=formatted, **kwargs):
yield message
# NOTE: convert generator to async generator
return gen()
@classmethod
def class_name(cls) -> str:
return "custom_llm"
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Tuple
from mmcv.cnn.bricks import build_plugin_layer
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import OptConfigType
from .base_roi_extractor import BaseRoIExtractor
@MODELS.register_module()
class GenericRoIExtractor(BaseRoIExtractor):
"""Extract RoI features from all level feature maps levels.
This is the implementation of `A novel Region of Interest Extraction Layer
for Instance Segmentation <https://arxiv.org/abs/2004.13665>`_.
Args:
aggregation (str): The method to aggregate multiple feature maps.
Options are 'sum', 'concat'. Defaults to 'sum'.
pre_cfg (:obj:`ConfigDict` or dict): Specify pre-processing modules.
Defaults to None.
post_cfg (:obj:`ConfigDict` or dict): Specify post-processing modules.
Defaults to None.
kwargs (keyword arguments): Arguments that are the same
as :class:`BaseRoIExtractor`.
"""
def __init__(self,
aggregation: str = 'sum',
pre_cfg: OptConfigType = None,
post_cfg: OptConfigType = None,
**kwargs) -> None:
super().__init__(**kwargs)
assert aggregation in ['sum', 'concat']
self.aggregation = aggregation
self.with_post = post_cfg is not None
self.with_pre = pre_cfg is not None
# build pre/post processing modules
if self.with_post:
self.post_module = build_plugin_layer(post_cfg, '_post_module')[1]
if self.with_pre:
self.pre_module = build_plugin_layer(pre_cfg, '_pre_module')[1]
def forward(self,
feats: Tuple[Tensor],
rois: Tensor,
roi_scale_factor: Optional[float] = None) -> Tensor:
"""Extractor ROI feats.
Args:
feats (Tuple[Tensor]): Multi-scale features.
rois (Tensor): RoIs with the shape (n, 5) where the first
column indicates batch id of each RoI.
roi_scale_factor (Optional[float]): RoI scale factor.
Defaults to None.
Returns:
Tensor: RoI feature.
"""
out_size = self.roi_layers[0].output_size
num_levels = len(feats)
roi_feats = feats[0].new_zeros(
rois.size(0), self.out_channels, *out_size)
        # sometimes rois is an empty tensor
if roi_feats.shape[0] == 0:
return roi_feats
if num_levels == 1:
return self.roi_layers[0](feats[0], rois)
if roi_scale_factor is not None:
rois = self.roi_rescale(rois, roi_scale_factor)
# mark the starting channels for concat mode
start_channels = 0
for i in range(num_levels):
roi_feats_t = self.roi_layers[i](feats[i], rois)
end_channels = start_channels + roi_feats_t.size(1)
if self.with_pre:
# apply pre-processing to a RoI extracted from each layer
roi_feats_t = self.pre_module(roi_feats_t)
if self.aggregation == 'sum':
# and sum them all
roi_feats += roi_feats_t
else:
# and concat them along channel dimension
roi_feats[:, start_channels:end_channels] = roi_feats_t
# update channels starting position
start_channels = end_channels
# check if concat channels match at the end
if self.aggregation == 'concat':
assert start_channels == self.out_channels
if self.with_post:
# apply post-processing before return the result
roi_feats = self.post_module(roi_feats)
return roi_feats
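# Illustrative config sketch (hedged): roughly how this extractor is wired into
# an mmdet R-CNN config, in the spirit of the GRoIE configs. The roi_layer,
# channel, and plugin settings below are typical choices, not prescribed by
# this module.
#
# bbox_roi_extractor = dict(
#     type='GenericRoIExtractor',
#     aggregation='sum',
#     roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
#     out_channels=256,
#     featmap_strides=[4, 8, 16, 32],
#     pre_cfg=dict(
#         type='ConvModule',
#         in_channels=256,
#         out_channels=256,
#         kernel_size=5,
#         padding=2,
#         inplace=False),
#     post_cfg=dict(
#         type='GeneralizedAttention',
#         in_channels=256,
#         spatial_range=-1,
#         num_heads=6,
#         attention_type='0100',
#         kv_stride=2))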
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Tuple
from mmcv.cnn.bricks import build_plugin_layer
from torch import Tensor
from mmdet.core.utils.typing import OptConfigType
from mmdet.registry import MODELS
from .base_roi_extractor import BaseRoIExtractor
@MODELS.register_module()
class GenericRoIExtractor(BaseRoIExtractor):
"""Extract RoI features from all level feature maps levels.
This is the implementation of `A novel Region of Interest Extraction Layer
for Instance Segmentation <https://arxiv.org/abs/2004.13665>`_.
Args:
aggregation (str): The method to aggregate multiple feature maps.
Options are 'sum', 'concat'. Defaults to 'sum'.
pre_cfg (:obj:`ConfigDict` or dict): Specify pre-processing modules.
Defaults to None.
post_cfg (:obj:`ConfigDict` or dict): Specify post-processing modules.
Defaults to None.
kwargs (keyword arguments): Arguments that are the same
as :class:`BaseRoIExtractor`.
"""
def __init__(self,
aggregation: str = 'sum',
pre_cfg: OptConfigType = None,
post_cfg: OptConfigType = None,
**kwargs) -> None:
super().__init__(**kwargs)
assert aggregation in ['sum', 'concat']
self.aggregation = aggregation
self.with_post = post_cfg is not None
self.with_pre = pre_cfg is not None
# build pre/post processing modules
if self.with_post:
self.post_module = build_plugin_layer(post_cfg, '_post_module')[1]
if self.with_pre:
self.pre_module = build_plugin_layer(pre_cfg, '_pre_module')[1]
def forward(self,
feats: Tuple[Tensor],
rois: Tensor,
roi_scale_factor: Optional[float] = None) -> Tensor:
"""Extractor ROI feats.
Args:
feats (Tuple[Tensor]): Multi-scale features.
rois (Tensor): RoIs with the shape (n, 5) where the first
column indicates batch id of each RoI.
roi_scale_factor (Optional[float]): RoI scale factor.
Defaults to None.
Returns:
Tensor: RoI feature.
"""
out_size = self.roi_layers[0].output_size
num_levels = len(feats)
roi_feats = feats[0].new_zeros(
rois.size(0), self.out_channels, *out_size)
        # sometimes rois is an empty tensor
if roi_feats.shape[0] == 0:
return roi_feats
if num_levels == 1:
return self.roi_layers[0](feats[0], rois)
if roi_scale_factor is not None:
rois = self.roi_rescale(rois, roi_scale_factor)
# mark the starting channels for concat mode
start_channels = 0
for i in range(num_levels):
roi_feats_t = self.roi_layers[i](feats[i], rois)
end_channels = start_channels + roi_feats_t.size(1)
if self.with_pre:
# apply pre-processing to a RoI extracted from each layer
roi_feats_t = self.pre_module(roi_feats_t)
if self.aggregation == 'sum':
# and sum them all
roi_feats += roi_feats_t
else:
# and concat them along channel dimension
roi_feats[:, start_channels:end_channels] = roi_feats_t
# update channels starting position
start_channels = end_channels
# check if concat channels match at the end
if self.aggregation == 'concat':
assert start_channels == self.out_channels
if self.with_post:
# apply post-processing before return the result
roi_feats = self.post_module(roi_feats)
return roi_feats
|
import numpy as np
import torch
import torchaudio.prototype.functional as F
from parameterized import parameterized
from scipy import signal
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin
class FunctionalTestImpl(TestBaseMixin):
@nested_params(
[(10, 4), (4, 3, 1, 2), (2,), ()],
[(100, 43), (21, 45)],
)
def test_convolve_numerics(self, leading_dims, lengths):
"""Check that convolve returns values identical to those that SciPy produces."""
L_x, L_y = lengths
x = torch.rand(*(leading_dims + (L_x,)), dtype=self.dtype, device=self.device)
y = torch.rand(*(leading_dims + (L_y,)), dtype=self.dtype, device=self.device)
actual = F.convolve(x, y)
num_signals = torch.tensor(leading_dims).prod() if leading_dims else 1
x_reshaped = x.reshape((num_signals, L_x))
y_reshaped = y.reshape((num_signals, L_y))
expected = [
signal.convolve(x_reshaped[i].detach().cpu().numpy(), y_reshaped[i].detach().cpu().numpy())
for i in range(num_signals)
]
expected = torch.tensor(np.array(expected))
expected = expected.reshape(leading_dims + (-1,))
self.assertEqual(expected, actual)
@nested_params(
[(10, 4), (4, 3, 1, 2), (2,), ()],
[(100, 43), (21, 45)],
)
def test_fftconvolve_numerics(self, leading_dims, lengths):
"""Check that fftconvolve returns values identical to those that SciPy produces."""
L_x, L_y = lengths
x = torch.rand(*(leading_dims + (L_x,)), dtype=self.dtype, device=self.device)
y = torch.rand(*(leading_dims + (L_y,)), dtype=self.dtype, device=self.device)
actual = F.fftconvolve(x, y)
expected = signal.fftconvolve(x.detach().cpu().numpy(), y.detach().cpu().numpy(), axes=-1)
expected = torch.tensor(expected)
self.assertEqual(expected, actual)
@nested_params(
[F.convolve, F.fftconvolve],
[(4, 3, 1, 2), (1,)],
[(10, 4), (2, 2, 2)],
)
def test_convolve_input_leading_dim_check(self, fn, x_shape, y_shape):
"""Check that convolve properly rejects inputs with different leading dimensions."""
x = torch.rand(*x_shape, dtype=self.dtype, device=self.device)
y = torch.rand(*y_shape, dtype=self.dtype, device=self.device)
with self.assertRaisesRegex(ValueError, "Leading dimensions"):
fn(x, y)
def test_add_noise_broadcast(self):
"""Check that add_noise produces correct outputs when broadcasting input dimensions."""
leading_dims = (5, 2, 3)
L = 51
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device)
noise = torch.rand(5, 1, 1, L, dtype=self.dtype, device=self.device)
lengths = torch.rand(5, 1, 3, dtype=self.dtype, device=self.device)
snr = torch.rand(1, 1, 1, dtype=self.dtype, device=self.device) * 10
actual = F.add_noise(waveform, noise, lengths, snr)
noise_expanded = noise.expand(*leading_dims, L)
snr_expanded = snr.expand(*leading_dims)
lengths_expanded = lengths.expand(*leading_dims)
expected = F.add_noise(waveform, noise_expanded, lengths_expanded, snr_expanded)
self.assertEqual(expected, actual)
@parameterized.expand(
[((5, 2, 3), (2, 1, 1), (5, 2), (5, 2, 3)), ((2, 1), (5,), (5,), (5,)), ((3,), (5, 2, 3), (2, 1, 1), (5, 2))]
)
def test_add_noise_leading_dim_check(self, waveform_dims, noise_dims, lengths_dims, snr_dims):
"""Check that add_noise properly rejects inputs with different leading dimension lengths."""
L = 51
waveform = torch.rand(*waveform_dims, L, dtype=self.dtype, device=self.device)
noise = torch.rand(*noise_dims, L, dtype=self.dtype, device=self.device)
lengths = torch.rand(*lengths_dims, dtype=self.dtype, device=self.device)
snr = torch.rand(*snr_dims, dtype=self.dtype, device=self.device) * 10
with self.assertRaisesRegex(ValueError, "Input leading dimensions"):
F.add_noise(waveform, noise, lengths, snr)
def test_add_noise_length_check(self):
"""Check that add_noise properly rejects inputs that have inconsistent length dimensions."""
leading_dims = (5, 2, 3)
L = 51
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device)
noise = torch.rand(*leading_dims, 50, dtype=self.dtype, device=self.device)
lengths = torch.rand(*leading_dims, dtype=self.dtype, device=self.device)
snr = torch.rand(*leading_dims, dtype=self.dtype, device=self.device) * 10
with self.assertRaisesRegex(ValueError, "Length dimensions"):
F.add_noise(waveform, noise, lengths, snr)
|
import numpy as np
import torch
import torchaudio.prototype.functional as F
from scipy import signal
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin
class FunctionalTestImpl(TestBaseMixin):
@nested_params(
[(10, 4), (4, 3, 1, 2), (2,), ()],
[(100, 43), (21, 45)],
)
def test_convolve_numerics(self, leading_dims, lengths):
"""Check that convolve returns values identical to those that SciPy produces."""
L_x, L_y = lengths
x = torch.rand(*(leading_dims + (L_x,)), dtype=self.dtype, device=self.device)
y = torch.rand(*(leading_dims + (L_y,)), dtype=self.dtype, device=self.device)
actual = F.convolve(x, y)
num_signals = torch.tensor(leading_dims).prod() if leading_dims else 1
x_reshaped = x.reshape((num_signals, L_x))
y_reshaped = y.reshape((num_signals, L_y))
expected = [
signal.convolve(x_reshaped[i].detach().cpu().numpy(), y_reshaped[i].detach().cpu().numpy())
for i in range(num_signals)
]
expected = torch.tensor(np.array(expected))
expected = expected.reshape(leading_dims + (-1,))
self.assertEqual(expected, actual)
@nested_params(
[(10, 4), (4, 3, 1, 2), (2,), ()],
[(100, 43), (21, 45)],
)
def test_fftconvolve_numerics(self, leading_dims, lengths):
"""Check that fftconvolve returns values identical to those that SciPy produces."""
L_x, L_y = lengths
x = torch.rand(*(leading_dims + (L_x,)), dtype=self.dtype, device=self.device)
y = torch.rand(*(leading_dims + (L_y,)), dtype=self.dtype, device=self.device)
actual = F.fftconvolve(x, y)
expected = signal.fftconvolve(x.detach().cpu().numpy(), y.detach().cpu().numpy(), axes=-1)
expected = torch.tensor(expected)
self.assertEqual(expected, actual)
@nested_params(
[F.convolve, F.fftconvolve],
[(4, 3, 1, 2), (1,)],
[(10, 4), (2, 2, 2)],
)
def test_convolve_input_leading_dim_check(self, fn, x_shape, y_shape):
"""Check that convolve properly rejects inputs with different leading dimensions."""
x = torch.rand(*x_shape, dtype=self.dtype, device=self.device)
y = torch.rand(*y_shape, dtype=self.dtype, device=self.device)
with self.assertRaisesRegex(ValueError, "Leading dimensions"):
fn(x, y)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .atss_vlfusion_head import ATSSVLFusionHead
from .autoassign_head import AutoAssignHead
from .boxinst_head import BoxInstBboxHead, BoxInstMaskHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centernet_update_head import CenterNetUpdateHead
from .centripetal_head import CentripetalHead
from .condinst_head import CondInstBboxHead, CondInstMaskHead
from .conditional_detr_head import ConditionalDETRHead
from .corner_head import CornerHead
from .dab_detr_head import DABDETRHead
from .ddod_head import DDODHead
from .ddq_detr_head import DDQDETRHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .dino_head import DINOHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .lad_head import LADHead
from .ld_head import LDHead
from .mask2former_head import Mask2FormerHead
from .maskformer_head import MaskFormerHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .rtmdet_head import RTMDetHead, RTMDetSepBNHead
from .rtmdet_ins_head import RTMDetInsHead, RTMDetInsSepBNHead
from .sabl_retina_head import SABLRetinaHead
from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead
from .solov2_head import SOLOV2Head
from .ssd_head import SSDHead
from .tood_head import TOODHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTProtonet', 'YOLOV3Head', 'PAAHead', 'SABLRetinaHead',
'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead', 'CascadeRPNHead',
'EmbeddingRPNHead', 'LDHead', 'AutoAssignHead', 'DETRHead', 'YOLOFHead',
'DeformableDETRHead', 'CenterNetHead', 'YOLOXHead', 'SOLOHead',
'DecoupledSOLOHead', 'DecoupledSOLOLightHead', 'SOLOV2Head', 'LADHead',
'TOODHead', 'MaskFormerHead', 'Mask2FormerHead', 'DDODHead',
'CenterNetUpdateHead', 'RTMDetHead', 'RTMDetSepBNHead', 'CondInstBboxHead',
'CondInstMaskHead', 'RTMDetInsHead', 'RTMDetInsSepBNHead',
'BoxInstBboxHead', 'BoxInstMaskHead', 'ConditionalDETRHead', 'DINOHead',
'ATSSVLFusionHead', 'DABDETRHead', 'DDQDETRHead'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .atss_vlfusion_head import ATSSVLFusionHead
from .autoassign_head import AutoAssignHead
from .boxinst_head import BoxInstBboxHead, BoxInstMaskHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centernet_update_head import CenterNetUpdateHead
from .centripetal_head import CentripetalHead
from .condinst_head import CondInstBboxHead, CondInstMaskHead
from .conditional_detr_head import ConditionalDETRHead
from .corner_head import CornerHead
from .dab_detr_head import DABDETRHead
from .ddod_head import DDODHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .dino_head import DINOHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .lad_head import LADHead
from .ld_head import LDHead
from .mask2former_head import Mask2FormerHead
from .maskformer_head import MaskFormerHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .rtmdet_head import RTMDetHead, RTMDetSepBNHead
from .rtmdet_ins_head import RTMDetInsHead, RTMDetInsSepBNHead
from .sabl_retina_head import SABLRetinaHead
from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead
from .solov2_head import SOLOV2Head
from .ssd_head import SSDHead
from .tood_head import TOODHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTProtonet', 'YOLOV3Head', 'PAAHead', 'SABLRetinaHead',
'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead', 'CascadeRPNHead',
'EmbeddingRPNHead', 'LDHead', 'AutoAssignHead', 'DETRHead', 'YOLOFHead',
'DeformableDETRHead', 'CenterNetHead', 'YOLOXHead', 'SOLOHead',
'DecoupledSOLOHead', 'DecoupledSOLOLightHead', 'SOLOV2Head', 'LADHead',
'TOODHead', 'MaskFormerHead', 'Mask2FormerHead', 'DDODHead',
'CenterNetUpdateHead', 'RTMDetHead', 'RTMDetSepBNHead', 'CondInstBboxHead',
'CondInstMaskHead', 'RTMDetInsHead', 'RTMDetInsSepBNHead',
'BoxInstBboxHead', 'BoxInstMaskHead', 'ConditionalDETRHead', 'DINOHead',
'ATSSVLFusionHead', 'DABDETRHead'
]
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
import os
from logging import getLogger
from typing import List
from sentencepiece import SentencePieceProcessor
logger = getLogger()
class Tokenizer:
"""tokenizing and encoding/decoding text using SentencePiece."""
def __init__(self, model_path: str):
"""
Initializes the Tokenizer with a SentencePiece model.
Args:
model_path (str): The path to the SentencePiece model file.
"""
# reload tokenizer
assert os.path.isfile(model_path), model_path
self.sp_model = SentencePieceProcessor(model_file=model_path)
logger.info(f"Reloaded SentencePiece model from {model_path}")
# BOS / EOS token IDs
self.n_words: int = self.sp_model.vocab_size()
self.bos_id: int = self.sp_model.bos_id()
self.eos_id: int = self.sp_model.eos_id()
self.pad_id: int = self.sp_model.pad_id()
logger.info(
f"#words: {self.n_words} - BOS ID: {self.bos_id} - EOS ID: {self.eos_id}"
)
assert self.sp_model.vocab_size() == self.sp_model.get_piece_size()
def encode(self, s: str, bos: bool, eos: bool) -> List[int]:
"""
Encodes a string into a list of token IDs.
Args:
s (str): The input string to be encoded.
bos (bool): Whether to prepend the beginning-of-sequence token.
eos (bool): Whether to append the end-of-sequence token.
Returns:
List[int]: A list of token IDs.
"""
assert type(s) is str
t = self.sp_model.encode(s)
if bos:
t = [self.bos_id] + t
if eos:
t = t + [self.eos_id]
return t
def decode(self, t: List[int]) -> str:
"""
Decodes a list of token IDs into a string.
Args:
t (List[int]): The list of token IDs to be decoded.
Returns:
str: The decoded string.
"""
return self.sp_model.decode(t)
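# Round-trip usage sketch (illustrative; "tokenizer.model" is a placeholder
# path to any valid SentencePiece model file). SentencePiece round-trips plain
# text and skips control tokens such as BOS on decode.
#   tok = Tokenizer(model_path="tokenizer.model")
#   ids = tok.encode("Hello world", bos=True, eos=False)
#   assert ids[0] == tok.bos_id
#   assert tok.decode(ids) == "Hello world"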
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
import os
from logging import getLogger
from typing import List
from sentencepiece import SentencePieceProcessor
logger = getLogger()
class Tokenizer:
def __init__(self, model_path: str):
# reload tokenizer
assert os.path.isfile(model_path), model_path
self.sp_model = SentencePieceProcessor(model_file=model_path)
logger.info(f"Reloaded SentencePiece model from {model_path}")
# BOS / EOS token IDs
self.n_words: int = self.sp_model.vocab_size()
self.bos_id: int = self.sp_model.bos_id()
self.eos_id: int = self.sp_model.eos_id()
self.pad_id: int = self.sp_model.pad_id()
logger.info(
f"#words: {self.n_words} - BOS ID: {self.bos_id} - EOS ID: {self.eos_id}"
)
assert self.sp_model.vocab_size() == self.sp_model.get_piece_size()
def encode(self, s: str, bos: bool, eos: bool) -> List[int]:
assert type(s) is str
t = self.sp_model.encode(s)
if bos:
t = [self.bos_id] + t
if eos:
t = t + [self.eos_id]
return t
def decode(self, t: List[int]) -> str:
return self.sp_model.decode(t)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import SearxSearchResults, SearxSearchRun
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"SearxSearchRun": "langchain_community.tools",
"SearxSearchResults": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"SearxSearchResults",
"SearxSearchRun",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import SearxSearchResults, SearxSearchRun
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"SearxSearchRun": "langchain_community.tools",
"SearxSearchResults": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"SearxSearchRun",
"SearxSearchResults",
]
|
import os
from pathlib import Path
import pytest
from fastapi.testclient import TestClient
@pytest.fixture(scope="module")
def client():
static_dir: Path = Path(os.getcwd()) / "static"
print(static_dir)
static_dir.mkdir(exist_ok=True)
from docs_src.custom_docs_ui.tutorial001 import app
with TestClient(app) as client:
yield client
static_dir.rmdir()
def test_swagger_ui_html(client: TestClient):
response = client.get("/docs")
assert response.status_code == 200, response.text
assert "https://unpkg.com/swagger-ui-dist@5/swagger-ui-bundle.js" in response.text
assert "https://unpkg.com/swagger-ui-dist@5/swagger-ui.css" in response.text
def test_swagger_ui_oauth2_redirect_html(client: TestClient):
response = client.get("/docs/oauth2-redirect")
assert response.status_code == 200, response.text
assert "window.opener.swaggerUIRedirectOauth2" in response.text
def test_redoc_html(client: TestClient):
response = client.get("/redoc")
assert response.status_code == 200, response.text
assert "https://unpkg.com/redoc@2/bundles/redoc.standalone.js" in response.text
def test_api(client: TestClient):
response = client.get("/users/john")
assert response.status_code == 200, response.text
assert response.json()["message"] == "Hello john"
|
import os
from pathlib import Path
import pytest
from fastapi.testclient import TestClient
@pytest.fixture(scope="module")
def client():
static_dir: Path = Path(os.getcwd()) / "static"
print(static_dir)
static_dir.mkdir(exist_ok=True)
from docs_src.custom_docs_ui.tutorial001 import app
with TestClient(app) as client:
yield client
static_dir.rmdir()
def test_swagger_ui_html(client: TestClient):
response = client.get("/docs")
assert response.status_code == 200, response.text
assert "https://unpkg.com/swagger-ui-dist@5/swagger-ui-bundle.js" in response.text
assert "https://unpkg.com/swagger-ui-dist@5/swagger-ui.css" in response.text
def test_swagger_ui_oauth2_redirect_html(client: TestClient):
response = client.get("/docs/oauth2-redirect")
assert response.status_code == 200, response.text
assert "window.opener.swaggerUIRedirectOauth2" in response.text
def test_redoc_html(client: TestClient):
response = client.get("/redoc")
assert response.status_code == 200, response.text
assert "https://unpkg.com/redoc@next/bundles/redoc.standalone.js" in response.text
def test_api(client: TestClient):
response = client.get("/users/john")
assert response.status_code == 200, response.text
assert response.json()["message"] == "Hello john"
|
from pathlib import Path
from typing import Any
from langchain_core._api.path import as_import_path
def __getattr__(name: str) -> Any:
"""Get attr name."""
if name == "create_python_agent":
# Get directory of langchain package
HERE = Path(__file__).parents[3]
here = as_import_path(Path(__file__).parent, relative_to=HERE)
old_path = "langchain." + here + "." + name
new_path = "langchain_experimental." + here + "." + name
msg = (
"This agent has been moved to langchain experiment. "
"This agent relies on python REPL tool under the hood, so to use it "
"safely please sandbox the python REPL. "
"Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md "
"and https://github.com/langchain-ai/langchain/discussions/11680"
"To keep using this code as is, install langchain experimental and "
f"update your import statement from:\n `{old_path}` to `{new_path}`."
)
raise ImportError(msg)
msg = f"{name} does not exist"
raise AttributeError(msg)
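# After installing langchain-experimental, the moved agent is imported as shown
# below (illustrative; the path follows the deprecation message above):
#   from langchain_experimental.agents.agent_toolkits import create_python_agent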
|
from pathlib import Path
from typing import Any
from langchain_core._api.path import as_import_path
def __getattr__(name: str) -> Any:
"""Get attr name."""
if name == "create_python_agent":
# Get directory of langchain package
HERE = Path(__file__).parents[3]
here = as_import_path(Path(__file__).parent, relative_to=HERE)
old_path = "langchain." + here + "." + name
new_path = "langchain_experimental." + here + "." + name
raise ImportError(
"This agent has been moved to langchain experiment. "
"This agent relies on python REPL tool under the hood, so to use it "
"safely please sandbox the python REPL. "
"Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md "
"and https://github.com/langchain-ai/langchain/discussions/11680"
"To keep using this code as is, install langchain experimental and "
f"update your import statement from:\n `{old_path}` to `{new_path}`."
)
raise AttributeError(f"{name} does not exist")
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.pth' # noqa
model = dict(
type='LAD',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
# student
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# teacher
teacher_ckpt=teacher_ckpt,
teacher_backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
teacher_neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
teacher_bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
score_voting=True,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
train_dataloader = dict(batch_size=8, num_workers=4)
optim_wrapper = dict(type='AmpOptimWrapper', optimizer=dict(lr=0.01))
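# Example launch (illustrative; the config filename below is hypothetical --
# substitute the actual path of this file under configs/):
#   python tools/train.py configs/lad/lad_r50-paa-r101_fpn_2xb8_coco_1x.py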
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.pth' # noqa
model = dict(
type='LAD',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
# student
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# teacher
teacher_ckpt=teacher_ckpt,
teacher_backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
teacher_neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
teacher_bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
score_voting=True,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
train_dataloader = dict(batch_size=8, num_workers=4)
optim_wrapper = dict(type='AmpOptimWrapper', optimizer=dict(lr=0.01))
# TODO: MMEngine does not support fp16 yet.
# fp16 = dict(loss_scale=512.)
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
frozen_stages=0,
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
init_cfg=dict(
type='Pretrained', checkpoint='./mocov2_r50_800ep_pretrain.pth')))
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
frozen_stages=0,
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
init_cfg=dict(
type='Pretrained', checkpoint='./mocov2_r50_800ep_pretrain.pth')))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
# Copyright 2021 The HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Word Error Ratio (WER) metric."""
from jiwer import compute_measures
import datasets
_CITATION = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
_DESCRIPTION = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""
_KWARGS_DESCRIPTION = """
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"predictions": datasets.Value("string", id="sequence"),
"references": datasets.Value("string", id="sequence"),
}
),
codebase_urls=["https://github.com/jitsi/jiwer/"],
reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
],
)
def _compute(self, predictions=None, references=None, concatenate_texts=False):
if concatenate_texts:
return compute_measures(references, predictions)["wer"]
else:
incorrect = 0
total = 0
for prediction, reference in zip(predictions, references):
measures = compute_measures(reference, prediction)
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
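# Worked check of the formula above against the docstring example: for
# reference "this is the reference" vs prediction "this is the prediction",
# S=1, D=0, I=0 over N=4 words; for "there is another one" vs
# "there is an other sample", the best word alignment gives S=2, D=0, I=1 over
# N=4. Overall WER = (1 + 3) / (4 + 4) = 0.5, matching the doctest output.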
|
# Copyright 2021 The HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Word Error Ratio (WER) metric. """
from jiwer import compute_measures
import datasets
_CITATION = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
_DESCRIPTION = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""
_KWARGS_DESCRIPTION = """
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"predictions": datasets.Value("string", id="sequence"),
"references": datasets.Value("string", id="sequence"),
}
),
codebase_urls=["https://github.com/jitsi/jiwer/"],
reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
],
)
def _compute(self, predictions=None, references=None, concatenate_texts=False):
if concatenate_texts:
return compute_measures(references, predictions)["wer"]
else:
incorrect = 0
total = 0
for prediction, reference in zip(predictions, references):
measures = compute_measures(reference, prediction)
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_shrink
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_silu as hard_swish
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_sigmoid
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import silu as swish
from keras.src.activations.activations import soft_shrink
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import sparse_plus
from keras.src.activations.activations import sparsemax
from keras.src.activations.activations import squareplus
from keras.src.activations.activations import tanh
from keras.src.activations.activations import tanh_shrink
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_shrink
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_silu as hard_swish
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_sigmoid
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import silu as swish
from keras.src.activations.activations import soft_shrink
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import sparse_plus
from keras.src.activations.activations import squareplus
from keras.src.activations.activations import tanh
from keras.src.activations.activations import tanh_shrink
|
import warnings
from typing import Any, List, Union
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.transforms import functional as _F
@torch.jit.unused
def to_grayscale(inpt: PIL.Image.Image, num_output_channels: int = 1) -> PIL.Image.Image:
call = ", num_output_channels=3" if num_output_channels == 3 else ""
replacement = "convert_color_space(..., color_space=datapoints.ColorSpace.GRAY)"
if num_output_channels == 3:
replacement = f"convert_color_space({replacement}, color_space=datapoints.ColorSpace.RGB)"
warnings.warn(
f"The function `to_grayscale(...{call})` is deprecated in will be removed in a future release. "
f"Instead, please use `{replacement}`.",
)
return _F.to_grayscale(inpt, num_output_channels=num_output_channels)
@torch.jit.unused
def to_tensor(inpt: Any) -> torch.Tensor:
warnings.warn(
"The function `to_tensor(...)` is deprecated and will be removed in a future release. "
"Instead, please use `to_image_tensor(...)` followed by `convert_image_dtype(...)`."
)
return _F.to_tensor(inpt)
def get_image_size(inpt: Union[datapoints._ImageTypeJIT, datapoints._VideoTypeJIT]) -> List[int]:
warnings.warn(
"The function `get_image_size(...)` is deprecated and will be removed in a future release. "
"Instead, please use `get_spatial_size(...)` which returns `[h, w]` instead of `[w, h]`."
)
return _F.get_image_size(inpt)
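# Migration sketch for the deprecations above (illustrative; assumes
# `get_spatial_size` lives in the same v2 functional namespace as these shims):
#   from torchvision.transforms.v2 import functional as F
#   w, h = F.get_image_size(img)    # deprecated, returns [w, h]
#   h, w = F.get_spatial_size(img)  # replacement, returns [h, w]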
|
import warnings
from typing import Any, List, Union
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.transforms import functional as _F
@torch.jit.unused
def to_grayscale(inpt: PIL.Image.Image, num_output_channels: int = 1) -> PIL.Image.Image:
call = ", num_output_channels=3" if num_output_channels == 3 else ""
replacement = "convert_color_space(..., color_space=datapoints.ColorSpace.GRAY)"
if num_output_channels == 3:
replacement = f"convert_color_space({replacement}, color_space=datapoints.ColorSpace.RGB)"
warnings.warn(
f"The function `to_grayscale(...{call})` is deprecated in will be removed in a future release. "
f"Instead, please use `{replacement}`.",
)
return _F.to_grayscale(inpt, num_output_channels=num_output_channels)
@torch.jit.unused
def to_tensor(inpt: Any) -> torch.Tensor:
warnings.warn(
"The function `to_tensor(...)` is deprecated and will be removed in a future release. "
"Instead, please use `to_image_tensor(...)` followed by `convert_image_dtype(...)`."
)
return _F.to_tensor(inpt)
def get_image_size(inpt: Union[datapoints.ImageTypeJIT, datapoints.VideoTypeJIT]) -> List[int]:
warnings.warn(
"The function `get_image_size(...)` is deprecated and will be removed in a future release. "
"Instead, please use `get_spatial_size(...)` which returns `[h, w]` instead of `[w, h]`."
)
return _F.get_image_size(inpt)
|
import os
import pytest
import torch
import whisper
@pytest.mark.parametrize("model_name", whisper.available_models())
def test_transcribe(model_name: str):
device = "cuda" if torch.cuda.is_available() else "cpu"
model = whisper.load_model(model_name).to(device)
audio_path = os.path.join(os.path.dirname(__file__), "jfk.flac")
language = "en" if model_name.endswith(".en") else None
result = model.transcribe(audio_path, language=language, temperature=0.0, word_timestamps=True)
assert result["language"] == "en"
transcription = result["text"].lower()
assert "my fellow americans" in transcription
assert "your country" in transcription
assert "do for you" in transcription
timing_checked = False
for segment in result["segments"]:
for timing in segment["words"]:
assert timing["start"] < timing["end"]
if timing["word"].strip(" ,") == "Americans":
assert timing["start"] <= 1.8
assert timing["end"] >= 1.8
print(timing)
timing_checked = True
assert timing_checked
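# Minimal usage sketch of the API under test (illustrative; "audio.flac" is a
# placeholder path):
#   model = whisper.load_model("base")
#   result = model.transcribe("audio.flac", word_timestamps=True)
#   print(result["language"], result["text"])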
|
import os
import pytest
import torch
import whisper
@pytest.mark.parametrize("model_name", whisper.available_models())
def test_transcribe(model_name: str):
device = "cuda" if torch.cuda.is_available() else "cpu"
model = whisper.load_model(model_name).to(device)
audio_path = os.path.join(os.path.dirname(__file__), "jfk.flac")
language = "en" if model_name.endswith(".en") else None
result = model.transcribe(audio_path, language=language, temperature=0.0)
assert result["language"] == "en"
transcription = result["text"].lower()
assert "my fellow americans" in transcription
assert "your country" in transcription
assert "do for you" in transcription
|
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
from unittest.mock import patch
import transformers.commands.transformers_cli as cli
from transformers.commands.chat import ChatArguments, ChatCommand
from transformers.testing_utils import CaptureStd
class ChatCLITest(unittest.TestCase):
def test_help(self):
with patch("sys.argv", ["transformers", "chat", "--help"]), CaptureStd() as cs:
with self.assertRaises(SystemExit):
cli.main()
self.assertIn("chat interface", cs.out.lower())
@patch.object(ChatCommand, "run")
def test_cli_dispatch_model(self, run_mock):
"""
        Running `transformers chat` with just a model should work and spawn a `serve` process underneath.
"""
args = ["transformers", "chat", "hf-internal-testing/tiny-random-gpt2"]
with patch("sys.argv", args):
cli.main()
run_mock.assert_called_once()
def test_cli_dispatch_url(self):
"""
        Running `transformers chat` with just a URL should not work, as a model must additionally be specified.
"""
args = ["transformers", "chat", "localhost:8000"]
with self.assertRaises(ValueError):
with patch("sys.argv", args):
cli.main()
@patch.object(ChatCommand, "run")
def test_cli_dispatch_url_and_model(self, run_mock):
"""
        Running `transformers chat` with both a URL and a model should work.
"""
args = ["transformers", "chat", "localhost:8000", "--model_name_or_path=hf-internal-testing/tiny-random-gpt2"]
with patch("sys.argv", args):
cli.main()
run_mock.assert_called_once()
def test_parsed_args(self):
with (
patch.object(ChatCommand, "__init__", return_value=None) as init_mock,
patch.object(ChatCommand, "run") as run_mock,
patch(
"sys.argv",
[
"transformers",
"chat",
"test-model",
"max_new_tokens=64",
],
),
):
cli.main()
init_mock.assert_called_once()
run_mock.assert_called_once()
parsed_args = init_mock.call_args[0][0]
self.assertEqual(parsed_args.model_name_or_path_or_address, "test-model")
self.assertEqual(parsed_args.generate_flags, ["max_new_tokens=64"])
class ChatUtilitiesTest(unittest.TestCase):
def test_save_and_clear_chat(self):
tmp_path = tempfile.mkdtemp()
args = ChatArguments(save_folder=str(tmp_path))
args.model_name_or_path_or_address = "test-model"
chat_history = [{"role": "user", "content": "hi"}]
filename = ChatCommand.save_chat(chat_history, args)
self.assertTrue(os.path.isfile(filename))
cleared = ChatCommand.clear_chat_history()
self.assertEqual(cleared, [])
def test_parse_generate_flags(self):
dummy = ChatCommand.__new__(ChatCommand)
parsed = ChatCommand.parse_generate_flags(dummy, ["temperature=0.5", "max_new_tokens=10"])
self.assertEqual(parsed["temperature"], 0.5)
self.assertEqual(parsed["max_new_tokens"], 10)
|
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
from unittest.mock import patch
import transformers.commands.transformers_cli as cli
from transformers.commands.chat import ChatArguments, ChatCommand
from transformers.testing_utils import CaptureStd
class ChatCLITest(unittest.TestCase):
def test_help(self):
with patch("sys.argv", ["transformers", "chat", "--help"]), CaptureStd() as cs:
with self.assertRaises(SystemExit):
cli.main()
self.assertIn("chat interface", cs.out.lower())
@patch.object(ChatCommand, "run")
def test_cli_dispatch(self, run_mock):
args = ["transformers", "chat", "hf-internal-testing/tiny-random-gpt2"]
with patch("sys.argv", args):
cli.main()
run_mock.assert_called_once()
def test_parsed_args(self):
with (
patch.object(ChatCommand, "__init__", return_value=None) as init_mock,
patch.object(ChatCommand, "run") as run_mock,
patch(
"sys.argv",
[
"transformers",
"chat",
"test-model",
"max_new_tokens=64",
],
),
):
cli.main()
init_mock.assert_called_once()
run_mock.assert_called_once()
parsed_args = init_mock.call_args[0][0]
self.assertEqual(parsed_args.model_name_or_path_or_address, "test-model")
self.assertEqual(parsed_args.generate_flags, ["max_new_tokens=64"])
class ChatUtilitiesTest(unittest.TestCase):
def test_save_and_clear_chat(self):
tmp_path = tempfile.mkdtemp()
args = ChatArguments(save_folder=str(tmp_path))
args.model_name_or_path_or_address = "test-model"
chat_history = [{"role": "user", "content": "hi"}]
filename = ChatCommand.save_chat(chat_history, args)
self.assertTrue(os.path.isfile(filename))
cleared = ChatCommand.clear_chat_history()
self.assertEqual(cleared, [])
def test_parse_generate_flags(self):
dummy = ChatCommand.__new__(ChatCommand)
parsed = ChatCommand.parse_generate_flags(dummy, ["temperature=0.5", "max_new_tokens=10"])
self.assertEqual(parsed["temperature"], 0.5)
self.assertEqual(parsed["max_new_tokens"], 10)
|
from diffusers.utils import is_torch_available
from diffusers.utils.testing_utils import (
backend_empty_cache,
backend_max_memory_allocated,
backend_reset_peak_memory_stats,
torch_device,
)
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
"""Wraps a linear layer with LoRA-like adapter - Used for testing purposes only
Taken from
https://github.com/huggingface/transformers/blob/566302686a71de14125717dea9a6a45b24d42b37/tests/quantization/bnb/test_4bit.py#L62C5-L78C77
"""
def __init__(self, module: nn.Module, rank: int):
super().__init__()
self.module = module
self.adapter = nn.Sequential(
nn.Linear(module.in_features, rank, bias=False),
nn.Linear(rank, module.out_features, bias=False),
)
small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
nn.init.normal_(self.adapter[0].weight, std=small_std)
nn.init.zeros_(self.adapter[1].weight)
self.adapter.to(module.weight.device)
def forward(self, input, *args, **kwargs):
return self.module(input, *args, **kwargs) + self.adapter(input)
@torch.no_grad()
@torch.inference_mode()
def get_memory_consumption_stat(model, inputs):
backend_reset_peak_memory_stats(torch_device)
backend_empty_cache(torch_device)
model(**inputs)
max_mem_allocated = backend_max_memory_allocated(torch_device)
return max_mem_allocated
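# Usage sketch (illustrative): wrap a single linear layer so only the low-rank
# adapter weights would be trained. At init the wrapper matches the base layer
# exactly, because the second adapter matrix is zero-initialized.
#   base = nn.Linear(64, 64)
#   lora = LoRALayer(base, rank=4)
#   x = torch.randn(2, 64)
#   assert torch.allclose(lora(x), base(x))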
|
from diffusers.utils import is_torch_available
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
"""Wraps a linear layer with LoRA-like adapter - Used for testing purposes only
Taken from
https://github.com/huggingface/transformers/blob/566302686a71de14125717dea9a6a45b24d42b37/tests/quantization/bnb/test_4bit.py#L62C5-L78C77
"""
def __init__(self, module: nn.Module, rank: int):
super().__init__()
self.module = module
self.adapter = nn.Sequential(
nn.Linear(module.in_features, rank, bias=False),
nn.Linear(rank, module.out_features, bias=False),
)
small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
nn.init.normal_(self.adapter[0].weight, std=small_std)
nn.init.zeros_(self.adapter[1].weight)
self.adapter.to(module.weight.device)
def forward(self, input, *args, **kwargs):
return self.module(input, *args, **kwargs) + self.adapter(input)
@torch.no_grad()
@torch.inference_mode()
def get_memory_consumption_stat(model, inputs):
torch.cuda.reset_peak_memory_stats()
torch.cuda.empty_cache()
model(**inputs)
    max_mem_allocated = torch.cuda.max_memory_allocated()
    return max_mem_allocated
|
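The two copies above differ only in how they touch the accelerator: the first routes through diffusers' device-agnostic backend_* test helpers, the second calls torch.cuda directly. A minimal sketch of what such a dispatch helper can look like (a standalone assumption, not the diffusers implementation):

import torch

def reset_peak_memory_stats(device: str) -> None:
    # Dispatch on the device string so the same test body runs on CUDA or XPU.
    if device.startswith("cuda"):
        torch.cuda.reset_peak_memory_stats()
    elif device.startswith("xpu"):
        # torch.xpu mirrors the torch.cuda memory API on recent PyTorch builds.
        torch.xpu.reset_peak_memory_stats()
    # CPU exposes no peak-memory counter in torch, so it is a no-op here.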
import os
from datetime import datetime
import pytest
from llama_index.core.base.llms.base import BaseLLM
from llama_index.core.tools import FunctionTool
from llama_index.llms.novita import NovitaAI
model = "meta-llama/llama-3.1-8b-instruct"
model_function_calling = "deepseek/deepseek_v3"
api_key = os.environ.get("NOVITA_API_KEY", "")
def test_llm_class():
names_of_base_classes = [b.__name__ for b in NovitaAI.__mro__]
assert BaseLLM.__name__ in names_of_base_classes
def test_novita_llm_model_alias():
llm = NovitaAI(model=model, api_key=api_key)
assert llm.model == model
def test_novita_llm_metadata():
llm = NovitaAI(model=model_function_calling, api_key=api_key)
assert llm.metadata.is_function_calling_model is True
llm = NovitaAI(model=model, api_key=api_key)
assert llm.metadata.is_function_calling_model is False
@pytest.mark.skipif(not api_key, reason="No Novita API key set")
def test_completion():
llm = NovitaAI(model=model, api_key=api_key)
response = llm.complete("who are you")
print(response)
assert response
@pytest.mark.skipif(not api_key, reason="No Novita API key set")
@pytest.mark.asyncio
async def test_async_completion():
llm = NovitaAI(model=model, api_key=api_key)
response = await llm.acomplete("who are you")
print(response)
assert response
@pytest.mark.skipif(not api_key, reason="No Novita API key set")
def test_stream_complete():
llm = NovitaAI(model=model, api_key=api_key)
response = llm.stream_complete("who are you")
responses = []
for r in response:
responses.append(r)
print(r.delta, end="")
assert responses
assert len(responses) > 0
@pytest.mark.skipif(not api_key, reason="No Novita API key set")
@pytest.mark.asyncio
async def test_astream_complete():
llm = NovitaAI(model=model, api_key=api_key)
response = await llm.astream_complete("who are you")
responses = []
async for r in response:
responses.append(r)
print(r.delta, end="")
assert responses
assert len(responses) > 0
@pytest.mark.skipif(not api_key, reason="No Novita API key set")
def test_function_calling():
def get_current_time() -> dict:
"""Get the current time."""
return {"time": datetime.now().strftime("%Y-%m-%d %H:%M:%S")}
llm = NovitaAI(model=model_function_calling, api_key=api_key)
tool = FunctionTool.from_defaults(fn=get_current_time)
response = llm.predict_and_call([tool], "What is the current time?")
print(response)
assert response
|
import os
from datetime import datetime
import pytest
from llama_index.core.base.llms.base import BaseLLM
from llama_index.core.tools import FunctionTool
from llama_index.llms.novita import NovitaAI
model = "meta-llama/llama-3.1-8b-instruct"
model_function_calling = "deepseek/deepseek_v3"
api_key = os.environ.get("NOVITA_API_KEY", "")
def test_llm_class():
names_of_base_classes = [b.__name__ for b in NovitaAI.__mro__]
assert BaseLLM.__name__ in names_of_base_classes
def test_novita_llm_model_alias():
llm = NovitaAI(model=model, api_key=api_key)
assert llm.model == model
def test_novita_llm_metadata():
llm = NovitaAI(model=model_function_calling, api_key=api_key)
assert llm.metadata.is_function_calling_model is True
llm = NovitaAI(model=model, api_key=api_key)
assert llm.metadata.is_function_calling_model is False
@pytest.mark.skipif(not api_key, reason="No Novita API key set")
def test_completion():
llm = NovitaAI(model=model, api_key=api_key)
response = llm.complete("who are you")
print(response)
assert response
@pytest.mark.skipif(not api_key, reason="No Novita API key set")
@pytest.mark.asyncio()
async def test_async_completion():
llm = NovitaAI(model=model, api_key=api_key)
response = await llm.acomplete("who are you")
print(response)
assert response
@pytest.mark.skipif(not api_key, reason="No Novita API key set")
def test_stream_complete():
llm = NovitaAI(model=model, api_key=api_key)
response = llm.stream_complete("who are you")
responses = []
for r in response:
responses.append(r)
print(r.delta, end="")
assert responses
assert len(responses) > 0
@pytest.mark.skipif(not api_key, reason="No Novita API key set")
@pytest.mark.asyncio()
async def test_astream_complete():
llm = NovitaAI(model=model, api_key=api_key)
response = await llm.astream_complete("who are you")
responses = []
async for r in response:
responses.append(r)
print(r.delta, end="")
assert responses
assert len(responses) > 0
@pytest.mark.skipif(not api_key, reason="No Novita API key set")
def test_function_calling():
def get_current_time() -> dict:
"""Get the current time."""
return {"time": datetime.now().strftime("%Y-%m-%d %H:%M:%S")}
llm = NovitaAI(model=model_function_calling, api_key=api_key)
tool = FunctionTool.from_defaults(fn=get_current_time)
response = llm.predict_and_call([tool], "What is the current time?")
print(response)
assert response
|
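Every network-dependent test above repeats the same skipif gate on NOVITA_API_KEY. One common refactor, sketched here as a hypothetical helper rather than part of the test suite, hoists the gate into a single reusable marker:

import os

import pytest

# Evaluated once at import time; each integration test then reuses the marker.
requires_novita = pytest.mark.skipif(
    not os.environ.get("NOVITA_API_KEY"),
    reason="No Novita API key set",
)

@requires_novita
def test_completion_gated():
    ...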
"""Prompt Mixin."""
from abc import ABC, abstractmethod
from collections import defaultdict
from copy import deepcopy
from typing import Dict, Union
from llama_index.core.prompts.base import BasePromptTemplate
HasPromptType = Union["PromptMixin", BasePromptTemplate]
PromptDictType = Dict[str, BasePromptTemplate]
PromptMixinType = Dict[str, "PromptMixin"]
class PromptMixin(ABC):
"""
Prompt mixin.
    This mixin is used in other modules, such as query engines and response synthesizers.
    It indicates that the module supports getting and setting prompts,
    both on the immediate module and on its child modules.
"""
def _validate_prompts(
self,
prompts_dict: PromptDictType,
module_dict: PromptMixinType,
) -> None:
"""Validate prompts."""
# check if prompts_dict, module_dict has restricted ":" token
for key in prompts_dict:
if ":" in key:
raise ValueError(f"Prompt key {key} cannot contain ':'.")
for key in module_dict:
if ":" in key:
raise ValueError(f"Prompt key {key} cannot contain ':'.")
def get_prompts(self) -> Dict[str, BasePromptTemplate]:
"""Get a prompt."""
prompts_dict = self._get_prompts()
module_dict = self._get_prompt_modules()
self._validate_prompts(prompts_dict, module_dict)
# avoid modifying the original dict
all_prompts = deepcopy(prompts_dict)
for module_name, prompt_module in module_dict.items():
# append module name to each key in sub-modules by ":"
for key, prompt in prompt_module.get_prompts().items():
all_prompts[f"{module_name}:{key}"] = prompt
return all_prompts
def update_prompts(self, prompts_dict: Dict[str, BasePromptTemplate]) -> None:
"""
Update prompts.
Other prompts will remain in place.
"""
prompt_modules = self._get_prompt_modules()
# update prompts for current module
self._update_prompts(prompts_dict)
# get sub-module keys
# mapping from module name to sub-module prompt keys
sub_prompt_dicts: Dict[str, PromptDictType] = defaultdict(dict)
for key in prompts_dict:
if ":" in key:
module_name, sub_key = key.split(":")
sub_prompt_dicts[module_name][sub_key] = prompts_dict[key]
# now update prompts for submodules
for module_name, sub_prompt_dict in sub_prompt_dicts.items():
if module_name not in prompt_modules:
raise ValueError(f"Module {module_name} not found.")
module = prompt_modules[module_name]
module.update_prompts(sub_prompt_dict)
@abstractmethod
def _get_prompts(self) -> PromptDictType:
"""Get prompts."""
@abstractmethod
def _get_prompt_modules(self) -> PromptMixinType:
"""
Get prompt sub-modules.
Return a dictionary of sub-modules within the current module
that also implement PromptMixin (so that their prompts can also be get/set).
        Can be empty if there are no sub-modules.
"""
@abstractmethod
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
"""Update prompts."""
|
"""Prompt Mixin."""
from abc import ABC, abstractmethod
from collections import defaultdict
from copy import deepcopy
from typing import Dict, Union
from llama_index.core.prompts.base import BasePromptTemplate
HasPromptType = Union["PromptMixin", BasePromptTemplate]
PromptDictType = Dict[str, BasePromptTemplate]
PromptMixinType = Dict[str, "PromptMixin"]
class PromptMixin(ABC):
"""Prompt mixin.
    This mixin is used in other modules, such as query engines and response synthesizers.
    It indicates that the module supports getting and setting prompts,
    both on the immediate module and on its child modules.
"""
def _validate_prompts(
self,
prompts_dict: PromptDictType,
module_dict: PromptMixinType,
) -> None:
"""Validate prompts."""
# check if prompts_dict, module_dict has restricted ":" token
for key in prompts_dict:
if ":" in key:
raise ValueError(f"Prompt key {key} cannot contain ':'.")
for key in module_dict:
if ":" in key:
raise ValueError(f"Prompt key {key} cannot contain ':'.")
def get_prompts(self) -> Dict[str, BasePromptTemplate]:
"""Get a prompt."""
prompts_dict = self._get_prompts()
module_dict = self._get_prompt_modules()
self._validate_prompts(prompts_dict, module_dict)
# avoid modifying the original dict
all_prompts = deepcopy(prompts_dict)
for module_name, prompt_module in module_dict.items():
# append module name to each key in sub-modules by ":"
for key, prompt in prompt_module.get_prompts().items():
all_prompts[f"{module_name}:{key}"] = prompt
return all_prompts
def update_prompts(self, prompts_dict: Dict[str, BasePromptTemplate]) -> None:
"""Update prompts.
Other prompts will remain in place.
"""
prompt_modules = self._get_prompt_modules()
# update prompts for current module
self._update_prompts(prompts_dict)
# get sub-module keys
# mapping from module name to sub-module prompt keys
sub_prompt_dicts: Dict[str, PromptDictType] = defaultdict(dict)
for key in prompts_dict:
if ":" in key:
module_name, sub_key = key.split(":")
sub_prompt_dicts[module_name][sub_key] = prompts_dict[key]
# now update prompts for submodules
for module_name, sub_prompt_dict in sub_prompt_dicts.items():
if module_name not in prompt_modules:
raise ValueError(f"Module {module_name} not found.")
module = prompt_modules[module_name]
module.update_prompts(sub_prompt_dict)
@abstractmethod
def _get_prompts(self) -> PromptDictType:
"""Get prompts."""
@abstractmethod
def _get_prompt_modules(self) -> PromptMixinType:
"""Get prompt sub-modules.
Return a dictionary of sub-modules within the current module
that also implement PromptMixin (so that their prompts can also be get/set).
        Can be empty if there are no sub-modules.
"""
@abstractmethod
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
"""Update prompts."""
|
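To make the ":" namespacing concrete, here is a small self-contained sketch (toy classes built on the PromptMixin above, not llama_index internals) showing how composite keys route through update_prompts:

from llama_index.core.prompts import PromptTemplate

class Child(PromptMixin):
    def __init__(self):
        self._qa = PromptTemplate("Answer the question: {query_str}")

    def _get_prompts(self):
        return {"qa": self._qa}

    def _get_prompt_modules(self):
        return {}

    def _update_prompts(self, prompts_dict):
        if "qa" in prompts_dict:
            self._qa = prompts_dict["qa"]

class Parent(PromptMixin):
    def __init__(self):
        self.child = Child()

    def _get_prompts(self):
        return {}

    def _get_prompt_modules(self):
        return {"child": self.child}

    def _update_prompts(self, prompts_dict):
        pass

parent = Parent()
print(list(parent.get_prompts()))  # ['child:qa'] -- the sub-module key is namespaced
parent.update_prompts({"child:qa": PromptTemplate("Be brief: {query_str}")})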