input (string, 33-5k chars) | output (string, 32-5k chars)
---|---
import threading
from typing import Callable, ParamSpec, TypeVar
P = ParamSpec("P")
R = TypeVar("R")
def thread_cached(func: Callable[P, R]) -> Callable[P, R]:
thread_local = threading.local()
def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
cache = getattr(thread_local, "cache", None)
if cache is None:
cache = thread_local.cache = {}
key = (args, tuple(sorted(kwargs.items())))
if key not in cache:
cache[key] = func(*args, **kwargs)
return cache[key]
return wrapper
|
from typing import Callable, TypeVar, ParamSpec
import threading
P = ParamSpec("P")
R = TypeVar("R")
def thread_cached(func: Callable[P, R]) -> Callable[P, R]:
thread_local = threading.local()
def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
cache = getattr(thread_local, "cache", None)
if cache is None:
cache = thread_local.cache = {}
key = (args, tuple(sorted(kwargs.items())))
if key not in cache:
cache[key] = func(*args, **kwargs)
return cache[key]
return wrapper
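A minimal usage sketch of `thread_cached`, with a hypothetical `square` function: each thread holds its own cache, so a result memoized in one thread is recomputed in another.

import threading

@thread_cached
def square(x: int) -> int:  # hypothetical example function
    print(f"computing {x} in {threading.current_thread().name}")
    return x * x

square(3)  # computes and caches in the main thread
square(3)  # cache hit: nothing printed

# A new thread starts with an empty thread-local cache and recomputes.
worker = threading.Thread(target=square, args=(3,))
worker.start()
worker.join()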
|
from backend.data.block import (
Block,
BlockCategory,
BlockManualWebhookConfig,
BlockOutput,
BlockSchema,
)
from backend.data.model import SchemaField
from backend.integrations.providers import ProviderName
from backend.integrations.webhooks.generic import GenericWebhookType
class GenericWebhookTriggerBlock(Block):
class Input(BlockSchema):
payload: dict = SchemaField(hidden=True, default_factory=dict)
constants: dict = SchemaField(
description="The constants to be set when the block is put on the graph",
default_factory=dict,
)
class Output(BlockSchema):
payload: dict = SchemaField(
description="The complete webhook payload that was received from the generic webhook."
)
constants: dict = SchemaField(
description="The constants to be set when the block is put on the graph"
)
example_payload = {"message": "Hello, World!"}
def __init__(self):
super().__init__(
id="8fa8c167-2002-47ce-aba8-97572fc5d387",
description="This block will output the contents of the generic input for the webhook.",
categories={BlockCategory.INPUT},
input_schema=GenericWebhookTriggerBlock.Input,
output_schema=GenericWebhookTriggerBlock.Output,
webhook_config=BlockManualWebhookConfig(
provider=ProviderName.GENERIC_WEBHOOK,
webhook_type=GenericWebhookType.PLAIN,
),
test_input={"constants": {"key": "value"}, "payload": self.example_payload},
test_output=[
("constants", {"key": "value"}),
("payload", self.example_payload),
],
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
yield "constants", input_data.constants
yield "payload", input_data.payload
|
from backend.data.block import (
Block,
BlockCategory,
BlockManualWebhookConfig,
BlockOutput,
BlockSchema,
)
from backend.data.model import SchemaField
from backend.integrations.providers import ProviderName
from backend.integrations.webhooks.generic import GenericWebhookType
class GenericWebhookTriggerBlock(Block):
class Input(BlockSchema):
payload: dict = SchemaField(hidden=True, default={})
constants: dict = SchemaField(
description="The constants to be set when the block is put on the graph",
default={},
)
class Output(BlockSchema):
payload: dict = SchemaField(
description="The complete webhook payload that was received from the generic webhook."
)
constants: dict = SchemaField(
description="The constants to be set when the block is put on the graph"
)
example_payload = {"message": "Hello, World!"}
def __init__(self):
super().__init__(
id="8fa8c167-2002-47ce-aba8-97572fc5d387",
description="This block will output the contents of the generic input for the webhook.",
categories={BlockCategory.INPUT},
input_schema=GenericWebhookTriggerBlock.Input,
output_schema=GenericWebhookTriggerBlock.Output,
webhook_config=BlockManualWebhookConfig(
provider=ProviderName.GENERIC_WEBHOOK,
webhook_type=GenericWebhookType.PLAIN,
),
test_input={"constants": {"key": "value"}, "payload": self.example_payload},
test_output=[
("constants", {"key": "value"}),
("payload", self.example_payload),
],
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
yield "constants", input_data.constants
yield "payload", input_data.payload
|
import math
import random
class NoDuplicatesDataLoader:
def __init__(self, train_examples, batch_size):
"""
A special data loader to be used with MultipleNegativesRankingLoss.
The data loader ensures that there are no duplicate sentences within the same batch
"""
self.batch_size = batch_size
self.data_pointer = 0
self.collate_fn = None
self.train_examples = train_examples
random.shuffle(self.train_examples)
def __iter__(self):
for _ in range(self.__len__()):
batch = []
texts_in_batch = set()
while len(batch) < self.batch_size:
example = self.train_examples[self.data_pointer]
valid_example = True
for text in example.texts:
if text.strip().lower() in texts_in_batch:
valid_example = False
break
if valid_example:
batch.append(example)
for text in example.texts:
texts_in_batch.add(text.strip().lower())
self.data_pointer += 1
if self.data_pointer >= len(self.train_examples):
self.data_pointer = 0
random.shuffle(self.train_examples)
yield self.collate_fn(batch) if self.collate_fn is not None else batch
def __len__(self):
return math.floor(len(self.train_examples) / self.batch_size)
|
import random
import math
class NoDuplicatesDataLoader:
def __init__(self, train_examples, batch_size):
"""
A special data loader to be used with MultipleNegativesRankingLoss.
The data loader ensures that there are no duplicate sentences within the same batch
"""
self.batch_size = batch_size
self.data_pointer = 0
self.collate_fn = None
self.train_examples = train_examples
random.shuffle(self.train_examples)
def __iter__(self):
for _ in range(self.__len__()):
batch = []
texts_in_batch = set()
while len(batch) < self.batch_size:
example = self.train_examples[self.data_pointer]
valid_example = True
for text in example.texts:
if text.strip().lower() in texts_in_batch:
valid_example = False
break
if valid_example:
batch.append(example)
for text in example.texts:
texts_in_batch.add(text.strip().lower())
self.data_pointer += 1
if self.data_pointer >= len(self.train_examples):
self.data_pointer = 0
random.shuffle(self.train_examples)
yield self.collate_fn(batch) if self.collate_fn is not None else batch
def __len__(self):
return math.floor(len(self.train_examples) / self.batch_size)
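A usage sketch with a stand-in for `sentence_transformers.InputExample` (the loader only reads the `.texts` attribute); every yielded batch is free of duplicate normalized sentences:

from dataclasses import dataclass

@dataclass
class Example:  # minimal stand-in exposing the `.texts` attribute the loader needs
    texts: list

examples = [
    Example(["a cat", "a feline"]),
    Example(["a dog", "a canine"]),
    Example(["a cat", "a kitty"]),  # shares "a cat" with the first example
    Example(["a bird", "an avian"]),
]
loader = NoDuplicatesDataLoader(examples, batch_size=2)
for batch in loader:
    texts = [t.strip().lower() for ex in batch for t in ex.texts]
    assert len(texts) == len(set(texts))  # no duplicates within a batch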
|
from docarray.document.any_document import AnyDocument
from docarray.document.document import BaseDocument
__all__ = ['AnyDocument', 'BaseDocument']
|
from docarray.document.any_document import AnyDocument
from docarray.document.document import BaseDocument
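The pair above differs only in whether `__all__` is declared. `__all__` makes the star-import surface explicit; without it, every non-underscore top-level name (including names bound by the import statements) leaks through. A self-contained sketch using a synthetic module:

import sys
import types

demo = types.ModuleType("demo")
exec("def public(): pass\ndef _private(): pass\n__all__ = ['public']", demo.__dict__)
sys.modules["demo"] = demo  # register so the star-import below can find it

ns = {}
exec("from demo import *", ns)
print(sorted(n for n in ns if not n.startswith("__")))  # ['public']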
|
import datetime
from typing import List
import prisma.enums
import pydantic
class Pagination(pydantic.BaseModel):
total_items: int = pydantic.Field(
description="Total number of items.", examples=[42]
)
total_pages: int = pydantic.Field(
description="Total number of pages.", examples=[97]
)
current_page: int = pydantic.Field(
description="Current_page page number.", examples=[1]
)
page_size: int = pydantic.Field(
description="Number of items per page.", examples=[25]
)
class MyAgent(pydantic.BaseModel):
agent_id: str
agent_version: int
agent_name: str
last_edited: datetime.datetime
class MyAgentsResponse(pydantic.BaseModel):
agents: list[MyAgent]
pagination: Pagination
class StoreAgent(pydantic.BaseModel):
slug: str
agent_name: str
agent_image: str
creator: str
creator_avatar: str
sub_heading: str
description: str
runs: int
rating: float
class StoreAgentsResponse(pydantic.BaseModel):
agents: list[StoreAgent]
pagination: Pagination
class StoreAgentDetails(pydantic.BaseModel):
store_listing_version_id: str
slug: str
agent_name: str
agent_video: str
agent_image: list[str]
creator: str
creator_avatar: str
sub_heading: str
description: str
categories: list[str]
runs: int
rating: float
versions: list[str]
last_updated: datetime.datetime
class Creator(pydantic.BaseModel):
name: str
username: str
description: str
avatar_url: str
num_agents: int
agent_rating: float
agent_runs: int
is_featured: bool
class CreatorsResponse(pydantic.BaseModel):
creators: List[Creator]
pagination: Pagination
class CreatorDetails(pydantic.BaseModel):
name: str
username: str
description: str
links: list[str]
avatar_url: str
agent_rating: float
agent_runs: int
top_categories: list[str]
class Profile(pydantic.BaseModel):
name: str
username: str
description: str
links: list[str]
avatar_url: str
is_featured: bool
class StoreSubmission(pydantic.BaseModel):
agent_id: str
agent_version: int
name: str
sub_heading: str
slug: str
description: str
image_urls: list[str]
date_submitted: datetime.datetime
status: prisma.enums.SubmissionStatus
runs: int
rating: float
class StoreSubmissionsResponse(pydantic.BaseModel):
submissions: list[StoreSubmission]
pagination: Pagination
class StoreSubmissionRequest(pydantic.BaseModel):
agent_id: str
agent_version: int
slug: str
name: str
sub_heading: str
video_url: str | None = None
image_urls: list[str] = []
description: str = ""
categories: list[str] = []
class ProfileDetails(pydantic.BaseModel):
name: str
username: str
description: str
links: list[str]
avatar_url: str | None = None
class StoreReview(pydantic.BaseModel):
score: int
comments: str | None = None
class StoreReviewCreate(pydantic.BaseModel):
store_listing_version_id: str
score: int
comments: str | None = None
|
import datetime
from typing import List
import prisma.enums
import pydantic
class Pagination(pydantic.BaseModel):
total_items: int = pydantic.Field(
description="Total number of items.", examples=[42]
)
total_pages: int = pydantic.Field(
description="Total number of pages.", examples=[97]
)
current_page: int = pydantic.Field(
description="Current_page page number.", examples=[1]
)
page_size: int = pydantic.Field(
description="Number of items per page.", examples=[25]
)
class MyAgent(pydantic.BaseModel):
agent_id: str
agent_version: int
agent_name: str
last_edited: datetime.datetime
class MyAgentsResponse(pydantic.BaseModel):
agents: list[MyAgent]
pagination: Pagination
class StoreAgent(pydantic.BaseModel):
slug: str
agent_name: str
agent_image: str
creator: str
creator_avatar: str
sub_heading: str
description: str
runs: int
rating: float
class StoreAgentsResponse(pydantic.BaseModel):
agents: list[StoreAgent]
pagination: Pagination
class StoreAgentDetails(pydantic.BaseModel):
store_listing_version_id: str
slug: str
agent_name: str
agent_video: str
agent_image: list[str]
creator: str
creator_avatar: str
sub_heading: str
description: str
categories: list[str]
runs: int
rating: float
versions: list[str]
last_updated: datetime.datetime
class Creator(pydantic.BaseModel):
name: str
username: str
description: str
avatar_url: str
num_agents: int
agent_rating: float
agent_runs: int
class CreatorsResponse(pydantic.BaseModel):
creators: List[Creator]
pagination: Pagination
class CreatorDetails(pydantic.BaseModel):
name: str
username: str
description: str
links: list[str]
avatar_url: str
agent_rating: float
agent_runs: int
top_categories: list[str]
class Profile(pydantic.BaseModel):
name: str
username: str
description: str
links: list[str]
avatar_url: str
class StoreSubmission(pydantic.BaseModel):
agent_id: str
agent_version: int
name: str
sub_heading: str
slug: str
description: str
image_urls: list[str]
date_submitted: datetime.datetime
status: prisma.enums.SubmissionStatus
runs: int
rating: float
class StoreSubmissionsResponse(pydantic.BaseModel):
submissions: list[StoreSubmission]
pagination: Pagination
class StoreSubmissionRequest(pydantic.BaseModel):
agent_id: str
agent_version: int
slug: str
name: str
sub_heading: str
video_url: str | None = None
image_urls: list[str] = []
description: str = ""
categories: list[str] = []
class ProfileDetails(pydantic.BaseModel):
name: str
username: str
description: str
links: list[str]
avatar_url: str | None = None
class StoreReview(pydantic.BaseModel):
score: int
comments: str | None = None
class StoreReviewCreate(pydantic.BaseModel):
store_listing_version_id: str
score: int
comments: str | None = None
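A small construction and serialization sketch for the models above (values are illustrative; `model_dump_json` is pydantic v2, use `.json()` on v1):

page = Pagination(total_items=42, total_pages=2, current_page=1, page_size=25)
response = StoreAgentsResponse(
    agents=[
        StoreAgent(
            slug="hello-agent",
            agent_name="Hello Agent",
            agent_image="https://example.com/hello.png",
            creator="alice",
            creator_avatar="https://example.com/alice.png",
            sub_heading="Says hello",
            description="A demo agent.",
            runs=10,
            rating=4.5,
        )
    ],
    pagination=page,
)
print(response.model_dump_json(indent=2))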
|
from __future__ import annotations
import argparse
import concurrent.futures
import json
import logging
import os
import subprocess
import sys
from enum import Enum
from pathlib import Path
from typing import NamedTuple
REPO_ROOT = Path(__file__).absolute().parents[3]
PYPROJECT = REPO_ROOT / "pyproject.toml"
DICTIONARY = REPO_ROOT / "tools" / "linter" / "dictionary.txt"
FORBIDDEN_WORDS = {
"multipy", # project pytorch/multipy is dead # codespell:ignore multipy
}
class LintSeverity(str, Enum):
ERROR = "error"
WARNING = "warning"
ADVICE = "advice"
DISABLED = "disabled"
class LintMessage(NamedTuple):
path: str | None
line: int | None
char: int | None
code: str
severity: LintSeverity
name: str
original: str | None
replacement: str | None
description: str | None
def format_error_message(
filename: str,
error: Exception | None = None,
*,
message: str | None = None,
) -> LintMessage:
if message is None and error is not None:
message = (
f"Failed due to {error.__class__.__name__}:\n{error}\n"
"Please either fix the error or add the word(s) to the dictionary file.\n"
"HINT: all-lowercase words in the dictionary can cover all case variations."
)
return LintMessage(
path=filename,
line=None,
char=None,
code="CODESPELL",
severity=LintSeverity.ERROR,
name="spelling error",
original=None,
replacement=None,
description=message,
)
def run_codespell(path: Path) -> str:
try:
return subprocess.check_output(
[
sys.executable,
"-m",
"codespell_lib",
"--toml",
str(PYPROJECT),
str(path),
],
stderr=subprocess.STDOUT,
text=True,
encoding="utf-8",
)
except subprocess.CalledProcessError as exc:
raise ValueError(exc.output) from exc
def check_file(filename: str) -> list[LintMessage]:
path = Path(filename).absolute()
try:
run_codespell(path)
except Exception as err:
return [format_error_message(filename, err)]
return []
def check_dictionary(filename: str) -> list[LintMessage]:
"""Check the dictionary file for duplicates."""
path = Path(filename).absolute()
try:
words = path.read_text(encoding="utf-8").splitlines()
words_set = set(words)
if len(words) != len(words_set):
raise ValueError("The dictionary file contains duplicate entries.")
uncased_words = list(map(str.lower, words))
if uncased_words != sorted(uncased_words):
raise ValueError(
"The dictionary file is not sorted alphabetically (case-insensitive)."
)
for forbidden_word in sorted(
FORBIDDEN_WORDS & (words_set | set(uncased_words))
):
raise ValueError(
f"The dictionary file contains a forbidden word: {forbidden_word!r}. "
"Please remove it from the dictionary file and use 'codespell:ignore' "
"inline comment instead."
)
except Exception as err:
return [format_error_message(str(filename), err)]
return []
def main() -> None:
parser = argparse.ArgumentParser(
description="Check files for spelling mistakes using codespell.",
fromfile_prefix_chars="@",
)
parser.add_argument(
"--verbose",
action="store_true",
help="verbose logging",
)
parser.add_argument(
"filenames",
nargs="+",
help="paths to lint",
)
args = parser.parse_args()
logging.basicConfig(
format="<%(processName)s:%(levelname)s> %(message)s",
level=logging.NOTSET
if args.verbose
else logging.DEBUG
if len(args.filenames) < 1000
else logging.INFO,
stream=sys.stderr,
)
with concurrent.futures.ProcessPoolExecutor(
max_workers=os.cpu_count(),
) as executor:
futures = {executor.submit(check_file, x): x for x in args.filenames}
futures[executor.submit(check_dictionary, str(DICTIONARY))] = str(DICTIONARY)
for future in concurrent.futures.as_completed(futures):
try:
for lint_message in future.result():
print(json.dumps(lint_message._asdict()), flush=True)
except Exception:
logging.critical('Failed at "%s".', futures[future])
raise
if __name__ == "__main__":
main()
|
from __future__ import annotations
import argparse
import concurrent.futures
import json
import logging
import os
import subprocess
import sys
from enum import Enum
from pathlib import Path
from typing import NamedTuple
REPO_ROOT = Path(__file__).absolute().parents[3]
PYPROJECT = REPO_ROOT / "pyproject.toml"
DICTIONARY = REPO_ROOT / "tools" / "linter" / "dictionary.txt"
FORBIDDEN_WORDS = {
"multipy", # project pytorch/multipy is dead # codespell:ignore multipy
}
class LintSeverity(str, Enum):
ERROR = "error"
WARNING = "warning"
ADVICE = "advice"
DISABLED = "disabled"
class LintMessage(NamedTuple):
path: str | None
line: int | None
char: int | None
code: str
severity: LintSeverity
name: str
original: str | None
replacement: str | None
description: str | None
def format_error_message(
filename: str,
error: Exception | None = None,
*,
message: str | None = None,
) -> LintMessage:
if message is None and error is not None:
message = (
f"Failed due to {error.__class__.__name__}:\n{error}\n"
"Please either fix the error or "
"add the word(s) to the dictionary file (lowercase is preferred)."
)
return LintMessage(
path=filename,
line=None,
char=None,
code="CODESPELL",
severity=LintSeverity.ERROR,
name="spelling error",
original=None,
replacement=None,
description=message,
)
def run_codespell(path: Path) -> str:
try:
return subprocess.check_output(
[
sys.executable,
"-m",
"codespell_lib",
"--toml",
str(PYPROJECT),
str(path),
],
stderr=subprocess.STDOUT,
text=True,
encoding="utf-8",
)
except subprocess.CalledProcessError as exc:
raise ValueError(exc.output) from exc
def check_file(filename: str) -> list[LintMessage]:
path = Path(filename).absolute()
try:
run_codespell(path)
except Exception as err:
return [format_error_message(filename, err)]
return []
def check_dictionary(filename: str) -> list[LintMessage]:
"""Check the dictionary file for duplicates."""
path = Path(filename).absolute()
try:
words = path.read_text(encoding="utf-8").splitlines()
words_set = set(words)
if len(words) != len(words_set):
raise ValueError("The dictionary file contains duplicate entries.")
uncased_words = list(map(str.lower, words))
if uncased_words != sorted(uncased_words):
raise ValueError(
"The dictionary file is not sorted alphabetically (case-insensitive)."
)
for forbidden_word in sorted(
FORBIDDEN_WORDS & (words_set | set(uncased_words))
):
raise ValueError(
f"The dictionary file contains a forbidden word: {forbidden_word!r}. "
"Please remove it from the dictionary file and use 'codespell:ignore' "
"inline comment instead."
)
except Exception as err:
return [format_error_message(str(filename), err)]
return []
def main() -> None:
parser = argparse.ArgumentParser(
description="Check files for spelling mistakes using codespell.",
fromfile_prefix_chars="@",
)
parser.add_argument(
"--verbose",
action="store_true",
help="verbose logging",
)
parser.add_argument(
"filenames",
nargs="+",
help="paths to lint",
)
args = parser.parse_args()
logging.basicConfig(
format="<%(processName)s:%(levelname)s> %(message)s",
level=logging.NOTSET
if args.verbose
else logging.DEBUG
if len(args.filenames) < 1000
else logging.INFO,
stream=sys.stderr,
)
with concurrent.futures.ProcessPoolExecutor(
max_workers=os.cpu_count(),
) as executor:
futures = {executor.submit(check_file, x): x for x in args.filenames}
futures[executor.submit(check_dictionary, str(DICTIONARY))] = str(DICTIONARY)
for future in concurrent.futures.as_completed(futures):
try:
for lint_message in future.result():
print(json.dumps(lint_message._asdict()), flush=True)
except Exception:
logging.critical('Failed at "%s".', futures[future])
raise
if __name__ == "__main__":
main()
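Each result is printed as one JSON object per line. A sketch of what a single emitted message looks like (field values are illustrative):

import json

msg = LintMessage(
    path="torch/example.py",
    line=None,
    char=None,
    code="CODESPELL",
    severity=LintSeverity.ERROR,  # serializes as "error" since it subclasses str
    name="spelling error",
    original=None,
    replacement=None,
    description="Failed due to ValueError: ...",
)
print(json.dumps(msg._asdict()))
# {"path": "torch/example.py", "line": null, ..., "severity": "error", ...}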
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '2.24.0'
short_version = __version__
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '2.23.0'
short_version = __version__
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
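Concretely, the parser splits on dots and peels release-candidate suffixes:

assert parse_version_info('2.24.0') == (2, 24, 0)
assert parse_version_info('2.24.0rc1') == (2, 24, 0, 'rc1')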
|
"""ChatGPT Plugiun Tool."""
from typing import List, Optional
import requests
from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from llama_index.tools.openapi.base import OpenAPIToolSpec
class ChatGPTPluginToolSpec(BaseToolSpec):
"""
ChatGPT Plugin Tool.
This tool leverages the OpenAPI tool spec to automatically load ChatGPT
plugins from a manifest file.
    You should also provide the Requests tool spec to allow the Agent to make calls to the OpenAPI endpoints.
    To use endpoints with authorization, use the Requests tool spec with the authorization headers.
"""
spec_functions = ["load_openapi_spec", "describe_plugin"]
def __init__(
self, manifest: Optional[dict] = None, manifest_url: Optional[str] = None
):
import yaml
if manifest and manifest_url:
raise ValueError("You cannot provide both a manifest and a manifest_url")
elif manifest:
pass
elif manifest_url:
response = requests.get(manifest_url).text
manifest = yaml.safe_load(response)
else:
raise ValueError("You must provide either a manifest or a manifest_url")
if manifest["api"]["type"] != "openapi":
raise ValueError(
f'API type must be "openapi", not "{manifest["api"]["type"]}"'
)
if manifest["auth"]["type"] != "none":
raise ValueError("Authentication cannot be supported for ChatGPT plugins")
self.openapi = OpenAPIToolSpec(url=manifest["api"]["url"])
self.plugin_description = f"""
'human_description': {manifest["description_for_human"]}
'model_description': {manifest["description_for_model"]}
"""
def load_openapi_spec(self) -> List[Document]:
"""
You are an AI agent specifically designed to retrieve information by making web requests to an API based on an OpenAPI specification.
Here's a step-by-step guide to assist you in answering questions:
1. Determine the base URL required for making the request
2. Identify the relevant paths necessary to address the question
3. Find the required parameters for making the request
4. Perform the necessary requests to obtain the answer
        Returns:
            List[Document]: A list of Document objects describing the OpenAPI spec.
"""
return self.openapi.load_openapi_spec()
    def describe_plugin(self) -> str:
        return self.plugin_description
|
"""ChatGPT Plugiun Tool."""
from typing import List, Optional
import requests
from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from llama_index.tools.openapi.base import OpenAPIToolSpec
class ChatGPTPluginToolSpec(BaseToolSpec):
"""
ChatGPT Plugin Tool.
This tool leverages the OpenAPI tool spec to automatically load ChatGPT
plugins from a manifest file.
    You should also provide the Requests tool spec to allow the Agent to make calls to the OpenAPI endpoints.
    To use endpoints with authorization, use the Requests tool spec with the authorization headers.
"""
spec_functions = ["load_openapi_spec", "describe_plugin"]
def __init__(
self, manifest: Optional[dict] = None, manifest_url: Optional[str] = None
):
import yaml
if manifest and manifest_url:
raise ValueError("You cannot provide both a manifest and a manifest_url")
elif manifest:
pass
elif manifest_url:
response = requests.get(manifest_url).text
manifest = yaml.safe_load(response)
else:
raise ValueError("You must provide either a manifest or a manifest_url")
if manifest["api"]["type"] != "openapi":
raise ValueError(
f'API type must be "openapi", not "{manifest["api"]["type"]}"'
)
if manifest["auth"]["type"] != "none":
raise ValueError("Authentication cannot be supported for ChatGPT plugins")
self.openapi = OpenAPIToolSpec(url=manifest["api"]["url"])
self.plugin_description = f"""
'human_description': {manifest['description_for_human']}
'model_description': {manifest['description_for_model']}
"""
def load_openapi_spec(self) -> List[Document]:
"""
You are an AI agent specifically designed to retrieve information by making web requests to an API based on an OpenAPI specification.
Here's a step-by-step guide to assist you in answering questions:
1. Determine the base URL required for making the request
2. Identify the relevant paths necessary to address the question
3. Find the required parameters for making the request
4. Perform the necessary requests to obtain the answer
        Returns:
            List[Document]: A list of Document objects describing the OpenAPI spec.
"""
return self.openapi.load_openapi_spec()
    def describe_plugin(self) -> str:
        return self.plugin_description
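A hedged construction sketch with a hypothetical inline manifest (real plugins serve theirs at `/.well-known/ai-plugin.json`); `load_openapi_spec` then fetches and documents the referenced OpenAPI spec:

manifest = {
    "api": {"type": "openapi", "url": "https://example.com/openapi.yaml"},  # hypothetical URL
    "auth": {"type": "none"},
    "description_for_human": "Example plugin.",
    "description_for_model": "Use this to query example.com.",
}
tool_spec = ChatGPTPluginToolSpec(manifest=manifest)
docs = tool_spec.load_openapi_spec()  # documents describing the spec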
|
from langchain_core.prompts import PromptTemplate
template = """You are a teacher coming up with questions to ask on a quiz.
Given the following document, please generate a question and answer based on that document.
Example Format:
<Begin Document>
...
<End Document>
QUESTION: question here
ANSWER: answer here
These questions should be detailed and be based explicitly on information in the document. Begin!
<Begin Document>
{doc}
<End Document>""" # noqa: E501
PROMPT = PromptTemplate(
input_variables=["doc"],
template=template,
)
|
# flake8: noqa
from langchain.output_parsers.regex import RegexParser
from langchain_core.prompts import PromptTemplate
template = """You are a teacher coming up with questions to ask on a quiz.
Given the following document, please generate a question and answer based on that document.
Example Format:
<Begin Document>
...
<End Document>
QUESTION: question here
ANSWER: answer here
These questions should be detailed and be based explicitly on information in the document. Begin!
<Begin Document>
{doc}
<End Document>"""
PROMPT = PromptTemplate(
input_variables=["doc"],
template=template,
)
|
import torch
from torch import Tensor
def _box_cxcywh_to_xyxy(boxes: Tensor) -> Tensor:
"""
Converts bounding boxes from (cx, cy, w, h) format to (x1, y1, x2, y2) format.
(cx, cy) refers to center of bounding box
(w, h) are width and height of bounding box
Args:
boxes (Tensor[N, 4]): boxes in (cx, cy, w, h) format which will be converted.
Returns:
        boxes (Tensor[N, 4]): boxes in (x1, y1, x2, y2) format.
"""
# We need to change all 4 of them so some temporary variable is needed.
cx, cy, w, h = boxes.unbind(-1)
x1 = cx - 0.5 * w
y1 = cy - 0.5 * h
x2 = cx + 0.5 * w
y2 = cy + 0.5 * h
boxes = torch.stack((x1, y1, x2, y2), dim=-1)
return boxes
def _box_xyxy_to_cxcywh(boxes: Tensor) -> Tensor:
"""
Converts bounding boxes from (x1, y1, x2, y2) format to (cx, cy, w, h) format.
(x1, y1) refer to top left of bounding box
(x2, y2) refer to bottom right of bounding box
Args:
boxes (Tensor[N, 4]): boxes in (x1, y1, x2, y2) format which will be converted.
Returns:
        boxes (Tensor[N, 4]): boxes in (cx, cy, w, h) format.
"""
x1, y1, x2, y2 = boxes.unbind(-1)
cx = (x1 + x2) / 2
cy = (y1 + y2) / 2
w = x2 - x1
h = y2 - y1
boxes = torch.stack((cx, cy, w, h), dim=-1)
return boxes
def _box_xywh_to_xyxy(boxes: Tensor) -> Tensor:
"""
Converts bounding boxes from (x, y, w, h) format to (x1, y1, x2, y2) format.
(x, y) refers to top left of bounding box.
(w, h) refers to width and height of box.
Args:
boxes (Tensor[N, 4]): boxes in (x, y, w, h) which will be converted.
Returns:
boxes (Tensor[N, 4]): boxes in (x1, y1, x2, y2) format.
"""
x, y, w, h = boxes.unbind(-1)
boxes = torch.stack([x, y, x + w, y + h], dim=-1)
return boxes
def _box_xyxy_to_xywh(boxes: Tensor) -> Tensor:
"""
Converts bounding boxes from (x1, y1, x2, y2) format to (x, y, w, h) format.
(x1, y1) refer to top left of bounding box
(x2, y2) refer to bottom right of bounding box
Args:
boxes (Tensor[N, 4]): boxes in (x1, y1, x2, y2) which will be converted.
Returns:
boxes (Tensor[N, 4]): boxes in (x, y, w, h) format.
"""
x1, y1, x2, y2 = boxes.unbind(-1)
w = x2 - x1 # x2 - x1
h = y2 - y1 # y2 - y1
boxes = torch.stack((x1, y1, w, h), dim=-1)
return boxes
|
import torch
from torch import Tensor
def _box_cxcywh_to_xyxy(boxes: Tensor) -> Tensor:
"""
Converts bounding boxes from (cx, cy, w, h) format to (x1, y1, x2, y2) format.
(cx, cy) refers to center of bounding box
(w, h) are width and height of bounding box
Args:
boxes (Tensor[N, 4]): boxes in (cx, cy, w, h) format which will be converted.
Returns:
        boxes (Tensor[N, 4]): boxes in (x1, y1, x2, y2) format.
"""
# We need to change all 4 of them so some temporary variable is needed.
cx, cy, w, h = boxes.unbind(-1)
x1 = cx - 0.5 * w
y1 = cy - 0.5 * h
x2 = cx + 0.5 * w
y2 = cy + 0.5 * h
boxes = torch.stack((x1, y1, x2, y2), dim=-1)
return boxes
def _box_xyxy_to_cxcywh(boxes: Tensor) -> Tensor:
"""
Converts bounding boxes from (x1, y1, x2, y2) format to (cx, cy, w, h) format.
(x1, y1) refer to top left of bounding box
(x2, y2) refer to bottom right of bounding box
Args:
boxes (Tensor[N, 4]): boxes in (x1, y1, x2, y2) format which will be converted.
Returns:
        boxes (Tensor[N, 4]): boxes in (cx, cy, w, h) format.
"""
x1, y1, x2, y2 = boxes.unbind(-1)
cx = (x1 + x2) / 2
cy = (y1 + y2) / 2
w = x2 - x1
h = y2 - y1
boxes = torch.stack((cx, cy, w, h), dim=-1)
return boxes
def _box_xywh_to_xyxy(boxes: Tensor) -> Tensor:
"""
Converts bounding boxes from (x, y, w, h) format to (x1, y1, x2, y2) format.
    (x, y) refers to top left of bounding box.
(w, h) refers to width and height of box.
Args:
boxes (Tensor[N, 4]): boxes in (x, y, w, h) which will be converted.
Returns:
boxes (Tensor[N, 4]): boxes in (x1, y1, x2, y2) format.
"""
x, y, w, h = boxes.unbind(-1)
boxes = torch.stack([x, y, x + w, y + h], dim=-1)
return boxes
def _box_xyxy_to_xywh(boxes: Tensor) -> Tensor:
"""
Converts bounding boxes from (x1, y1, x2, y2) format to (x, y, w, h) format.
(x1, y1) refer to top left of bounding box
(x2, y2) refer to bottom right of bounding box
Args:
boxes (Tensor[N, 4]): boxes in (x1, y1, x2, y2) which will be converted.
Returns:
boxes (Tensor[N, 4]): boxes in (x, y, w, h) format.
"""
x1, y1, x2, y2 = boxes.unbind(-1)
w = x2 - x1 # x2 - x1
h = y2 - y1 # y2 - y1
boxes = torch.stack((x1, y1, w, h), dim=-1)
return boxes
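A quick round-trip check of the converters:

import torch

xyxy = torch.tensor([[10.0, 20.0, 50.0, 80.0]])
cxcywh = _box_xyxy_to_cxcywh(xyxy)  # tensor([[30., 50., 40., 60.]])
assert torch.allclose(_box_cxcywh_to_xyxy(cxcywh), xyxy)

xywh = _box_xyxy_to_xywh(xyxy)      # tensor([[10., 20., 40., 60.]])
assert torch.equal(_box_xywh_to_xyxy(xywh), xyxy)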
|
from collections import namedtuple
from typing import TYPE_CHECKING, Dict, NamedTuple, Optional
from urllib.parse import urlparse
if TYPE_CHECKING:
from docarray import DocumentArray
_ParsedHost = namedtuple('ParsedHost', 'on host port version scheme')
def _parse_host(host: str) -> NamedTuple:
"""Parse a host string into namedtuple object.
A parsed host's components are `on`, `host`, `port`, `version`, `scheme`.
:param host: a host string. Can be one of the following:
- `grpc://192.168.0.123:8080/endpoint`
- `ws://192.168.0.123:8080/endpoint`
- `http://192.168.0.123:8080/endpoint`
- `jinahub://Hello/endpoint`
- `jinahub+docker://Hello/endpoint`
- `jinahub+docker://Hello/v0.0.1/endpoint`
- `jinahub+docker://Hello/latest/endpoint`
- `jinahub+sandbox://Hello/endpoint`
"""
r = urlparse(host)
on = r.path or '/'
host = (
r._replace(netloc=r.netloc.replace(f':{r.port}', ''))._replace(path='').geturl()
)
port = r.port or None
version = None
scheme = r.scheme
    split_path = list(filter(None, r.path.split('/')))
    if len(split_path) == 2:
        # path includes version and endpoint
        version = split_path[0]
        host = host + '/' + version
        on = '/' + split_path[1]
return _ParsedHost(on=on, host=host, port=port, version=version, scheme=scheme)
class PostMixin:
"""Helper functions for posting DocumentArray to Jina Flow."""
def post(
self,
host: str,
show_progress: bool = False,
batch_size: Optional[int] = None,
parameters: Optional[Dict] = None,
**kwargs,
) -> 'DocumentArray':
"""Posting itself to a remote Flow/Sandbox and get the modified DocumentArray back
:param host: a host string. Can be one of the following:
- `grpc://192.168.0.123:8080/endpoint`
- `ws://192.168.0.123:8080/endpoint`
- `http://192.168.0.123:8080/endpoint`
- `jinahub://Hello/endpoint`
- `jinahub+docker://Hello/endpoint`
- `jinahub+docker://Hello/v0.0.1/endpoint`
- `jinahub+docker://Hello/latest/endpoint`
- `jinahub+sandbox://Hello/endpoint`
        :param show_progress: whether to show a progress bar
        :param batch_size: number of Documents to send in each request
:param parameters: parameters to send in the request
:return: the new DocumentArray returned from remote
"""
if not self:
return
parsed_host = _parse_host(host)
batch_size = batch_size or len(self)
scheme = parsed_host.scheme
host = parsed_host.host
if scheme in ('grpcs', 'https', 'wss'):
scheme = scheme[:-1]
if scheme == 'ws':
scheme = 'websocket' # temp fix for the core
if scheme.startswith('jinahub'):
from jina import Flow
f = Flow(quiet=True, prefetch=1).add(uses=host, **kwargs)
with f:
return f.post(
parsed_host.on,
inputs=self,
show_progress=show_progress,
request_size=batch_size,
parameters=parameters,
**kwargs,
)
elif scheme in ('grpc', 'http', 'ws', 'websocket'):
from jina import Client
if parsed_host.port:
host += f':{parsed_host.port}'
c = Client(host=host)
return c.post(
parsed_host.on,
inputs=self,
show_progress=show_progress,
request_size=batch_size,
parameters=parameters,
**kwargs,
)
else:
raise ValueError(f'unsupported scheme: {scheme}')
|
from collections import namedtuple
from typing import TYPE_CHECKING, Dict, NamedTuple, Optional
from urllib.parse import urlparse
if TYPE_CHECKING:
from ... import DocumentArray
_ParsedHost = namedtuple('ParsedHost', 'on host port version scheme')
def _parse_host(host: str) -> NamedTuple:
"""Parse a host string into namedtuple object.
A parsed host's components are `on`, `host`, `port`, `version`, `scheme`.
:param host: a host string. Can be one of the following:
- `grpc://192.168.0.123:8080/endpoint`
- `ws://192.168.0.123:8080/endpoint`
- `http://192.168.0.123:8080/endpoint`
- `jinahub://Hello/endpoint`
- `jinahub+docker://Hello/endpoint`
- `jinahub+docker://Hello/v0.0.1/endpoint`
- `jinahub+docker://Hello/latest/endpoint`
- `jinahub+sandbox://Hello/endpoint`
"""
r = urlparse(host)
on = r.path or '/'
host = (
r._replace(netloc=r.netloc.replace(f':{r.port}', ''))._replace(path='').geturl()
)
port = r.port or None
version = None
scheme = r.scheme
    split_path = list(filter(None, r.path.split('/')))
    if len(split_path) == 2:
        # path includes version and endpoint
        version = split_path[0]
        host = host + '/' + version
        on = '/' + split_path[1]
return _ParsedHost(on=on, host=host, port=port, version=version, scheme=scheme)
class PostMixin:
"""Helper functions for posting DocumentArray to Jina Flow."""
def post(
self,
host: str,
show_progress: bool = False,
batch_size: Optional[int] = None,
parameters: Optional[Dict] = None,
**kwargs,
) -> 'DocumentArray':
"""Posting itself to a remote Flow/Sandbox and get the modified DocumentArray back
:param host: a host string. Can be one of the following:
- `grpc://192.168.0.123:8080/endpoint`
- `ws://192.168.0.123:8080/endpoint`
- `http://192.168.0.123:8080/endpoint`
- `jinahub://Hello/endpoint`
- `jinahub+docker://Hello/endpoint`
- `jinahub+docker://Hello/v0.0.1/endpoint`
- `jinahub+docker://Hello/latest/endpoint`
- `jinahub+sandbox://Hello/endpoint`
        :param show_progress: whether to show a progress bar
        :param batch_size: number of Documents to send in each request
:param parameters: parameters to send in the request
:return: the new DocumentArray returned from remote
"""
if not self:
return
parsed_host = _parse_host(host)
batch_size = batch_size or len(self)
scheme = parsed_host.scheme
host = parsed_host.host
if scheme in ('grpcs', 'https', 'wss'):
scheme = scheme[:-1]
if scheme == 'ws':
scheme = 'websocket' # temp fix for the core
if scheme.startswith('jinahub'):
from jina import Flow
f = Flow(quiet=True, prefetch=1).add(uses=host, **kwargs)
with f:
return f.post(
parsed_host.on,
inputs=self,
show_progress=show_progress,
request_size=batch_size,
parameters=parameters,
**kwargs,
)
elif scheme in ('grpc', 'http', 'ws', 'websocket'):
from jina import Client
if parsed_host.port:
host += f':{parsed_host.port}'
c = Client(host=host)
return c.post(
parsed_host.on,
inputs=self,
show_progress=show_progress,
request_size=batch_size,
parameters=parameters,
**kwargs,
)
else:
raise ValueError(f'unsupported scheme: {scheme}')
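Two concrete parses of host strings from the docstring:

parsed = _parse_host('grpc://192.168.0.123:8080/endpoint')
assert parsed.scheme == 'grpc'
assert parsed.host == 'grpc://192.168.0.123'
assert parsed.port == 8080
assert parsed.on == '/endpoint'

parsed = _parse_host('jinahub+docker://Hello/v0.0.1/endpoint')
assert parsed.version == 'v0.0.1'
assert parsed.host == 'jinahub+docker://Hello/v0.0.1'
assert parsed.on == '/endpoint'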
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Optional, Sequence, Tuple
from mmengine.data import BaseDataSample
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataSample]]]
@HOOKS.register_module()
class ParamSchedulerHook(Hook):
"""A hook to update some hyper-parameters in optimizer, e.g learning rate
and momentum."""
priority = 'LOW'
def after_train_iter(
self,
runner,
data_batch: DATA_BATCH = None,
outputs: Optional[Sequence[BaseDataSample]] = None) -> None:
"""Call step function for each scheduler after each iteration.
Args:
runner (Runner): The runner of the training process.
data_batch (Sequence[Tuple[Any, BaseDataSample]], optional): Data
from dataloader. In order to keep this interface consistent
with other hooks, we keep ``data_batch`` here.
Defaults to None.
outputs (Sequence[BaseDataSample], optional): Outputs from model.
In order to keep this interface consistent with other hooks, we
                keep ``outputs`` here. Defaults to None.
"""
for scheduler in runner.schedulers:
if not scheduler.by_epoch:
scheduler.step()
def after_train_epoch(self, runner) -> None:
"""Call step function for each scheduler after each epoch.
Args:
runner (Runner): The runner of the training process.
"""
for scheduler in runner.schedulers:
if scheduler.by_epoch:
scheduler.step()
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Optional, Sequence, Tuple
from mmengine.data import BaseDataSample
from mmengine.registry import HOOKS
from .hook import Hook
@HOOKS.register_module()
class ParamSchedulerHook(Hook):
"""A hook to update some hyper-parameters in optimizer, e.g learning rate
and momentum."""
priority = 'LOW'
def after_train_iter(
self,
runner: object,
data_batch: Optional[Sequence[Tuple[Any, BaseDataSample]]] = None,
outputs: Optional[Sequence[BaseDataSample]] = None) -> None:
"""Call step function for each scheduler after each iteration.
Args:
runner (Runner): The runner of the training process.
data_batch (Sequence[Tuple[Any, BaseDataSample]], optional): Data
from dataloader. In order to keep this interface consistent
with other hooks, we keep ``data_batch`` here.
Defaults to None.
outputs (Sequence[BaseDataSample], optional): Outputs from model.
In order to keep this interface consistent with other hooks, we
                keep ``outputs`` here. Defaults to None.
"""
for scheduler in runner.schedulers: # type: ignore
if not scheduler.by_epoch:
scheduler.step()
def after_train_epoch(self, runner: object) -> None:
"""Call step function for each scheduler after each epoch.
Args:
runner (Runner): The runner of the training process.
"""
for scheduler in runner.schedulers: # type: ignore
if scheduler.by_epoch:
scheduler.step()
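A minimal behavioral sketch with stand-in objects (not real mmengine runners or schedulers): iteration steps only per-iteration schedulers, epoch steps only per-epoch ones.

class FakeScheduler:  # stand-in exposing the two attributes the hook uses
    def __init__(self, by_epoch):
        self.by_epoch = by_epoch
        self.steps = 0

    def step(self):
        self.steps += 1

class FakeRunner:
    schedulers = [FakeScheduler(by_epoch=True), FakeScheduler(by_epoch=False)]

hook = ParamSchedulerHook()
hook.after_train_iter(FakeRunner())   # steps only the by_epoch=False scheduler
hook.after_train_epoch(FakeRunner())  # steps only the by_epoch=True scheduler
assert [s.steps for s in FakeRunner.schedulers] == [1, 1]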
|
"""
This example trains BERT (or any other transformer model like RoBERTa, DistilBERT, etc.) on the STSbenchmark from scratch.
It uses AdaptiveLayerLoss with the powerful CoSENTLoss to train models that perform well even when removing some layers.
It generates sentence embeddings that can be compared using cosine-similarity to measure the similarity.
Usage:
python adaptive_layer_sts.py
OR
python adaptive_layer_sts.py pretrained_transformer_model_name
"""
import logging
import sys
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers import (
SentenceTransformer,
SentenceTransformerTrainer,
SentenceTransformerTrainingArguments,
losses,
)
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator, SimilarityFunction
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
model_name = sys.argv[1] if len(sys.argv) > 1 else "distilbert-base-uncased"
batch_size = 16
num_train_epochs = 4
# Save path of the model
output_dir = f"output/adaptive_layer_sts_{model_name.replace('/', '-')}-{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}"
# 1. Here we define our SentenceTransformer model. If not already a Sentence Transformer model, it will automatically
# create one with "mean" pooling.
model = SentenceTransformer(model_name)
# If we want, we can limit the maximum sequence length for the model
# model.max_seq_length = 75
logging.info(model)
# 2. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 3. Define our training loss
# CoSENTLoss (https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosentloss) needs two text
# columns and one similarity score column (between 0 and 1)
inner_train_loss = losses.CoSENTLoss(model=model)
train_loss = losses.AdaptiveLayerLoss(model, inner_train_loss)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_train_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="adaptive-layer-sts", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-sts-adaptive-layer")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-sts-adaptive-layer')`."
)
|
"""
This example trains BERT (or any other transformer model like RoBERTa, DistilBERT, etc.) on the STSbenchmark from scratch.
It uses AdaptiveLayerLoss with the powerful CoSENTLoss to train models that perform well even when removing some layers.
It generates sentence embeddings that can be compared using cosine-similarity to measure the similarity.
Usage:
python adaptive_layer_sts.py
OR
python adaptive_layer_sts.py pretrained_transformer_model_name
"""
import traceback
from datasets import load_dataset
from sentence_transformers import losses
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, SentenceTransformerTrainingArguments
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator, SimilarityFunction
import logging
from datetime import datetime
import sys
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
model_name = sys.argv[1] if len(sys.argv) > 1 else "distilbert-base-uncased"
batch_size = 16
num_train_epochs = 4
# Save path of the model
output_dir = f"output/adaptive_layer_sts_{model_name.replace('/', '-')}-{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}"
# 1. Here we define our SentenceTransformer model. If not already a Sentence Transformer model, it will automatically
# create one with "mean" pooling.
model = SentenceTransformer(model_name)
# If we want, we can limit the maximum sequence length for the model
# model.max_seq_length = 75
logging.info(model)
# 2. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 3. Define our training loss
# CoSENTLoss (https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosentloss) needs two text
# columns and one similarity score column (between 0 and 1)
inner_train_loss = losses.CoSENTLoss(model=model)
train_loss = losses.AdaptiveLayerLoss(model, inner_train_loss)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_train_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="adaptive-layer-sts", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-sts-adaptive-layer")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-sts-adaptive-layer')`."
)
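The payoff of AdaptiveLayerLoss is that upper layers can be removed after training. A hedged sketch reusing the script's `final_output_dir` (attribute paths differ by backbone: DistilBERT keeps its layer stack at `auto_model.transformer.layer`, BERT-style models at `auto_model.encoder.layer`):

truncated = SentenceTransformer(final_output_dir)
backbone = truncated[0].auto_model
# distilbert-base-uncased (the script's default) stores its layers here:
backbone.transformer.layer = backbone.transformer.layer[:3]  # keep 3 of 6 layers
embeddings = truncated.encode(["The weather is lovely today."])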
|
from __future__ import annotations
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder, SparseEncoderTrainer, SparseEncoderTrainingArguments
from sentence_transformers.evaluation import SequentialEvaluator, SimilarityFunction
from sentence_transformers.models import Pooling, Transformer
from sentence_transformers.sparse_encoder import evaluation, losses, models
from sentence_transformers.training_args import BatchSamplers
# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def main():
# Initialize model components
model_name = "tomaarsen/mpnet-base-nli"
transformer = Transformer(model_name)
transformer.requires_grad_(False) # Freeze the transformer model
pooling = Pooling(transformer.get_word_embedding_dimension(), pooling_mode="mean")
csr_sparsity = models.CSRSparsity(
input_dim=transformer.get_word_embedding_dimension(),
hidden_dim=4 * transformer.get_word_embedding_dimension(),
k=256, # Number of top values to keep
k_aux=512, # Number of top values for auxiliary loss
)
# Create the SparseEncoder model
model = SparseEncoder(modules=[transformer, pooling, csr_sparsity])
output_dir = "examples/sparse_encoder/output/sparse_encoder_nli_frozen_transformer_from_pretrained"
# 2. Load the AllNLI dataset: https://huggingface.co/datasets/sentence-transformers/all-nli
train_dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="train")
eval_dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="dev")
logging.info(train_dataset)
# 3. Initialize the loss
loss = losses.CSRLoss(
model=model,
beta=0.1, # Weight for auxiliary loss
gamma=1, # Weight for ranking loss
scale=20.0, # Scale for similarity computation
)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
stsb_eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
evaluators = []
for k_dim in [16, 32, 64, 128, 256]:
evaluators.append(
evaluation.SparseEmbeddingSimilarityEvaluator(
sentences1=stsb_eval_dataset["sentence1"],
sentences2=stsb_eval_dataset["sentence2"],
scores=stsb_eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name=f"sts-dev-{k_dim}",
max_active_dims=k_dim,
)
)
dev_evaluator = SequentialEvaluator(evaluators, main_score_function=lambda scores: scores[-1])
# Set up training arguments
training_args = SparseEncoderTrainingArguments(
output_dir=output_dir,
num_train_epochs=1,
per_device_train_batch_size=128,
per_device_eval_batch_size=128,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
batch_sampler=BatchSamplers.NO_DUPLICATES, # MultipleNegativesRankingLoss benefits from no duplicate samples in a batch
logging_steps=100,
eval_strategy="steps",
eval_steps=200,
save_strategy="steps",
save_steps=200,
learning_rate=4e-5,
optim="adamw_torch",
weight_decay=1e-4,
adam_epsilon=6.25e-10,
run_name="sparse_encoder_nli_frozen_transformer_from_pretrained",
)
# Initialize trainer
trainer = SparseEncoderTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_evaluator,
)
# Train model
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
evaluators = []
for k_dim in [16, 32, 64, 128, 256]:
evaluators.append(
evaluation.SparseEmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name=f"sts-test-{k_dim}",
max_active_dims=k_dim,
)
)
test_evaluator = SequentialEvaluator(evaluators)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
model.save(output_dir)
if __name__ == "__main__":
main()
|
from __future__ import annotations
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder, SparseEncoderTrainer, SparseEncoderTrainingArguments
from sentence_transformers.evaluation import SequentialEvaluator, SimilarityFunction
from sentence_transformers.models import Pooling, Transformer
from sentence_transformers.sparse_encoder import evaluation, losses, models
from sentence_transformers.training_args import BatchSamplers
# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def main():
# Initialize model components
model_name = "tomaarsen/mpnet-base-nli"
transformer = Transformer(model_name)
transformer.requires_grad_(False) # Freeze the transformer model
pooling = Pooling(transformer.get_word_embedding_dimension(), pooling_mode="mean")
csr_sparsity = models.CSRSparsity(
input_dim=transformer.get_word_embedding_dimension(),
hidden_dim=4 * transformer.get_word_embedding_dimension(),
k=256, # Number of top values to keep
k_aux=512, # Number of top values for auxiliary loss
)
# Create the SparseEncoder model
model = SparseEncoder(modules=[transformer, pooling, csr_sparsity])
output_dir = "examples/sparse_encoder/output/sparse_encoder_nli_frozen_transformer_from_pretrained"
# 2. Load the AllNLI dataset: https://huggingface.co/datasets/sentence-transformers/all-nli
train_dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="train")
eval_dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="dev")
logging.info(train_dataset)
# 3. Initialize the loss
loss = losses.CSRLoss(
model=model,
beta=0.1, # Weight for auxiliary loss
gamma=1, # Weight for ranking loss
scale=20.0, # Scale for similarity computation
)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
stsb_eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
evaluators = []
for k_dim in [16, 32, 64, 128, 256]:
evaluators.append(
evaluation.SparseEmbeddingSimilarityEvaluator(
sentences1=stsb_eval_dataset["sentence1"],
sentences2=stsb_eval_dataset["sentence2"],
scores=stsb_eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name=f"sts-dev-{k_dim}",
truncate_dim=k_dim,
)
)
dev_evaluator = SequentialEvaluator(evaluators, main_score_function=lambda scores: scores[-1])
# Set up training arguments
training_args = SparseEncoderTrainingArguments(
output_dir=output_dir,
num_train_epochs=1,
per_device_train_batch_size=128,
per_device_eval_batch_size=128,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
batch_sampler=BatchSamplers.NO_DUPLICATES, # MultipleNegativesRankingLoss benefits from no duplicate samples in a batch
logging_steps=100,
eval_strategy="steps",
eval_steps=200,
save_strategy="steps",
save_steps=200,
learning_rate=4e-5,
optim="adamw_torch",
weight_decay=1e-4,
adam_epsilon=6.25e-10,
run_name="sparse_encoder_nli_frozen_transformer_from_pretrained",
)
# Initialize trainer
trainer = SparseEncoderTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_evaluator,
)
# Train model
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
evaluators = []
for k_dim in [16, 32, 64, 128, 256]:
evaluators.append(
evaluation.SparseEmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name=f"sts-test-{k_dim}",
truncate_dim=k_dim,
)
)
test_evaluator = SequentialEvaluator(evaluators)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
model.save(output_dir)
if __name__ == "__main__":
main()
|
"""
Computes embeddings
"""
from __future__ import annotations
import numpy as np
import pytest
from sentence_transformers import SentenceTransformer
@pytest.mark.parametrize("normalize_embeddings", (False, True))
@pytest.mark.parametrize("prompt_name", (None, "retrieval"))
def test_encode_multi_process(
stsb_bert_tiny_model: SentenceTransformer, normalize_embeddings: bool, prompt_name: str | None
) -> None:
model = stsb_bert_tiny_model
model.prompts = {"retrieval": "Represent this sentence for searching relevant passages: "}
sentences = [f"This is sentence {i}" for i in range(40)]
# Start the multi-process pool on e.g. two CPU devices & compute the embeddings using the pool
pool = model.start_multi_process_pool(["cpu", "cpu"])
emb = model.encode_multi_process(
sentences, pool, chunk_size=10, normalize_embeddings=normalize_embeddings, prompt_name=prompt_name
)
model.stop_multi_process_pool(pool)
assert emb.shape == (len(sentences), 128)
# Make sure the embeddings aren't just all 0
assert emb.sum() != 0.0
# Compare against normal embeddings
emb_normal = model.encode(sentences, normalize_embeddings=normalize_embeddings, prompt_name=prompt_name)
diff = np.max(np.abs(emb - emb_normal))
assert diff < 0.001
# Ensure that after normalizing, the means are all almost 0, and otherwise not
assert np.all(np.abs(emb.mean(1)) < 0.01) == normalize_embeddings
|
"""
Computes embeddings
"""
from __future__ import annotations
import numpy as np
import pytest
from sentence_transformers import SentenceTransformer
@pytest.mark.parametrize("normalize_embeddings", (False, True))
@pytest.mark.parametrize("prompt_name", (None, "retrieval"))
def test_encode_multi_process(
stsb_bert_tiny_model: SentenceTransformer, normalize_embeddings: bool, prompt_name: str | None
) -> None:
model = stsb_bert_tiny_model
model.prompts = {"retrieval": "Represent this sentence for searching relevant passages: "}
sentences = ["This is sentence {}".format(i) for i in range(40)]
# Start the multi-process pool on e.g. two CPU devices & compute the embeddings using the pool
pool = model.start_multi_process_pool(["cpu", "cpu"])
emb = model.encode_multi_process(
sentences, pool, chunk_size=10, normalize_embeddings=normalize_embeddings, prompt_name=prompt_name
)
model.stop_multi_process_pool(pool)
assert emb.shape == (len(sentences), 128)
# Make sure the embeddings aren't just all 0
assert emb.sum() != 0.0
# Compare against normal embeddings
emb_normal = model.encode(sentences, normalize_embeddings=normalize_embeddings, prompt_name=prompt_name)
diff = np.max(np.abs(emb - emb_normal))
assert diff < 0.001
# Ensure that after normalizing, the means are all almost 0, and otherwise not
assert np.all(np.abs(emb.mean(1)) < 0.01) == normalize_embeddings
|
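The two tests above exercise multi-process encoding. For reference, a minimal usage sketch of the same public API outside a test harness (the checkpoint name is an assumption; any SentenceTransformer model works):

from sentence_transformers import SentenceTransformer

if __name__ == "__main__":  # multiprocessing requires an import guard
    # Model name is illustrative; substitute any checkpoint.
    model = SentenceTransformer("all-MiniLM-L6-v2")
    sentences = [f"This is sentence {i}" for i in range(1000)]
    pool = model.start_multi_process_pool(["cpu", "cpu"])  # or ["cuda:0", "cuda:1"]
    embeddings = model.encode_multi_process(sentences, pool, chunk_size=100)
    model.stop_multi_process_pool(pool)
    print(embeddings.shape)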
from __future__ import annotations
from collections.abc import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers import SentenceTransformer
class MSELoss(nn.Module):
def __init__(self, model: SentenceTransformer) -> None:
"""
Computes the MSE loss between the computed sentence embedding and a target sentence embedding. This loss
is used when extending sentence embeddings to new languages as described in our publication
Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation.
For an example, see `the distillation documentation <../../../examples/sentence_transformer/training/distillation/README.html>`_ on extending language models to new languages.
Args:
model: SentenceTransformerModel
References:
- Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation: https://arxiv.org/abs/2004.09813
- `Training > Model Distillation <../../../examples/sentence_transformer/training/distillation/README.html>`_
- `Training > Multilingual Models <../../../examples/sentence_transformer/training/multilingual/README.html>`_
Requirements:
1. Usually uses a finetuned teacher M in a knowledge distillation setup
Inputs:
+-----------------------------------------+-----------------------------+
| Texts | Labels |
+=========================================+=============================+
| sentence | model sentence embeddings |
+-----------------------------------------+-----------------------------+
| sentence_1, sentence_2, ..., sentence_N | model sentence embeddings |
+-----------------------------------------+-----------------------------+
Relations:
- :class:`MarginMSELoss` is equivalent to this loss, but with a margin through a negative pair.
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
student_model = SentenceTransformer("microsoft/mpnet-base")
teacher_model = SentenceTransformer("all-mpnet-base-v2")
train_dataset = Dataset.from_dict({
"english": ["The first sentence", "The second sentence", "The third sentence", "The fourth sentence"],
"french": ["La première phrase", "La deuxième phrase", "La troisième phrase", "La quatrième phrase"],
})
def compute_labels(batch):
return {
"label": teacher_model.encode(batch["english"])
}
train_dataset = train_dataset.map(compute_labels, batched=True)
loss = losses.MSELoss(student_model)
trainer = SentenceTransformerTrainer(
model=student_model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.loss_fct = nn.MSELoss()
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
# Concatenate multiple inputs on the batch dimension
if len(sentence_features) > 1:
embeddings = torch.cat([self.model(inputs)["sentence_embedding"] for inputs in sentence_features], dim=0)
# Repeat the labels for each input
return self.loss_fct(embeddings, labels.repeat(len(sentence_features), 1))
embeddings = self.model(sentence_features[0])["sentence_embedding"]
return self.loss_fct(embeddings, labels)
@property
def citation(self) -> str:
return """
@inproceedings{reimers-2020-multilingual-sentence-bert,
title = "Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation",
author = "Reimers, Nils and Gurevych, Iryna",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing",
month = "11",
year = "2020",
publisher = "Association for Computational Linguistics",
url = "https://arxiv.org/abs/2004.09813",
}
"""
|
from __future__ import annotations
from collections.abc import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers import SentenceTransformer
class MSELoss(nn.Module):
def __init__(self, model: SentenceTransformer) -> None:
"""
Computes the MSE loss between the computed sentence embedding and a target sentence embedding. This loss
is used when extending sentence embeddings to new languages as described in our publication
Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation.
For an example, see `the distillation documentation <../../examples/training/distillation/README.html>`_ on extending language models to new languages.
Args:
model: SentenceTransformerModel
References:
- Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation: https://arxiv.org/abs/2004.09813
- `Training > Model Distillation <../../examples/training/distillation/README.html>`_
- `Training > Multilingual Models <../../examples/training/multilingual/README.html>`_
Requirements:
1. Usually uses a finetuned teacher M in a knowledge distillation setup
Inputs:
+-----------------------------------------+-----------------------------+
| Texts | Labels |
+=========================================+=============================+
| sentence | model sentence embeddings |
+-----------------------------------------+-----------------------------+
| sentence_1, sentence_2, ..., sentence_N | model sentence embeddings |
+-----------------------------------------+-----------------------------+
Relations:
- :class:`MarginMSELoss` is equivalent to this loss, but with a margin through a negative pair.
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
student_model = SentenceTransformer("microsoft/mpnet-base")
teacher_model = SentenceTransformer("all-mpnet-base-v2")
train_dataset = Dataset.from_dict({
"english": ["The first sentence", "The second sentence", "The third sentence", "The fourth sentence"],
"french": ["La première phrase", "La deuxième phrase", "La troisième phrase", "La quatrième phrase"],
})
def compute_labels(batch):
return {
"label": teacher_model.encode(batch["english"])
}
train_dataset = train_dataset.map(compute_labels, batched=True)
loss = losses.MSELoss(student_model)
trainer = SentenceTransformerTrainer(
model=student_model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.loss_fct = nn.MSELoss()
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
# Concatenate multiple inputs on the batch dimension
if len(sentence_features) > 1:
embeddings = torch.cat([self.model(inputs)["sentence_embedding"] for inputs in sentence_features], dim=0)
# Repeat the labels for each input
return self.loss_fct(embeddings, labels.repeat(len(sentence_features), 1))
embeddings = self.model(sentence_features[0])["sentence_embedding"]
return self.loss_fct(embeddings, labels)
@property
def citation(self) -> str:
return """
@inproceedings{reimers-2020-multilingual-sentence-bert,
title = "Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation",
author = "Reimers, Nils and Gurevych, Iryna",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing",
month = "11",
year = "2020",
publisher = "Association for Computational Linguistics",
url = "https://arxiv.org/abs/2004.09813",
}
"""
|
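The branch in `MSELoss.forward` that handles multiple input columns concatenates the student embeddings and repeats the teacher labels to match. A standalone numeric sketch of that shape logic (shapes are illustrative):

import torch

# Teacher embeddings for a batch of 4 sentences, dimension 8.
labels = torch.randn(4, 8)
# Student embeddings for two parallel input columns (e.g. "english" and
# "french"), concatenated on the batch dimension as MSELoss.forward does.
student = torch.randn(2 * 4, 8)
loss = torch.nn.functional.mse_loss(student, labels.repeat(2, 1))
print(loss.item())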
from enum import Enum
from typing import Any, Optional
from pydantic import BaseModel
from backend.data.block import BlockInput
class BlockCostType(str, Enum):
RUN = "run" # cost X credits per run
BYTE = "byte" # cost X credits per byte
SECOND = "second" # cost X credits per second
class BlockCost(BaseModel):
cost_amount: int
cost_filter: BlockInput
cost_type: BlockCostType
def __init__(
self,
cost_amount: int,
cost_type: BlockCostType = BlockCostType.RUN,
cost_filter: Optional[BlockInput] = None,
**data: Any,
) -> None:
super().__init__(
cost_amount=cost_amount,
cost_filter=cost_filter or {},
cost_type=cost_type,
**data,
)
|
from enum import Enum
from typing import Any, Optional
from pydantic import BaseModel
from backend.data.block import BlockInput
class BlockCostType(str, Enum):
RUN = "run" # cost X credits per run
BYTE = "byte" # cost X credits per byte
SECOND = "second" # cost X credits per second
DOLLAR = "dollar" # cost X dollars per run
class BlockCost(BaseModel):
cost_amount: int
cost_filter: BlockInput
cost_type: BlockCostType
def __init__(
self,
cost_amount: int,
cost_type: BlockCostType = BlockCostType.RUN,
cost_filter: Optional[BlockInput] = None,
**data: Any,
) -> None:
super().__init__(
cost_amount=cost_amount,
cost_filter=cost_filter or {},
cost_type=cost_type,
**data,
)
|
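A brief usage sketch of the `BlockCost` model above; the filter keys and amounts are illustrative assumptions, not values from the codebase:

# Charge 5 credits per run, but only when the block's input matches the
# filter (keys/values here are hypothetical).
premium_cost = BlockCost(5, cost_filter={"model": "gpt-4"})
# Charge 1 credit per second of execution time.
timed_cost = BlockCost(1, cost_type=BlockCostType.SECOND)
print(premium_cost.cost_type, timed_cost.cost_type)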
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from parameterized import parameterized
from mmdet.models.roi_heads.mask_heads import FCNMaskHead
class TestFCNMaskHead(TestCase):
@parameterized.expand(['cpu', 'cuda'])
def test_get_seg_masks(self, device):
if device == 'cuda':
if not torch.cuda.is_available():
                self.skipTest('test requires GPU and torch+cuda')
num_classes = 6
mask_head = FCNMaskHead(
num_convs=1,
in_channels=1,
conv_out_channels=1,
num_classes=num_classes)
rcnn_test_cfg = ConfigDict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100,
mask_thr_binary=0.5)
s = 128
img_metas = {
'img_shape': (s, s, 3),
'scale_factor': (1, 1),
'ori_shape': (s, s, 3)
}
result = InstanceData(metainfo=img_metas)
num_samples = 2
mask_pred = [torch.rand((num_samples, num_classes, 14, 14)).to(device)]
result.bboxes = torch.rand((num_samples, 4)).to(device)
result.labels = torch.randint(
num_classes, (num_samples, ), dtype=torch.long).to(device)
mask_head.to(device=device)
result_list = mask_head.predict_by_feat(
mask_preds=tuple(mask_pred),
results_list=[result],
batch_img_metas=[img_metas],
rcnn_test_cfg=rcnn_test_cfg)
self.assertIsInstance(result_list[0], InstanceData)
self.assertEqual(len(result_list[0]), num_samples)
self.assertEqual(result_list[0].masks.shape, (num_samples, s, s))
# test with activate_map, `mask_pred` has been activated before
num_samples = 2
mask_pred = [torch.rand((num_samples, num_classes, 14, 14)).to(device)]
mask_pred = [m.sigmoid().detach() for m in mask_pred]
result.bboxes = torch.rand((num_samples, 4)).to(device)
result.labels = torch.randint(
num_classes, (num_samples, ), dtype=torch.long).to(device)
mask_head.to(device=device)
result_list = mask_head.predict_by_feat(
mask_preds=tuple(mask_pred),
results_list=[result],
batch_img_metas=[img_metas],
rcnn_test_cfg=rcnn_test_cfg,
activate_map=True)
self.assertIsInstance(result_list[0], InstanceData)
self.assertEqual(len(result_list[0]), num_samples)
self.assertEqual(result_list[0].masks.shape, (num_samples, s, s))
# num_samples is 0
num_samples = 0
result = InstanceData(metainfo=img_metas)
mask_pred = [torch.rand((num_samples, num_classes, 14, 14)).to(device)]
result.bboxes = torch.zeros((num_samples, 4)).to(device)
result.labels = torch.zeros((num_samples, )).to(device)
result_list = mask_head.predict_by_feat(
mask_preds=tuple(mask_pred),
results_list=[result],
batch_img_metas=[img_metas],
rcnn_test_cfg=rcnn_test_cfg)
self.assertIsInstance(result_list[0], InstanceData)
self.assertEqual(len(result_list[0]), num_samples)
self.assertEqual(result_list[0].masks.shape, (num_samples, s, s))
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmengine.data import InstanceData
from parameterized import parameterized
from mmdet.models.roi_heads.mask_heads import FCNMaskHead
class TestFCNMaskHead(TestCase):
@parameterized.expand(['cpu', 'cuda'])
def test_get_seg_masks(self, device):
if device == 'cuda':
if not torch.cuda.is_available():
                self.skipTest('test requires GPU and torch+cuda')
num_classes = 6
mask_head = FCNMaskHead(
num_convs=1,
in_channels=1,
conv_out_channels=1,
num_classes=num_classes)
rcnn_test_cfg = ConfigDict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100,
mask_thr_binary=0.5)
s = 128
img_metas = {
'img_shape': (s, s, 3),
'scale_factor': (1, 1),
'ori_shape': (s, s, 3)
}
result = InstanceData(metainfo=img_metas)
num_samples = 2
mask_pred = [torch.rand((num_samples, num_classes, 14, 14)).to(device)]
result.bboxes = torch.rand((num_samples, 4)).to(device)
result.labels = torch.randint(
num_classes, (num_samples, ), dtype=torch.long).to(device)
mask_head.to(device=device)
result_list = mask_head.predict_by_feat(
mask_preds=tuple(mask_pred),
results_list=[result],
batch_img_metas=[img_metas],
rcnn_test_cfg=rcnn_test_cfg)
self.assertIsInstance(result_list[0], InstanceData)
self.assertEqual(len(result_list[0]), num_samples)
self.assertEqual(result_list[0].masks.shape, (num_samples, s, s))
# test with activate_map, `mask_pred` has been activated before
num_samples = 2
mask_pred = [torch.rand((num_samples, num_classes, 14, 14)).to(device)]
mask_pred = [m.sigmoid().detach() for m in mask_pred]
result.bboxes = torch.rand((num_samples, 4)).to(device)
result.labels = torch.randint(
num_classes, (num_samples, ), dtype=torch.long).to(device)
mask_head.to(device=device)
result_list = mask_head.predict_by_feat(
mask_preds=tuple(mask_pred),
results_list=[result],
batch_img_metas=[img_metas],
rcnn_test_cfg=rcnn_test_cfg,
activate_map=True)
self.assertIsInstance(result_list[0], InstanceData)
self.assertEqual(len(result_list[0]), num_samples)
self.assertEqual(result_list[0].masks.shape, (num_samples, s, s))
# num_samples is 0
num_samples = 0
result = InstanceData(metainfo=img_metas)
mask_pred = [torch.rand((num_samples, num_classes, 14, 14)).to(device)]
result.bboxes = torch.zeros((num_samples, 4)).to(device)
result.labels = torch.zeros((num_samples, )).to(device)
result_list = mask_head.predict_by_feat(
mask_preds=tuple(mask_pred),
results_list=[result],
batch_img_metas=[img_metas],
rcnn_test_cfg=rcnn_test_cfg)
self.assertIsInstance(result_list[0], InstanceData)
self.assertEqual(len(result_list[0]), num_samples)
self.assertEqual(result_list[0].masks.shape, (num_samples, s, s))
|
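The device-parameterized tests above need to skip CUDA cases on CPU-only machines. A minimal self-contained sketch of that pattern; note that `self.skipTest` raises `unittest.SkipTest`, so the case is reported as skipped rather than silently passing:

import unittest

import torch
from parameterized import parameterized

class DeviceTest(unittest.TestCase):

    @parameterized.expand(['cpu', 'cuda'])
    def test_runs_on_device(self, device):
        if device == 'cuda' and not torch.cuda.is_available():
            self.skipTest('test requires GPU and torch+cuda')
        x = torch.ones(2, 2, device=device)
        self.assertEqual(x.sum().item(), 4.0)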
import collections
import json
import os
import string
from typing import Iterable, List
from .WordTokenizer import ENGLISH_STOP_WORDS, WordTokenizer
class WhitespaceTokenizer(WordTokenizer):
"""
    Simple and fast whitespace tokenizer. Splits a sentence on whitespace.
    Punctuation is stripped from tokens.
"""
def __init__(
self, vocab: Iterable[str] = [], stop_words: Iterable[str] = ENGLISH_STOP_WORDS, do_lower_case: bool = False
):
self.stop_words = set(stop_words)
self.do_lower_case = do_lower_case
self.set_vocab(vocab)
def get_vocab(self):
return self.vocab
def set_vocab(self, vocab: Iterable[str]):
self.vocab = vocab
self.word2idx = collections.OrderedDict([(word, idx) for idx, word in enumerate(vocab)])
def tokenize(self, text: str, **kwargs) -> List[int]:
if self.do_lower_case:
text = text.lower()
tokens = text.split()
tokens_filtered = []
for token in tokens:
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.strip(string.punctuation)
if token in self.stop_words:
continue
elif len(token) > 0 and token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.lower()
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
return tokens_filtered
def save(self, output_path: str):
with open(os.path.join(output_path, "whitespacetokenizer_config.json"), "w") as fOut:
json.dump(
{
"vocab": list(self.word2idx.keys()),
"stop_words": list(self.stop_words),
"do_lower_case": self.do_lower_case,
},
fOut,
)
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "whitespacetokenizer_config.json"), "r") as fIn:
config = json.load(fIn)
return WhitespaceTokenizer(**config)
|
from typing import List, Iterable
import collections
import string
import os
import json
from .WordTokenizer import WordTokenizer, ENGLISH_STOP_WORDS
class WhitespaceTokenizer(WordTokenizer):
"""
    Simple and fast whitespace tokenizer. Splits a sentence on whitespace.
    Punctuation is stripped from tokens.
"""
def __init__(
self, vocab: Iterable[str] = [], stop_words: Iterable[str] = ENGLISH_STOP_WORDS, do_lower_case: bool = False
):
self.stop_words = set(stop_words)
self.do_lower_case = do_lower_case
self.set_vocab(vocab)
def get_vocab(self):
return self.vocab
def set_vocab(self, vocab: Iterable[str]):
self.vocab = vocab
self.word2idx = collections.OrderedDict([(word, idx) for idx, word in enumerate(vocab)])
def tokenize(self, text: str, **kwargs) -> List[int]:
if self.do_lower_case:
text = text.lower()
tokens = text.split()
tokens_filtered = []
for token in tokens:
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.strip(string.punctuation)
if token in self.stop_words:
continue
elif len(token) > 0 and token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.lower()
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
return tokens_filtered
def save(self, output_path: str):
with open(os.path.join(output_path, "whitespacetokenizer_config.json"), "w") as fOut:
json.dump(
{
"vocab": list(self.word2idx.keys()),
"stop_words": list(self.stop_words),
"do_lower_case": self.do_lower_case,
},
fOut,
)
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "whitespacetokenizer_config.json"), "r") as fIn:
config = json.load(fIn)
return WhitespaceTokenizer(**config)
|
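A usage sketch of the `WhitespaceTokenizer` above, showing the three lookup fallbacks in `tokenize` (exact match, punctuation-stripped, then lowercased); the vocabulary is illustrative:

tok = WhitespaceTokenizer(vocab=["hello", "world"], do_lower_case=False)
# "Hello," only matches after stripping punctuation and lowercasing,
# "world!" after stripping punctuation; "the" is a stop word and is dropped.
print(tok.tokenize("Hello, the world!"))  # -> [0, 1]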
# ruff: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "2.19.0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# ruff: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "2.18.1.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
import os
from typing import BinaryIO, Optional, Union
import pyarrow.parquet as pq
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class ParquetDatasetReader(AbstractDatasetReader):
def __init__(
self,
path_or_paths: NestedDataStructureLike[PathLike],
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
keep_in_memory: bool = False,
streaming: bool = False,
num_proc: Optional[int] = None,
**kwargs,
):
super().__init__(
path_or_paths,
split=split,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
streaming=streaming,
num_proc=num_proc,
**kwargs,
)
path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
self.builder = Parquet(
cache_dir=cache_dir,
data_files=path_or_paths,
features=features,
hash=hash,
**kwargs,
)
def read(self):
# Build iterable dataset
if self.streaming:
dataset = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
download_config = None
download_mode = None
verification_mode = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
verification_mode=verification_mode,
# try_from_hf_gcs=try_from_hf_gcs,
base_path=base_path,
num_proc=self.num_proc,
)
dataset = self.builder.as_dataset(
split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
)
return dataset
class ParquetDatasetWriter:
def __init__(
self,
dataset: Dataset,
path_or_buf: Union[PathLike, BinaryIO],
batch_size: Optional[int] = None,
**parquet_writer_kwargs,
):
self.dataset = dataset
self.path_or_buf = path_or_buf
self.batch_size = batch_size
self.parquet_writer_kwargs = parquet_writer_kwargs
def write(self) -> int:
batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
with open(self.path_or_buf, "wb+") as buffer:
written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
else:
written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
return written
def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
"""Writes the pyarrow table as Parquet to a binary file handle.
Caller is responsible for opening and closing the handle.
"""
written = 0
_ = parquet_writer_kwargs.pop("path_or_buf", None)
schema = self.dataset.features.arrow_schema
writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)
for offset in logging.tqdm(
range(0, len(self.dataset), batch_size),
unit="ba",
disable=not logging.is_progress_bar_enabled(),
desc="Creating parquet from Arrow format",
):
batch = query_table(
table=self.dataset._data,
key=slice(offset, offset + batch_size),
indices=self.dataset._indices if self.dataset._indices is not None else None,
)
writer.write_table(batch)
written += batch.nbytes
writer.close()
return written
|
import os
from typing import BinaryIO, Optional, Union
import pyarrow.parquet as pq
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class ParquetDatasetReader(AbstractDatasetReader):
def __init__(
self,
path_or_paths: NestedDataStructureLike[PathLike],
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
keep_in_memory: bool = False,
streaming: bool = False,
num_proc: Optional[int] = None,
**kwargs,
):
super().__init__(
path_or_paths,
split=split,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
streaming=streaming,
num_proc=num_proc,
**kwargs,
)
path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
self.builder = Parquet(
cache_dir=cache_dir,
data_files=path_or_paths,
features=features,
hash=hash,
**kwargs,
)
def read(self):
# Build iterable dataset
if self.streaming:
dataset = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
download_config = None
download_mode = None
ignore_verifications = False
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
ignore_verifications=ignore_verifications,
# try_from_hf_gcs=try_from_hf_gcs,
base_path=base_path,
num_proc=self.num_proc,
)
dataset = self.builder.as_dataset(
split=self.split, ignore_verifications=ignore_verifications, in_memory=self.keep_in_memory
)
return dataset
class ParquetDatasetWriter:
def __init__(
self,
dataset: Dataset,
path_or_buf: Union[PathLike, BinaryIO],
batch_size: Optional[int] = None,
**parquet_writer_kwargs,
):
self.dataset = dataset
self.path_or_buf = path_or_buf
self.batch_size = batch_size
self.parquet_writer_kwargs = parquet_writer_kwargs
def write(self) -> int:
batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
with open(self.path_or_buf, "wb+") as buffer:
written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
else:
written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
return written
def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
"""Writes the pyarrow table as Parquet to a binary file handle.
Caller is responsible for opening and closing the handle.
"""
written = 0
_ = parquet_writer_kwargs.pop("path_or_buf", None)
schema = self.dataset.features.arrow_schema
writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)
for offset in logging.tqdm(
range(0, len(self.dataset), batch_size),
unit="ba",
disable=not logging.is_progress_bar_enabled(),
desc="Creating parquet from Arrow format",
):
batch = query_table(
table=self.dataset._data,
key=slice(offset, offset + batch_size),
indices=self.dataset._indices if self.dataset._indices is not None else None,
)
writer.write_table(batch)
written += batch.nbytes
writer.close()
return written
|
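For context, `Dataset.to_parquet` and `Dataset.from_parquet` are the public entry points that delegate to the writer and reader classes above. A minimal round-trip sketch (the file name is arbitrary):

from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b", "c"], "label": [0, 1, 0]})
# to_parquet/from_parquet wrap ParquetDatasetWriter/ParquetDatasetReader.
ds.to_parquet("data.parquet")
round_tripped = Dataset.from_parquet("data.parquet")
print(round_tripped.features)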
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.mobilenet_v3 import (
decode_predictions as decode_predictions,
)
from keras.src.applications.mobilenet_v3 import (
preprocess_input as preprocess_input,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.mobilenet_v3 import decode_predictions
from keras.src.applications.mobilenet_v3 import preprocess_input
|
import logging
import os
import signal
import sys
from abc import ABC, abstractmethod
from multiprocessing import Process, get_all_start_methods, set_start_method
from typing import Optional
from backend.util.logging import configure_logging
from backend.util.metrics import sentry_init
logger = logging.getLogger(__name__)
_SERVICE_NAME = "MainProcess"
def get_service_name():
return _SERVICE_NAME
def set_service_name(name: str):
global _SERVICE_NAME
_SERVICE_NAME = name
class AppProcess(ABC):
"""
A class to represent an object that can be executed in a background process.
"""
process: Optional[Process] = None
cleaned_up = False
if "forkserver" in get_all_start_methods():
set_start_method("forkserver", force=True)
else:
logger.warning("Forkserver start method is not available. Using spawn instead.")
set_start_method("spawn", force=True)
configure_logging()
sentry_init()
# Methods that are executed INSIDE the process #
@abstractmethod
def run(self):
"""
The method that will be executed in the process.
"""
pass
@classmethod
@property
def service_name(cls) -> str:
return cls.__name__
@abstractmethod
def cleanup(self):
"""
Implement this method on a subclass to do post-execution cleanup,
e.g. disconnecting from a database or terminating child processes.
"""
pass
def health_check(self) -> str:
"""
A method to check the health of the process.
"""
return "OK"
def execute_run_command(self, silent):
signal.signal(signal.SIGTERM, self._self_terminate)
signal.signal(signal.SIGINT, self._self_terminate)
try:
if silent:
sys.stdout = open(os.devnull, "w")
sys.stderr = open(os.devnull, "w")
set_service_name(self.service_name)
logger.info(f"[{self.service_name}] Starting...")
self.run()
except (KeyboardInterrupt, SystemExit) as e:
logger.warning(f"[{self.service_name}] Terminated: {e}; quitting...")
finally:
if not self.cleaned_up:
self.cleanup()
self.cleaned_up = True
logger.info(f"[{self.service_name}] Terminated.")
def _self_terminate(self, signum: int, frame):
if not self.cleaned_up:
self.cleanup()
self.cleaned_up = True
sys.exit(0)
# Methods that are executed OUTSIDE the process #
def __enter__(self):
self.start(background=True)
return self
def __exit__(self, *args, **kwargs):
self.stop()
def start(self, background: bool = False, silent: bool = False, **proc_args) -> int:
"""
Start the background process.
Args:
background: Whether to run the process in the background.
silent: Whether to disable stdout and stderr.
proc_args: Additional arguments to pass to the process.
Returns:
the process id or 0 if the process is not running in the background.
"""
if not background:
self.execute_run_command(silent)
return 0
self.process = Process(
name=self.__class__.__name__,
target=self.execute_run_command,
args=(silent,),
**proc_args,
)
self.process.start()
self.health_check()
logger.info(f"[{self.service_name}] started with PID {self.process.pid}")
return self.process.pid or 0
def stop(self):
"""
Stop the background process.
"""
if not self.process:
return
self.process.terminate()
self.process.join()
logger.info(f"[{self.service_name}] with PID {self.process.pid} stopped")
self.process = None
|
import logging
import os
import signal
import sys
from abc import ABC, abstractmethod
from multiprocessing import Process, set_start_method
from typing import Optional
from backend.util.logging import configure_logging
from backend.util.metrics import sentry_init
logger = logging.getLogger(__name__)
_SERVICE_NAME = "MainProcess"
def get_service_name():
return _SERVICE_NAME
def set_service_name(name: str):
global _SERVICE_NAME
_SERVICE_NAME = name
class AppProcess(ABC):
"""
A class to represent an object that can be executed in a background process.
"""
process: Optional[Process] = None
cleaned_up = False
set_start_method("spawn", force=True)
configure_logging()
sentry_init()
# Methods that are executed INSIDE the process #
@abstractmethod
def run(self):
"""
The method that will be executed in the process.
"""
pass
@classmethod
@property
def service_name(cls) -> str:
return cls.__name__
@abstractmethod
def cleanup(self):
"""
Implement this method on a subclass to do post-execution cleanup,
e.g. disconnecting from a database or terminating child processes.
"""
pass
def health_check(self) -> str:
"""
A method to check the health of the process.
"""
return "OK"
def execute_run_command(self, silent):
signal.signal(signal.SIGTERM, self._self_terminate)
signal.signal(signal.SIGINT, self._self_terminate)
try:
if silent:
sys.stdout = open(os.devnull, "w")
sys.stderr = open(os.devnull, "w")
set_service_name(self.service_name)
logger.info(f"[{self.service_name}] Starting...")
self.run()
except (KeyboardInterrupt, SystemExit) as e:
logger.warning(f"[{self.service_name}] Terminated: {e}; quitting...")
finally:
if not self.cleaned_up:
self.cleanup()
self.cleaned_up = True
logger.info(f"[{self.service_name}] Terminated.")
def _self_terminate(self, signum: int, frame):
if not self.cleaned_up:
self.cleanup()
self.cleaned_up = True
sys.exit(0)
# Methods that are executed OUTSIDE the process #
def __enter__(self):
self.start(background=True)
return self
def __exit__(self, *args, **kwargs):
self.stop()
def start(self, background: bool = False, silent: bool = False, **proc_args) -> int:
"""
Start the background process.
Args:
background: Whether to run the process in the background.
silent: Whether to disable stdout and stderr.
proc_args: Additional arguments to pass to the process.
Returns:
the process id or 0 if the process is not running in the background.
"""
if not background:
self.execute_run_command(silent)
return 0
self.process = Process(
name=self.__class__.__name__,
target=self.execute_run_command,
args=(silent,),
**proc_args,
)
self.process.start()
self.health_check()
logger.info(f"[{self.service_name}] started with PID {self.process.pid}")
return self.process.pid or 0
def stop(self):
"""
Stop the background process.
"""
if not self.process:
return
self.process.terminate()
self.process.join()
logger.info(f"[{self.service_name}] with PID {self.process.pid} stopped")
self.process = None
|
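A minimal sketch of subclassing the abstract `AppProcess` above; the service is hypothetical, and the context-manager usage is shown commented out since it would actually spawn a process:

import logging
import time

log = logging.getLogger(__name__)

class HeartbeatProcess(AppProcess):
    """A toy service that logs a heartbeat until terminated (illustrative only)."""

    def run(self):
        while True:
            log.info(f"[{self.service_name}] heartbeat")
            time.sleep(5)

    def cleanup(self):
        log.info(f"[{self.service_name}] releasing resources")

# __enter__/__exit__ start the process in the background and stop it on exit:
# with HeartbeatProcess() as proc:
#     ...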
# Copyright (c) OpenMMLab. All rights reserved.
from .base_boxes import BaseBoxes
from .bbox_overlaps import bbox_overlaps
from .box_type import (autocast_box_type, convert_box_type, get_box_type,
register_box, register_box_converter)
from .horizontal_boxes import HorizontalBoxes
from .transforms import (bbox2corner, bbox2distance, bbox2result, bbox2roi,
bbox_cxcywh_to_xyxy, bbox_flip, bbox_mapping,
bbox_mapping_back, bbox_project, bbox_rescale,
bbox_xyxy_to_cxcywh, corner2bbox, distance2bbox,
find_inside_bboxes, roi2bbox)
__all__ = [
'bbox_overlaps', 'bbox_flip', 'bbox_mapping', 'bbox_mapping_back',
'bbox2roi', 'roi2bbox', 'bbox2result', 'distance2bbox', 'bbox2distance',
'bbox_rescale', 'bbox_cxcywh_to_xyxy', 'bbox_xyxy_to_cxcywh',
'find_inside_bboxes', 'bbox2corner', 'corner2bbox', 'bbox_project',
'BaseBoxes', 'convert_box_type', 'get_box_type', 'register_box',
'register_box_converter', 'HorizontalBoxes', 'autocast_box_type'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_boxes import BaseBoxes
from .bbox_overlaps import bbox_overlaps
from .box_type import (convert_box_type, get_box_type, register_box,
register_box_converter)
from .horizontal_boxes import HorizontalBoxes
from .transforms import (bbox2corner, bbox2distance, bbox2result, bbox2roi,
bbox_cxcywh_to_xyxy, bbox_flip, bbox_mapping,
bbox_mapping_back, bbox_project, bbox_rescale,
bbox_xyxy_to_cxcywh, corner2bbox, distance2bbox,
find_inside_bboxes, roi2bbox)
__all__ = [
'bbox_overlaps', 'bbox_flip', 'bbox_mapping', 'bbox_mapping_back',
'bbox2roi', 'roi2bbox', 'bbox2result', 'distance2bbox', 'bbox2distance',
'bbox_rescale', 'bbox_cxcywh_to_xyxy', 'bbox_xyxy_to_cxcywh',
'find_inside_bboxes', 'bbox2corner', 'corner2bbox', 'bbox_project',
'BaseBoxes', 'convert_box_type', 'get_box_type', 'register_box',
'register_box_converter', 'HorizontalBoxes'
]
|
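A small numeric sketch of one of the transforms re-exported above, assuming the imports in that module are in scope:

import torch

# Convert one box from (cx, cy, w, h) to (x1, y1, x2, y2).
box = torch.tensor([[10., 10., 4., 6.]])
print(bbox_cxcywh_to_xyxy(box))  # tensor([[ 8.,  7., 12., 13.]])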
# Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 11 root registries to support using modules across
projects.
More datails can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from .registry import Registry
# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner`
RUNNERS = Registry('runner')
# manage runner constructors that define how to initialize runners
RUNNER_CONSTRUCTORS = Registry('runner constructor')
# manage all kinds of loops like `EpochBasedTrainLoop`
LOOPS = Registry('loop')
# manage all kinds of hooks like `CheckpointHook`
HOOKS = Registry('hook')
# manage data-related modules
DATASETS = Registry('dataset')
DATA_SAMPLERS = Registry('data sampler')
TRANSFORMS = Registry('transform')
# manage all kinds of modules inheriting `nn.Module`
MODELS = Registry('model')
# manage all kinds of model wrappers like 'MMDistributedDataParallel'
MODEL_WRAPPERS = Registry('model_wrapper')
# manage all kinds of weight initialization modules like `Uniform`
WEIGHT_INITIALIZERS = Registry('weight initializer')
# manage all kinds of optimizers like `SGD` and `Adam`
OPTIMIZERS = Registry('optimizer')
# manage constructors that customize the optimization hyperparameters.
OPTIMIZER_CONSTRUCTORS = Registry('optimizer constructor')
# manage all kinds of parameter schedulers like `MultiStepLR`
PARAM_SCHEDULERS = Registry('parameter scheduler')
# manage all kinds of metrics
METRICS = Registry('metric')
# manage task-specific modules like anchor generators and box coders
TASK_UTILS = Registry('task util')
# manage visualizer
VISUALIZERS = Registry('visualizer')
# manage visualizer backend
VISBACKENDS = Registry('vis_backend')
|
# Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 11 root registries to support using modules across
projects.
More datails can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from .registry import Registry
# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner`
RUNNERS = Registry('runner')
# manage runner constructors that define how to initialize runners
RUNNER_CONSTRUCTORS = Registry('runner constructor')
# manage all kinds of loops like `EpochBasedTrainLoop`
LOOPS = Registry('loop')
# manage all kinds of hooks like `CheckpointHook`
HOOKS = Registry('hook')
# manage data-related modules
DATASETS = Registry('dataset')
DATA_SAMPLERS = Registry('data sampler')
TRANSFORMS = Registry('transform')
# manage all kinds of modules inheriting `nn.Module`
MODELS = Registry('model')
# manage all kinds of model wrappers like 'MMDistributedDataParallel'
MODEL_WRAPPERS = Registry('model_wrapper')
# manage all kinds of weight initialization modules like `Uniform`
WEIGHT_INITIALIZERS = Registry('weight initializer')
# manage all kinds of optimizers like `SGD` and `Adam`
OPTIMIZERS = Registry('optimizer')
# manage constructors that customize the optimization hyperparameters.
OPTIMIZER_CONSTRUCTORS = Registry('optimizer constructor')
# manage all kinds of parameter schedulers like `MultiStepLR`
PARAM_SCHEDULERS = Registry('parameter scheduler')
# manage all kinds of metrics
METRICS = Registry('metric')
# manage task-specific modules like anchor generators and box coders
TASK_UTILS = Registry('task util')
# manage visualizer
VISUALIZERS = Registry('visualizer')
# manage writer
WRITERS = Registry('writer')
|
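A minimal sketch of how these root registries are used, with a hypothetical module; `Registry.build` instantiates a class from a config dict whose `type` key names the registered class:

import torch.nn as nn

@MODELS.register_module()
class TinyHead(nn.Module):  # hypothetical module, registered under its class name
    def __init__(self, in_channels=8, num_classes=2):
        super().__init__()
        self.fc = nn.Linear(in_channels, num_classes)

    def forward(self, x):
        return self.fc(x)

# Build an instance from a config dict; `type` selects the registered class.
head = MODELS.build(dict(type='TinyHead', in_channels=16, num_classes=4))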
from collections import defaultdict
from typing import TYPE_CHECKING, Optional
from google.protobuf.json_format import MessageToDict
from google.protobuf.struct_pb2 import Struct
from docarray.proto.io.ndarray import flush_ndarray, read_ndarray
from docarray.proto.docarray_pb2 import NdArrayProto, DocumentProto
if TYPE_CHECKING:
from docarray import Document
def parse_proto(pb_msg: 'DocumentProto') -> 'Document':
from docarray import Document
from docarray.score import NamedScore
fields = {}
for (field, value) in pb_msg.ListFields():
f_name = field.name
if f_name == 'chunks' or f_name == 'matches':
fields[f_name] = [Document.from_protobuf(d) for d in value]
elif isinstance(value, NdArrayProto):
fields[f_name] = read_ndarray(value)
elif isinstance(value, Struct):
fields[f_name] = MessageToDict(value, preserving_proto_field_name=True)
elif f_name == 'location':
fields[f_name] = list(value)
elif f_name == 'scores' or f_name == 'evaluations':
fields[f_name] = defaultdict(NamedScore)
for k, v in value.items():
fields[f_name][k] = NamedScore(
{ff.name: vv for (ff, vv) in v.ListFields()}
)
else:
fields[f_name] = value
return Document(**fields)
def flush_proto(doc: 'Document', ndarray_type: Optional[str] = None) -> 'DocumentProto':
pb_msg = DocumentProto()
for key in doc.non_empty_fields:
try:
value = getattr(doc, key)
if key in ('tensor', 'embedding'):
flush_ndarray(getattr(pb_msg, key), value, ndarray_type=ndarray_type)
elif key in ('chunks', 'matches'):
for d in value:
d: Document
docs = getattr(pb_msg, key)
docs.append(d.to_protobuf())
elif key == 'tags':
pb_msg.tags.update(value)
elif key == '_metadata':
pb_msg._metadata.update(value)
elif key in ('scores', 'evaluations'):
for kk, vv in value.items():
for ff in vv.non_empty_fields:
setattr(getattr(pb_msg, key)[kk], ff, getattr(vv, ff))
elif key == 'location':
pb_msg.location.extend(value)
elif key == 'content':
pass # intentionally ignore `content` field as it is just a proxy
else:
# other simple fields
setattr(pb_msg, key, value)
except RecursionError as ex:
if len(ex.args) >= 1:
ex.args = (
f'Field `{key}` contains cyclic reference in memory. '
f'Could it be your Document is referring to itself?',
)
raise
except Exception as ex:
if len(ex.args) >= 1:
ex.args = (f'Field `{key}` is problematic',) + ex.args
raise
return pb_msg
|
from collections import defaultdict
from typing import TYPE_CHECKING, Optional
from google.protobuf.json_format import MessageToDict
from google.protobuf.struct_pb2 import Struct
from .ndarray import flush_ndarray, read_ndarray
from ..docarray_pb2 import NdArrayProto, DocumentProto
if TYPE_CHECKING:
from ... import Document
def parse_proto(pb_msg: 'DocumentProto') -> 'Document':
from ... import Document
from ...score import NamedScore
fields = {}
for (field, value) in pb_msg.ListFields():
f_name = field.name
if f_name == 'chunks' or f_name == 'matches':
fields[f_name] = [Document.from_protobuf(d) for d in value]
elif isinstance(value, NdArrayProto):
fields[f_name] = read_ndarray(value)
elif isinstance(value, Struct):
fields[f_name] = MessageToDict(value, preserving_proto_field_name=True)
elif f_name == 'location':
fields[f_name] = list(value)
elif f_name == 'scores' or f_name == 'evaluations':
fields[f_name] = defaultdict(NamedScore)
for k, v in value.items():
fields[f_name][k] = NamedScore(
{ff.name: vv for (ff, vv) in v.ListFields()}
)
else:
fields[f_name] = value
return Document(**fields)
def flush_proto(doc: 'Document', ndarray_type: Optional[str] = None) -> 'DocumentProto':
pb_msg = DocumentProto()
for key in doc.non_empty_fields:
try:
value = getattr(doc, key)
if key in ('tensor', 'embedding'):
flush_ndarray(getattr(pb_msg, key), value, ndarray_type=ndarray_type)
elif key in ('chunks', 'matches'):
for d in value:
d: Document
docs = getattr(pb_msg, key)
docs.append(d.to_protobuf())
elif key == 'tags':
pb_msg.tags.update(value)
elif key == '_metadata':
pb_msg._metadata.update(value)
elif key in ('scores', 'evaluations'):
for kk, vv in value.items():
for ff in vv.non_empty_fields:
setattr(getattr(pb_msg, key)[kk], ff, getattr(vv, ff))
elif key == 'location':
pb_msg.location.extend(value)
elif key == 'content':
pass # intentionally ignore `content` field as it is just a proxy
else:
# other simple fields
setattr(pb_msg, key, value)
except RecursionError as ex:
if len(ex.args) >= 1:
ex.args = (
f'Field `{key}` contains cyclic reference in memory. '
f'Could it be your Document is referring to itself?',
)
raise
except Exception as ex:
if len(ex.args) >= 1:
ex.args = (f'Field `{key}` is problematic',) + ex.args
raise
return pb_msg
|
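`Document.to_protobuf` and `Document.from_protobuf` are the public wrappers around `flush_proto` and `parse_proto` above. A round-trip sketch (field values are arbitrary):

import numpy as np
from docarray import Document

d = Document(text='hello', embedding=np.ones(4), tags={'split': 'train'})
pb = d.to_protobuf()             # delegates to flush_proto
d2 = Document.from_protobuf(pb)  # delegates to parse_proto
assert d2.text == d.text and d2.tags['split'] == 'train'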
from backend.app import run_processes
from backend.executor import DatabaseManager, Scheduler
from backend.notifications.notifications import NotificationManager
from backend.server.rest_api import AgentServer
def main():
"""
Run all the processes required for the AutoGPT-server REST API.
"""
run_processes(
NotificationManager(),
DatabaseManager(),
Scheduler(),
AgentServer(),
)
if __name__ == "__main__":
main()
|
from backend.app import run_processes
from backend.executor import DatabaseManager, ExecutionScheduler
from backend.notifications.notifications import NotificationManager
from backend.server.rest_api import AgentServer
def main():
"""
Run all the processes required for the AutoGPT-server REST API.
"""
run_processes(
NotificationManager(),
DatabaseManager(),
ExecutionScheduler(),
AgentServer(),
)
if __name__ == "__main__":
main()
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.amadeus.toolkit import AmadeusToolkit
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"AmadeusToolkit": "langchain_community.agent_toolkits.amadeus.toolkit",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = ["AmadeusToolkit"]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.amadeus.toolkit import AmadeusToolkit
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"AmadeusToolkit": "langchain_community.agent_toolkits.amadeus.toolkit"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = ["AmadeusToolkit"]
|
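The lookup-table pattern above can be reproduced with a plain PEP 562 module-level `__getattr__`; a generic sketch with hypothetical names:

import importlib
import warnings

_DEPRECATED = {"OldName": ("new_package.new_module", "NewName")}  # illustrative

def __getattr__(name: str):
    if name in _DEPRECATED:
        module_path, new_name = _DEPRECATED[name]
        warnings.warn(
            f"{name} is deprecated; import {new_name} from {module_path} instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        return getattr(importlib.import_module(module_path), new_name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")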
"""
ReAct agent.
Simple wrapper around AgentRunner + ReActAgentWorker.
For the legacy implementation see:
```python
from llama_index.core.agent.legacy.react.base import ReActAgent
```
"""
|
"""ReAct agent.
Simple wrapper around AgentRunner + ReActAgentWorker.
For the legacy implementation see:
```python
from llama_index.core.agent.legacy.react.base import ReActAgent
```
"""
|
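A brief usage sketch of the wrapper described above; the tool is hypothetical, and an LLM must be configured separately (e.g. via `Settings.llm`), so the chat call is shown commented out:

from llama_index.core.agent import ReActAgent
from llama_index.core.tools import FunctionTool

def multiply(a: int, b: int) -> int:
    """Multiply two integers."""
    return a * b

agent = ReActAgent.from_tools(
    [FunctionTool.from_defaults(fn=multiply)],
    verbose=True,
)
# response = agent.chat("What is 6 times 7?")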
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '3.3.0'
short_version = __version__
def parse_version_info(version_str):
"""Parse a version string into a tuple.
Args:
version_str (str): The version string.
Returns:
tuple[int | str]: The version info, e.g., "1.3.0" is parsed into
(1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1').
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '3.2.0'
short_version = __version__
def parse_version_info(version_str):
"""Parse a version string into a tuple.
Args:
version_str (str): The version string.
Returns:
tuple[int | str]: The version info, e.g., "1.3.0" is parsed into
(1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1').
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
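Two quick checks of what `parse_version_info` above returns for a release and a release-candidate string:

assert parse_version_info('1.3.0') == (1, 3, 0)
assert parse_version_info('2.0.0rc1') == (2, 0, 0, 'rc1')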
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
import numpy as np
import pytest
import torch
import torchvision.models.video as models
from jina import Document, DocumentArray, Executor
from torchvision import transforms
from ...video_torch_encoder import (
ConvertFCHWtoCFHW,
ConvertFHWCtoFCHW,
VideoTorchEncoder,
)
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.default_batch_size == 32
@pytest.mark.parametrize('model_name', ['r3d_18', 'mc3_18', 'r2plus1d_18'])
def test_video_torch_encoder(model_name):
ex = VideoTorchEncoder(model_name=model_name, use_default_preprocessing=False)
da = DocumentArray(
[Document(blob=np.random.random((3, 2, 224, 224))) for _ in range(10)]
)
ex.encode(da, {})
assert len(da) == 10
for doc in da:
assert doc.embedding.shape == (512,)
@pytest.mark.parametrize('batch_size', [1, 3, 10])
def test_video_torch_encoder_traversal_paths(batch_size):
ex = VideoTorchEncoder(use_default_preprocessing=False)
def _create_doc_with_video_chunks():
d = Document(blob=np.random.random((3, 2, 112, 112)))
d.chunks = [Document(blob=np.random.random((3, 2, 112, 112))) for _ in range(5)]
return d
da = DocumentArray([_create_doc_with_video_chunks() for _ in range(10)])
ex.encode(da, {'traversal_paths': ['r', 'c'], 'batch_size': batch_size})
assert len(da) == 10
for doc in da:
assert doc.embedding.shape == (512,)
assert len(doc.chunks) == 5
for chunk in doc.chunks:
assert chunk.embedding.shape == (512,)
@pytest.mark.parametrize('model_name', ['r3d_18', 'mc3_18', 'r2plus1d_18'])
def test_video_torch_encoder_use_default_preprocessing(model_name):
ex = VideoTorchEncoder(model_name=model_name, use_default_preprocessing=True)
da = DocumentArray(
[Document(blob=np.random.random((10, 270, 480, 3))) for _ in range(10)]
)
ex.encode(da, {})
assert len(da) == 10
for doc in da:
assert doc.embedding.shape == (512,)
@pytest.fixture()
def kinects_videos():
from torchvision.datasets import Kinetics400
dataset = Kinetics400(
root=Path(__file__).parents[1] / 'data/kinetics400', frames_per_clip=20
)
return [dataset[0][0], dataset[0][0]]
@pytest.mark.parametrize('model_name', ['mc3_18', 'r2plus1d_18', 'r3d_18'])
def test_with_dataset_video(model_name, kinects_videos):
da = DocumentArray(
[Document(blob=video.detach().numpy()) for video in kinects_videos]
)
ex = VideoTorchEncoder(use_default_preprocessing=True, model_name=model_name)
ex.encode(da, {})
assert len(da) == 2
for doc in da:
assert doc.embedding.shape == (512,)
model = getattr(models, model_name)(pretrained=True).eval()
mean = (0.43216, 0.394666, 0.37645)
std = (0.22803, 0.22145, 0.216989)
resize_size = (128, 171)
crop_size = (112, 112)
t = transforms.Compose(
[
ConvertFHWCtoFCHW(),
transforms.ConvertImageDtype(torch.float32),
transforms.Resize(resize_size),
transforms.Normalize(mean=mean, std=std),
transforms.CenterCrop(crop_size),
ConvertFCHWtoCFHW(),
]
)
tensor = torch.stack([t(video) for video in kinects_videos])
def _get_embeddings(x) -> torch.Tensor:
embeddings = torch.Tensor()
def get_activation(model, model_input, output):
nonlocal embeddings
embeddings = output
handle = model.avgpool.register_forward_hook(get_activation)
model(x)
handle.remove()
return embeddings.flatten(1)
embedding_batch = _get_embeddings(tensor)
for doc, expected_torch_embedding in zip(da, embedding_batch):
np.testing.assert_almost_equal(
doc.embedding, expected_torch_embedding.detach().numpy()
)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
import pytest
import torch
import numpy as np
import torchvision.models.video as models
from torchvision import transforms
from jina import Document, DocumentArray, Executor
from ...video_torch_encoder import (
VideoTorchEncoder,
ConvertFHWCtoFCHW,
ConvertFCHWtoCFHW,
)
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.default_batch_size == 32
@pytest.mark.parametrize('model_name', ['r3d_18', 'mc3_18', 'r2plus1d_18'])
def test_video_torch_encoder(model_name):
ex = VideoTorchEncoder(model_name=model_name, use_default_preprocessing=False)
da = DocumentArray(
[Document(blob=np.random.random((3, 2, 224, 224))) for _ in range(10)]
)
ex.encode(da, {})
assert len(da) == 10
for doc in da:
assert doc.embedding.shape == (512,)
@pytest.mark.parametrize('batch_size', [1, 3, 10])
def test_video_torch_encoder_traversal_paths(batch_size):
ex = VideoTorchEncoder(use_default_preprocessing=False)
def _create_doc_with_video_chunks():
d = Document(blob=np.random.random((3, 2, 112, 112)))
d.chunks = [Document(blob=np.random.random((3, 2, 112, 112))) for _ in range(5)]
return d
da = DocumentArray([_create_doc_with_video_chunks() for _ in range(10)])
ex.encode(da, {'traversal_paths': ['r', 'c'], 'batch_size': batch_size})
assert len(da) == 10
for doc in da:
assert doc.embedding.shape == (512,)
assert len(doc.chunks) == 5
for chunk in doc.chunks:
assert chunk.embedding.shape == (512,)
@pytest.mark.parametrize('model_name', ['r3d_18', 'mc3_18', 'r2plus1d_18'])
def test_video_torch_encoder_use_default_preprocessing(model_name):
ex = VideoTorchEncoder(model_name=model_name, use_default_preprocessing=True)
da = DocumentArray(
[Document(blob=np.random.random((10, 270, 480, 3))) for _ in range(10)]
)
ex.encode(da, {})
assert len(da) == 10
for doc in da:
assert doc.embedding.shape == (512,)
@pytest.fixture()
def kinects_videos():
from torchvision.datasets import Kinetics400
dataset = Kinetics400(
root=Path(__file__).parents[1] / 'data/kinetics400', frames_per_clip=20
)
return [dataset[0][0], dataset[0][0]]
@pytest.mark.parametrize('model_name', ['mc3_18', 'r2plus1d_18', 'r3d_18'])
def test_with_dataset_video(model_name, kinects_videos):
da = DocumentArray(
[Document(blob=video.detach().numpy()) for video in kinects_videos]
)
ex = VideoTorchEncoder(use_default_preprocessing=True, model_name=model_name)
ex.encode(da, {})
assert len(da) == 2
for doc in da:
assert doc.embedding.shape == (512,)
model = getattr(models, model_name)(pretrained=True).eval()
mean = (0.43216, 0.394666, 0.37645)
std = (0.22803, 0.22145, 0.216989)
resize_size = (128, 171)
crop_size = (112, 112)
t = transforms.Compose(
[
ConvertFHWCtoFCHW(),
transforms.ConvertImageDtype(torch.float32),
transforms.Resize(resize_size),
transforms.Normalize(mean=mean, std=std),
transforms.CenterCrop(crop_size),
ConvertFCHWtoCFHW(),
]
)
tensor = torch.stack([t(video) for video in kinects_videos])
def _get_embeddings(x) -> torch.Tensor:
embeddings = torch.Tensor()
def get_activation(model, model_input, output):
nonlocal embeddings
embeddings = output
handle = model.avgpool.register_forward_hook(get_activation)
model(x)
handle.remove()
return embeddings.flatten(1)
embedding_batch = _get_embeddings(tensor)
for doc, expected_torch_embedding in zip(da, embedding_batch):
np.testing.assert_almost_equal(
doc.embedding, expected_torch_embedding.detach().numpy()
)
|
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.embeddings.text_embeddings_inference import TextEmbeddingsInference
def test_text_inference_embedding_class():
names_of_base_classes = [b.__name__ for b in TextEmbeddingsInference.__mro__]
assert BaseEmbedding.__name__ in names_of_base_classes
def test_text_inference_embedding_init():
text_inference = TextEmbeddingsInference(
model_name="some-model",
base_url="some-url",
text_instruction="some-text-instruction",
query_instruction="some-query-instruction",
embed_batch_size=42,
timeout=42.0,
truncate_text=False,
auth_token="some-token",
endpoint="some-endpoint",
)
assert text_inference.model_name == "some-model"
assert text_inference.base_url == "some-url"
assert text_inference.text_instruction == "some-text-instruction"
assert text_inference.query_instruction == "some-query-instruction"
assert text_inference.embed_batch_size == 42
assert int(text_inference.timeout) == 42
assert text_inference.truncate_text is False
assert text_inference.auth_token == "some-token"
assert text_inference.endpoint == "some-endpoint"
|
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.embeddings.text_embeddings_inference import TextEmbeddingsInference
def test_text_inference_embedding_class():
names_of_base_classes = [b.__name__ for b in TextEmbeddingsInference.__mro__]
assert BaseEmbedding.__name__ in names_of_base_classes
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
from unittest import TestCase
from unittest.mock import Mock
import torch
import torch.nn as nn
from torch.utils.data import Dataset
from mmengine.hooks import EMAHook
from mmengine.model import ExponentialMovingAverage
from mmengine.optim import OptimWrapper
from mmengine.registry import DATASETS, MODEL_WRAPPERS
from mmengine.runner import Runner
class ToyModel(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(2, 1)
def forward(self, data_batch, return_loss=False):
inputs, labels = [], []
for x in data_batch:
inputs.append(x['inputs'])
labels.append(x['data_sample'])
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
inputs = torch.stack(inputs).to(device)
labels = torch.stack(labels).to(device)
outputs = self.linear(inputs)
if return_loss:
loss = (labels - outputs).sum()
outputs = dict(loss=loss, log_vars=dict(loss=loss.item()))
return outputs
else:
outputs = dict(log_vars=dict(a=1, b=0.5))
return outputs
@DATASETS.register_module()
class DummyDataset(Dataset):
METAINFO = dict() # type: ignore
data = torch.randn(12, 2)
label = torch.ones(12)
def __len__(self):
return self.data.size(0)
def __getitem__(self, index):
return dict(inputs=self.data[index], data_sample=self.label[index])
class TestEMAHook(TestCase):
def setUp(self):
self.temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.temp_dir.cleanup()
def test_ema_hook(self):
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
model = ToyModel().to(device)
evaluator = Mock()
evaluator.evaluate = Mock(return_value=dict(acc=0.5))
runner = Runner(
model=model,
train_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=True),
batch_size=3,
num_workers=0),
val_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=False),
batch_size=3,
num_workers=0),
val_evaluator=evaluator,
work_dir=self.temp_dir.name,
optim_wrapper=OptimWrapper(
torch.optim.Adam(ToyModel().parameters())),
train_cfg=dict(by_epoch=True, max_epochs=2),
val_cfg=dict(interval=1),
default_hooks=dict(logger=None),
custom_hooks=[dict(type='EMAHook', )],
experiment_name='test1')
runner.train()
for hook in runner.hooks:
if isinstance(hook, EMAHook):
self.assertTrue(
isinstance(hook.ema_model, ExponentialMovingAverage))
self.assertTrue(
osp.exists(osp.join(self.temp_dir.name, 'epoch_2.pth')))
checkpoint = torch.load(osp.join(self.temp_dir.name, 'epoch_2.pth'))
self.assertTrue('ema_state_dict' in checkpoint)
self.assertTrue(checkpoint['ema_state_dict']['steps'] == 8)
# load and testing
runner = Runner(
model=model,
test_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=True),
batch_size=3,
num_workers=0),
test_evaluator=evaluator,
test_cfg=dict(),
work_dir=self.temp_dir.name,
load_from=osp.join(self.temp_dir.name, 'epoch_2.pth'),
default_hooks=dict(logger=None),
custom_hooks=[dict(type='EMAHook')],
experiment_name='test2')
runner.test()
@MODEL_WRAPPERS.register_module()
class DummyWrapper(nn.Module):
def __init__(self, model):
super().__init__()
self.module = model
def forward(self, *args, **kwargs):
return self.module(*args, **kwargs)
# with model wrapper
runner = Runner(
model=DummyWrapper(model),
test_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=True),
batch_size=3,
num_workers=0),
test_evaluator=evaluator,
test_cfg=dict(),
work_dir=self.temp_dir.name,
load_from=osp.join(self.temp_dir.name, 'epoch_2.pth'),
default_hooks=dict(logger=None),
custom_hooks=[dict(type='EMAHook')],
experiment_name='test3')
runner.test()
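# A minimal sketch of the exponential-moving-average update that `EMAHook`
# maintains for the model parameters. The momentum value below is an assumed
# illustration, not necessarily the mmengine default.
if __name__ == '__main__':
    momentum = 0.0002
    ema_param = torch.zeros(3)
    param = torch.ones(3)
    for _ in range(8):  # e.g. 8 optimizer steps, matching ``steps == 8`` above
        # averaged = (1 - momentum) * averaged + momentum * current
        ema_param.mul_(1 - momentum).add_(param, alpha=momentum)
    print(ema_param)  # drifts slowly from 0 towards ``param``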
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
from unittest import TestCase
from unittest.mock import Mock
import torch
import torch.nn as nn
from torch.utils.data import Dataset
from mmengine.hooks import EMAHook
from mmengine.model import ExponentialMovingAverage
from mmengine.registry import DATASETS, MODEL_WRAPPERS
from mmengine.runner import Runner
class ToyModel(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(2, 1)
def forward(self, data_batch, return_loss=False):
inputs, labels = [], []
for x in data_batch:
inputs.append(x['inputs'])
labels.append(x['data_sample'])
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
inputs = torch.stack(inputs).to(device)
labels = torch.stack(labels).to(device)
outputs = self.linear(inputs)
if return_loss:
loss = (labels - outputs).sum()
outputs = dict(loss=loss, log_vars=dict(loss=loss.item()))
return outputs
else:
outputs = dict(log_vars=dict(a=1, b=0.5))
return outputs
@DATASETS.register_module()
class DummyDataset(Dataset):
METAINFO = dict() # type: ignore
data = torch.randn(12, 2)
label = torch.ones(12)
def __len__(self):
return self.data.size(0)
def __getitem__(self, index):
return dict(inputs=self.data[index], data_sample=self.label[index])
class TestEMAHook(TestCase):
def setUp(self):
self.temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.temp_dir.cleanup()
def test_ema_hook(self):
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
model = ToyModel().to(device)
evaluator = Mock()
evaluator.evaluate = Mock(return_value=dict(acc=0.5))
runner = Runner(
model=model,
train_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=True),
batch_size=3,
num_workers=0),
val_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=False),
batch_size=3,
num_workers=0),
val_evaluator=evaluator,
work_dir=self.temp_dir.name,
optimizer=torch.optim.Adam(ToyModel().parameters()),
train_cfg=dict(by_epoch=True, max_epochs=2),
val_cfg=dict(interval=1),
default_hooks=dict(logger=None),
custom_hooks=[dict(type='EMAHook', )],
experiment_name='test1')
runner.train()
for hook in runner.hooks:
if isinstance(hook, EMAHook):
self.assertTrue(
isinstance(hook.ema_model, ExponentialMovingAverage))
self.assertTrue(
osp.exists(osp.join(self.temp_dir.name, 'epoch_2.pth')))
checkpoint = torch.load(osp.join(self.temp_dir.name, 'epoch_2.pth'))
self.assertTrue('ema_state_dict' in checkpoint)
self.assertTrue(checkpoint['ema_state_dict']['steps'] == 8)
# load and testing
runner = Runner(
model=model,
test_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=True),
batch_size=3,
num_workers=0),
test_evaluator=evaluator,
test_cfg=dict(),
work_dir=self.temp_dir.name,
load_from=osp.join(self.temp_dir.name, 'epoch_2.pth'),
default_hooks=dict(logger=None),
custom_hooks=[dict(type='EMAHook')],
experiment_name='test2')
runner.test()
@MODEL_WRAPPERS.register_module()
class DummyWrapper(nn.Module):
def __init__(self, model):
super().__init__()
self.module = model
def forward(self, *args, **kwargs):
return self.module(*args, **kwargs)
# with model wrapper
runner = Runner(
model=DummyWrapper(model),
test_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=True),
batch_size=3,
num_workers=0),
test_evaluator=evaluator,
test_cfg=dict(),
work_dir=self.temp_dir.name,
load_from=osp.join(self.temp_dir.name, 'epoch_2.pth'),
default_hooks=dict(logger=None),
custom_hooks=[dict(type='EMAHook')],
experiment_name='test3')
runner.test()
|
import collections
import torch
from torch.utils._ordered_set import OrderedSet
def _end_ptr(tensor: torch.Tensor) -> int:
if tensor.nelement():
stop = tensor.view(-1)[-1].data_ptr() + tensor.element_size()
else:
stop = tensor.data_ptr()
return stop
class TensorProperties:
def __init__(self, tensor: torch.Tensor):
# info about underlying storage
self.storage_ptr = tensor.untyped_storage().data_ptr()
self.storage_size = tensor.untyped_storage().nbytes()
# info to recover tensor
self.shape = tensor.shape
self.stride = tensor.stride()
self.offset = tensor.storage_offset()
self.start = tensor.data_ptr()
self.end = _end_ptr(tensor)
def is_complete(self) -> bool:
"""
Whether the tensor completely overlaps with its underlying storage
"""
return (
self.start == self.storage_ptr
and self.end == self.storage_ptr + self.storage_size
)
class Weights(dict):
"""
A dictionary mapping from weight name to a tuple of (tensor, TensorProperties).
tensor represents the actual initial value of the weight.
TensorProperties represents the properties of the weight that are needed to recover the weight.
We use two separate entries because `tensor` could be a clone of the original weight tensor,
so it doesn't have the same properties as the original weight (such as the underlying storage pointer).
"""
def __init__(self, weight_dict: dict[str, tuple[torch.Tensor, TensorProperties]]):
super().__init__(weight_dict)
def get_weight(self, name: str) -> tuple[torch.Tensor, TensorProperties]:
return self[name]
def get_weight_properties(self, name: str) -> TensorProperties:
return self[name][1]
def get_complete(
group: OrderedSet[tuple[str, str]], models_weights: dict[str, Weights]
) -> tuple[str, str]:
"""
    `group` is a set of (model_name, weight_name) tuples.
    `models_weights` is a dictionary mapping from model name to its Weights.
    One of the tensors in `group` must be complete, and they must all share
    the same underlying storage.
Returns the name of the complete tensor in the `group`. If multiple
tensors are complete, returns an arbitrary one.
"""
def get_tensor_properties(name_tuple: tuple[str, str]) -> TensorProperties:
# returns the tensor properties
(model_name, weight_name) = name_tuple
return models_weights[model_name].get_weight_properties(weight_name)
for name_tuple in group:
tensor_property = get_tensor_properties(name_tuple)
if tensor_property.is_complete():
return name_tuple
raise RuntimeError("No complete tensor found in the group!")
def group_weights(all_weights: dict[str, Weights]) -> list[OrderedSet[tuple[str, str]]]:
"""
Group weights that share the same underlying storage.
Returns a list of sets, each set contains a tuple of (model_name, weight_name).
"""
weights_dict: dict[int, OrderedSet[tuple[str, str]]] = collections.defaultdict(
OrderedSet
) # storage_key -> set(weight)
for model_name, weights in all_weights.items():
for weight_name, (_, properties) in weights.items():
weights_dict[properties.storage_ptr].add((model_name, weight_name))
return list(weights_dict.values())
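# A minimal usage sketch of the helpers above: two tensors viewing the same
# storage land in one group, and `get_complete` picks the view that covers the
# whole storage. The model/weight names are illustrative.
if __name__ == "__main__":
    base = torch.arange(6, dtype=torch.float32)
    full = base        # complete view of the underlying storage
    half = base[:3]    # partial view sharing the same storage
    all_weights = {
        "model_a": Weights({"w": (full.clone(), TensorProperties(full))}),
        "model_b": Weights({"w": (half.clone(), TensorProperties(half))}),
    }
    groups = group_weights(all_weights)  # both entries share one storage ptr
    assert get_complete(groups[0], all_weights) == ("model_a", "w")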
|
import collections
import torch
from torch.utils._ordered_set import OrderedSet
def _end_ptr(tensor: torch.Tensor) -> int:
if tensor.nelement():
stop = tensor.view(-1)[-1].data_ptr() + tensor.element_size()
else:
stop = tensor.data_ptr()
return stop
class TensorProperties:
def __init__(self, tensor: torch.Tensor):
# info about underlying storage
self.storage_ptr = tensor.untyped_storage().data_ptr()
self.storage_size = tensor.untyped_storage().nbytes()
# info to recover tensor
self.shape = tensor.shape
self.stride = tensor.stride()
self.offset = tensor.storage_offset()
self.start = tensor.data_ptr()
self.end = _end_ptr(tensor)
def is_complete(self) -> bool:
"""
Whether the tensor completely overlaps with its underlying storage
"""
return (
self.start == self.storage_ptr
and self.end == self.storage_ptr + self.storage_size
)
class Weights(dict):
"""
A dictionary mapping from weight name to a tuple of (tensor, TensorProperties).
tensor represents the actual initial value of the weight.
TensorProperties represents the properties of the weight that are needed to recover the weight.
We use two separate entries because `tensor` could be a clone of the original weight tensor,
so it doesn't have the same properties as the original weight (such as the underlying storage pointer).
"""
def __init__(self, weight_dict: dict[str, tuple[torch.Tensor, TensorProperties]]):
super().__init__(weight_dict)
def get_weight(self, name: str) -> tuple[torch.Tensor, TensorProperties]:
return self[name]
def get_weight_properties(self, name: str) -> TensorProperties:
return self[name][1]
def get_complete(
group: OrderedSet[tuple[str, str]], models_weights: dict[str, Weights]
) -> tuple[str, str]:
"""
    `group` is a set of (model_name, weight_name) tuples.
    `models_weights` is a dictionary mapping from model name to its Weights.
    One of the tensors in `group` must be complete, and they must all share
    the same underlying storage.
Returns the name of the complete tensor in the `group`. If multiple
tensors are complete, returns an arbitrary one.
"""
def get_tensor_properties(name_tuple: tuple[str, str]) -> TensorProperties:
# returns the tensor properties
(model_name, weight_name) = name_tuple
return models_weights[model_name].get_weight_properties(weight_name)
for name_tuple in group:
tensor_property = get_tensor_properties(name_tuple)
if tensor_property.is_complete():
return name_tuple
raise RuntimeError("No complete tensor found in the group!")
def group_weights(all_weights: dict[str, Weights]) -> list[OrderedSet[tuple[str, str]]]:
"""
Group weights that share the same underlying storage.
Returns a list of sets, each set contains a tuple of (model_name, weight_name).
"""
weights_dict: dict[int, OrderedSet[tuple[str, str]]] = collections.defaultdict(
OrderedSet
) # storage_key -> set(weight)
for model_name, weights in all_weights.items():
for weight_name, (_, properties) in weights.items():
weights_dict[properties.storage_ptr].add((model_name, weight_name))
return list(weights_dict.values())
|
from typing import Union
from torch import nn
import transformers
import torch
from PIL import Image
class CLIPModel(nn.Module):
def __init__(self, model_name: str = "openai/clip-vit-base-patch32", processor_name=None):
super(CLIPModel, self).__init__()
if processor_name is None:
processor_name = model_name
self.model = transformers.CLIPModel.from_pretrained(model_name)
self.processor = transformers.CLIPProcessor.from_pretrained(processor_name)
def __repr__(self):
return "CLIPModel()"
def forward(self, features):
image_embeds = []
text_embeds = []
if "pixel_values" in features:
vision_outputs = self.model.vision_model(pixel_values=features["pixel_values"])
image_embeds = self.model.visual_projection(vision_outputs[1])
if "input_ids" in features:
text_outputs = self.model.text_model(
input_ids=features.get("input_ids"),
attention_mask=features.get("attention_mask", None),
position_ids=features.get("position_ids", None),
output_attentions=features.get("output_attentions", None),
output_hidden_states=features.get("output_hidden_states", None),
)
text_embeds = self.model.text_projection(text_outputs[1])
sentence_embedding = []
image_features = iter(image_embeds)
text_features = iter(text_embeds)
for idx, input_type in enumerate(features["image_text_info"]):
if input_type == 0:
sentence_embedding.append(next(image_features))
else:
sentence_embedding.append(next(text_features))
features["sentence_embedding"] = torch.stack(sentence_embedding).float()
return features
def tokenize(self, texts, padding: Union[str, bool] = True):
images = []
texts_values = []
image_text_info = []
for idx, data in enumerate(texts):
if isinstance(data, Image.Image): # An Image
images.append(data)
image_text_info.append(0)
else: # A text
texts_values.append(data)
image_text_info.append(1)
if len(texts_values) == 0:
texts_values = None
if len(images) == 0:
images = None
inputs = self.processor(text=texts_values, images=images, return_tensors="pt", padding=padding)
inputs["image_text_info"] = image_text_info
return inputs
def save(self, output_path: str):
self.model.save_pretrained(output_path)
self.processor.save_pretrained(output_path)
@staticmethod
def load(input_path: str):
return CLIPModel(model_name=input_path)
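# A hedged usage sketch: `tokenize` accepts a mixed list of PIL images and
# strings, and `forward` routes each item through the matching CLIP tower.
# Loading "openai/clip-vit-base-patch32" downloads weights, so this needs
# network access; the blank image below is a synthetic placeholder.
if __name__ == "__main__":
    clip = CLIPModel()
    batch = clip.tokenize([Image.new("RGB", (224, 224)), "a photo of a cat"])
    features = clip(batch)
    # one image embedding and one text embedding, stacked in input order
    print(features["sentence_embedding"].shape)  # (2, 512) for the base model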
|
from torch import nn
import transformers
import torch
from PIL import Image
class CLIPModel(nn.Module):
def __init__(self, model_name: str = "openai/clip-vit-base-patch32", processor_name=None):
super(CLIPModel, self).__init__()
if processor_name is None:
processor_name = model_name
self.model = transformers.CLIPModel.from_pretrained(model_name)
self.processor = transformers.CLIPProcessor.from_pretrained(processor_name)
def __repr__(self):
return "CLIPModel()"
def forward(self, features):
image_embeds = []
text_embeds = []
if "pixel_values" in features:
vision_outputs = self.model.vision_model(pixel_values=features["pixel_values"])
image_embeds = self.model.visual_projection(vision_outputs[1])
if "input_ids" in features:
text_outputs = self.model.text_model(
input_ids=features.get("input_ids"),
attention_mask=features.get("attention_mask", None),
position_ids=features.get("position_ids", None),
output_attentions=features.get("output_attentions", None),
output_hidden_states=features.get("output_hidden_states", None),
)
text_embeds = self.model.text_projection(text_outputs[1])
sentence_embedding = []
image_features = iter(image_embeds)
text_features = iter(text_embeds)
for idx, input_type in enumerate(features["image_text_info"]):
if input_type == 0:
sentence_embedding.append(next(image_features))
else:
sentence_embedding.append(next(text_features))
features["sentence_embedding"] = torch.stack(sentence_embedding).float()
return features
def tokenize(self, texts):
images = []
texts_values = []
image_text_info = []
for idx, data in enumerate(texts):
if isinstance(data, Image.Image): # An Image
images.append(data)
image_text_info.append(0)
else: # A text
texts_values.append(data)
image_text_info.append(1)
if len(texts_values) == 0:
texts_values = None
if len(images) == 0:
images = None
inputs = self.processor(text=texts_values, images=images, return_tensors="pt", padding=True)
inputs["image_text_info"] = image_text_info
return inputs
def save(self, output_path: str):
self.model.save_pretrained(output_path)
self.processor.save_pretrained(output_path)
@staticmethod
def load(input_path: str):
return CLIPModel(model_name=input_path)
|
import sys
import pytest
from llama_index.graph_rag.cognee import CogneeGraphRAG
@pytest.mark.skipif(
sys.version_info < (3, 10), reason="mock strategy requires python3.10 or higher"
)
@pytest.mark.asyncio()
async def test_get_graph_url(monkeypatch):
# Instantiate cognee GraphRAG
cogneeRAG = CogneeGraphRAG(
llm_api_key="",
llm_provider="openai",
llm_model="gpt-4o-mini",
graph_db_provider="networkx",
vector_db_provider="lancedb",
relational_db_provider="sqlite",
relational_db_name="cognee_db",
)
# Mock logging in to graphistry
def mock_graphistry_return(username, password):
return True
import graphistry
monkeypatch.setattr(graphistry, "login", mock_graphistry_return)
# Mock render of graph
async def mock_render_return(graph):
return "link"
from cognee.shared import utils
monkeypatch.setattr(utils, "render_graph", mock_render_return)
await cogneeRAG.get_graph_url("password", "username")
from cognee.base_config import get_base_config
assert get_base_config().graphistry_password == "password", (
"Password was not set properly"
)
assert get_base_config().graphistry_username == "username", (
"Username was not set properly"
)
|
import asyncio
import pytest
from llama_index.graph_rag.cognee import CogneeGraphRAG
@pytest.mark.asyncio()
async def test_get_graph_url(monkeypatch):
# Instantiate cognee GraphRAG
cogneeRAG = CogneeGraphRAG(
llm_api_key="",
llm_provider="openai",
llm_model="gpt-4o-mini",
graph_db_provider="networkx",
vector_db_provider="lancedb",
relational_db_provider="sqlite",
relational_db_name="cognee_db",
)
# Mock logging in to graphistry
def mock_graphistry_return(username, password):
return True
import graphistry
monkeypatch.setattr(graphistry, "login", mock_graphistry_return)
# Mock render of graph
async def mock_render_return(graph):
return "link"
from cognee.shared import utils
monkeypatch.setattr(utils, "render_graph", mock_render_return)
await cogneeRAG.get_graph_url("password", "username")
from cognee.base_config import get_base_config
assert (
get_base_config().graphistry_password == "password"
), "Password was not set properly"
assert (
get_base_config().graphistry_username == "username"
), "Username was not set properly"
if __name__ == "__main__":
asyncio.run(test_get_graph_url())
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class FOVEA(SingleStageDetector):
"""Implementation of `FoveaBox <https://arxiv.org/abs/1904.03797>`_"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(FOVEA, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class FOVEA(SingleStageDetector):
"""Implementation of `FoveaBox <https://arxiv.org/abs/1904.03797>`_"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(FOVEA, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py',
'./centernet_tta.py'
]
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# model settings
model = dict(
type='CenterNet',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
type='ResNet',
depth=18,
norm_eval=False,
norm_cfg=dict(type='BN'),
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
neck=dict(
type='CTResNetNeck',
in_channels=512,
num_deconv_filters=(256, 128, 64),
num_deconv_kernels=(4, 4, 4),
use_dcn=True),
bbox_head=dict(
type='CenterNetHead',
num_classes=80,
in_channels=64,
feat_channels=64,
loss_center_heatmap=dict(type='GaussianFocalLoss', loss_weight=1.0),
loss_wh=dict(type='L1Loss', loss_weight=0.1),
loss_offset=dict(type='L1Loss', loss_weight=1.0)),
train_cfg=None,
test_cfg=dict(topk=100, local_maximum_kernel=3, max_per_img=100))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='RandomCenterCropPad',
# The cropped images are padded into squares during training,
# but may be less than crop_size.
crop_size=(512, 512),
ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
mean=[0, 0, 0],
std=[1, 1, 1],
to_rgb=True,
test_pad_mode=None),
# Make sure the output is always crop_size.
dict(type='Resize', scale=(512, 512), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
to_float32=True,
file_client_args={{_base_.file_client_args}}),
# don't need Resize
dict(
type='RandomCenterCropPad',
ratios=None,
border=None,
mean=[0, 0, 0],
std=[1, 1, 1],
to_rgb=True,
test_mode=True,
test_pad_mode=['logical_or', 31],
test_pad_add_pix=1),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'border'))
]
# Use RepeatDataset to speed up training
train_dataloader = dict(
batch_size=16,
num_workers=4,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
dataset=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# optimizer
# Based on the default settings of modern detectors, SGD performs better than
# the Adam optimizer used in the source code, so we keep the default SGD
# settings; with adam+lr5e-4, the mAP is 29.1.
optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2))
max_epochs = 28
# learning policy
# Based on the default settings of modern detectors, we added warmup settings.
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[18, 24], # the real step is [18*5, 24*5]
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs) # the real epoch is 28*5=140
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (16 samples per GPU)
auto_scale_lr = dict(base_batch_size=128)
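# For reference, the linear-scaling rule applied via `auto_scale_lr` amounts to
# (the GPU count and LR below are an assumed illustration, not part of this config):
#   scaled_lr = base_lr * actual_total_batch_size / base_batch_size
#   e.g. 4 GPUs x 16 samples -> scaled_lr = base_lr * 64 / 128 = base_lr / 2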
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# model settings
model = dict(
type='CenterNet',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
type='ResNet',
depth=18,
norm_eval=False,
norm_cfg=dict(type='BN'),
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
neck=dict(
type='CTResNetNeck',
in_channels=512,
num_deconv_filters=(256, 128, 64),
num_deconv_kernels=(4, 4, 4),
use_dcn=True),
bbox_head=dict(
type='CenterNetHead',
num_classes=80,
in_channels=64,
feat_channels=64,
loss_center_heatmap=dict(type='GaussianFocalLoss', loss_weight=1.0),
loss_wh=dict(type='L1Loss', loss_weight=0.1),
loss_offset=dict(type='L1Loss', loss_weight=1.0)),
train_cfg=None,
test_cfg=dict(topk=100, local_maximum_kernel=3, max_per_img=100))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='RandomCenterCropPad',
# The cropped images are padded into squares during training,
# but may be less than crop_size.
crop_size=(512, 512),
ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
mean=[0, 0, 0],
std=[1, 1, 1],
to_rgb=True,
test_pad_mode=None),
# Make sure the output is always crop_size.
dict(type='Resize', scale=(512, 512), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
to_float32=True,
file_client_args={{_base_.file_client_args}}),
# don't need Resize
dict(
type='RandomCenterCropPad',
ratios=None,
border=None,
mean=[0, 0, 0],
std=[1, 1, 1],
to_rgb=True,
test_mode=True,
test_pad_mode=['logical_or', 31],
test_pad_add_pix=1),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'border'))
]
# Use RepeatDataset to speed up training
train_dataloader = dict(
batch_size=16,
num_workers=4,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
dataset=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# optimizer
# Based on the default settings of modern detectors, SGD performs better than
# the Adam optimizer used in the source code, so we keep the default SGD
# settings; with adam+lr5e-4, the mAP is 29.1.
optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2))
max_epochs = 28
# learning policy
# Based on the default settings of modern detectors, we added warmup settings.
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[18, 24], # the real step is [18*5, 24*5]
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs) # the real epoch is 28*5=140
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (16 samples per GPU)
auto_scale_lr = dict(base_batch_size=128)
|
"""Run smoke tests"""
import os
from pathlib import Path
import torch
import torchvision
from torchvision.io import read_image
from torchvision.models import resnet50, ResNet50_Weights
SCRIPT_DIR = Path(__file__).parent
def smoke_test_torchvision() -> None:
print(
"Is torchvision useable?",
all(x is not None for x in [torch.ops.image.decode_png, torch.ops.torchvision.roi_align]),
)
def smoke_test_torchvision_read_decode() -> None:
img_jpg = read_image(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
if img_jpg.ndim != 3 or img_jpg.numel() < 100:
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
img_png = read_image(str(SCRIPT_DIR / "assets" / "interlaced_png" / "wizard_low.png"))
if img_png.ndim != 3 or img_png.numel() < 100:
raise RuntimeError(f"Unexpected shape of img_png: {img_png.shape}")
def smoke_test_torchvision_resnet50_classify(device: str = "cpu") -> None:
img = read_image(str(SCRIPT_DIR / ".." / "gallery" / "assets" / "dog2.jpg")).to(device)
# Step 1: Initialize model with the best available weights
weights = ResNet50_Weights.DEFAULT
model = resnet50(weights=weights).to(device)
model.eval()
# Step 2: Initialize the inference transforms
preprocess = weights.transforms()
# Step 3: Apply inference preprocessing transforms
batch = preprocess(img).unsqueeze(0)
# Step 4: Use the model and print the predicted category
prediction = model(batch).squeeze(0).softmax(0)
class_id = prediction.argmax().item()
score = prediction[class_id].item()
category_name = weights.meta["categories"][class_id]
expected_category = "German shepherd"
print(f"{category_name} ({device}): {100 * score:.1f}%")
if category_name != expected_category:
raise RuntimeError(
f"Failed ResNet50 classify {category_name} Expected: {expected_category}"
)
def main() -> None:
print(f"torchvision: {torchvision.__version__}")
smoke_test_torchvision()
smoke_test_torchvision_read_decode()
smoke_test_torchvision_resnet50_classify()
if torch.cuda.is_available():
smoke_test_torchvision_resnet50_classify("cuda")
if __name__ == "__main__":
main()
|
"""Run smoke tests"""
import os
from pathlib import Path
import torch
import torchvision
from torchvision.io import read_image
from torchvision.models import resnet50, ResNet50_Weights
SCRIPT_DIR = Path(__file__).parent
def smoke_test_torchvision() -> None:
print(
"Is torchvision useable?",
all(x is not None for x in [torch.ops.image.decode_png, torch.ops.torchvision.roi_align]),
)
def smoke_test_torchvision_read_decode() -> None:
img_jpg = read_image(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
if img_jpg.ndim != 3 or img_jpg.numel() < 100:
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
img_png = read_image(str(SCRIPT_DIR / "assets" / "interlaced_png" / "wizard_low.png"))
if img_png.ndim != 3 or img_png.numel() < 100:
raise RuntimeError(f"Unexpected shape of img_png: {img_png.shape}")
def smoke_test_torchvision_resnet50_classify() -> None:
img = read_image(str(SCRIPT_DIR / ".." / "gallery" / "assets" / "dog2.jpg"))
# Step 1: Initialize model with the best available weights
weights = ResNet50_Weights.DEFAULT
model = resnet50(weights=weights)
model.eval()
# Step 2: Initialize the inference transforms
preprocess = weights.transforms()
# Step 3: Apply inference preprocessing transforms
batch = preprocess(img).unsqueeze(0)
# Step 4: Use the model and print the predicted category
prediction = model(batch).squeeze(0).softmax(0)
class_id = prediction.argmax().item()
score = prediction[class_id].item()
category_name = weights.meta["categories"][class_id]
expected_category = "German shepherd"
print(f"{category_name}: {100 * score:.1f}%")
if category_name != expected_category:
raise RuntimeError(f"Failed ResNet50 classify {category_name} Expected: {expected_category}")
def main() -> None:
print(f"torchvision: {torchvision.__version__}")
smoke_test_torchvision()
smoke_test_torchvision_read_decode()
smoke_test_torchvision_resnet50_classify()
if __name__ == "__main__":
main()
|
import os
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDocument
from docarray.documents import Audio
from docarray.typing import AudioUrl
from docarray.typing.tensor.audio import AudioNdArray, AudioTorchTensor
from tests import TOYDATA_DIR
LOCAL_AUDIO_FILES = [
str(TOYDATA_DIR / 'hello.wav'),
str(TOYDATA_DIR / 'olleh.wav'),
]
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', LOCAL_AUDIO_FILES)
def test_audio(file_url):
audio = Audio(url=file_url)
audio.tensor = audio.url.load()
assert isinstance(audio.tensor, np.ndarray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', LOCAL_AUDIO_FILES)
def test_save_audio_ndarray(file_url, tmpdir):
tmp_file = str(tmpdir / 'tmp.wav')
audio = Audio(url=file_url)
audio.tensor = audio.url.load()
assert isinstance(audio.tensor, np.ndarray)
assert isinstance(audio.tensor, AudioNdArray)
audio.tensor.save_to_wav_file(tmp_file)
assert os.path.isfile(tmp_file)
audio_from_file = Audio(url=tmp_file)
audio_from_file.tensor = audio_from_file.url.load()
assert np.allclose(audio.tensor, audio_from_file.tensor)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', LOCAL_AUDIO_FILES)
def test_save_audio_torch_tensor(file_url, tmpdir):
tmp_file = str(tmpdir / 'tmp.wav')
audio = Audio(url=file_url)
audio.tensor = parse_obj_as(AudioTorchTensor, torch.from_numpy(audio.url.load()))
assert isinstance(audio.tensor, torch.Tensor)
assert isinstance(audio.tensor, AudioTorchTensor)
audio.tensor.save_to_wav_file(tmp_file)
assert os.path.isfile(tmp_file)
audio_from_file = Audio(url=tmp_file)
audio_from_file.tensor = parse_obj_as(
AudioTorchTensor, torch.from_numpy(audio_from_file.url.load())
)
assert torch.allclose(audio.tensor, audio_from_file.tensor)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
LOCAL_AUDIO_FILES,
)
def test_extend_audio(file_url):
class MyAudio(Audio):
title: str
tensor: Optional[AudioNdArray]
my_audio = MyAudio(title='my extended audio', url=file_url)
my_audio.tensor = parse_obj_as(AudioNdArray, my_audio.url.load())
assert isinstance(my_audio.tensor, AudioNdArray)
assert isinstance(my_audio.url, AudioUrl)
def test_audio_np():
audio = parse_obj_as(Audio, np.zeros((10, 10, 3)))
assert (audio.tensor == np.zeros((10, 10, 3))).all()
def test_audio_torch():
audio = parse_obj_as(Audio, torch.zeros(10, 10, 3))
assert (audio.tensor == torch.zeros(10, 10, 3)).all()
def test_audio_bytes():
audio = parse_obj_as(Audio, torch.zeros(10, 10, 3))
audio.bytes = audio.tensor.to_bytes()
def test_audio_shortcut_doc():
class MyDoc(BaseDocument):
audio: Audio
audio2: Audio
audio3: Audio
doc = MyDoc(
audio='http://myurl.wav',
audio2=np.zeros((10, 10, 3)),
audio3=torch.zeros(10, 10, 3),
)
assert doc.audio.url == 'http://myurl.wav'
assert (doc.audio2.tensor == np.zeros((10, 10, 3))).all()
assert (doc.audio3.tensor == torch.zeros(10, 10, 3)).all()
|
import os
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDocument
from docarray.documents import Audio
from docarray.typing import AudioUrl
from docarray.typing.tensor.audio import AudioNdArray, AudioTorchTensor
from tests import TOYDATA_DIR
LOCAL_AUDIO_FILES = [
str(TOYDATA_DIR / 'hello.wav'),
str(TOYDATA_DIR / 'olleh.wav'),
]
REMOTE_AUDIO_FILE = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/olleh.wav?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [*LOCAL_AUDIO_FILES, REMOTE_AUDIO_FILE])
def test_audio(file_url):
audio = Audio(url=file_url)
audio.tensor = audio.url.load()
assert isinstance(audio.tensor, np.ndarray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [*LOCAL_AUDIO_FILES, REMOTE_AUDIO_FILE])
def test_save_audio_ndarray(file_url, tmpdir):
tmp_file = str(tmpdir / 'tmp.wav')
audio = Audio(url=file_url)
audio.tensor = audio.url.load()
assert isinstance(audio.tensor, np.ndarray)
assert isinstance(audio.tensor, AudioNdArray)
audio.tensor.save_to_wav_file(tmp_file)
assert os.path.isfile(tmp_file)
audio_from_file = Audio(url=tmp_file)
audio_from_file.tensor = audio_from_file.url.load()
assert np.allclose(audio.tensor, audio_from_file.tensor)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [*LOCAL_AUDIO_FILES, REMOTE_AUDIO_FILE])
def test_save_audio_torch_tensor(file_url, tmpdir):
tmp_file = str(tmpdir / 'tmp.wav')
audio = Audio(url=file_url)
audio.tensor = parse_obj_as(AudioTorchTensor, torch.from_numpy(audio.url.load()))
assert isinstance(audio.tensor, torch.Tensor)
assert isinstance(audio.tensor, AudioTorchTensor)
audio.tensor.save_to_wav_file(tmp_file)
assert os.path.isfile(tmp_file)
audio_from_file = Audio(url=tmp_file)
audio_from_file.tensor = parse_obj_as(
AudioTorchTensor, torch.from_numpy(audio_from_file.url.load())
)
assert torch.allclose(audio.tensor, audio_from_file.tensor)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*LOCAL_AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_extend_audio(file_url):
class MyAudio(Audio):
title: str
tensor: Optional[AudioNdArray]
my_audio = MyAudio(title='my extended audio', url=file_url)
my_audio.tensor = parse_obj_as(AudioNdArray, my_audio.url.load())
assert isinstance(my_audio.tensor, AudioNdArray)
assert isinstance(my_audio.url, AudioUrl)
def test_audio_np():
audio = parse_obj_as(Audio, np.zeros((10, 10, 3)))
assert (audio.tensor == np.zeros((10, 10, 3))).all()
def test_audio_torch():
audio = parse_obj_as(Audio, torch.zeros(10, 10, 3))
assert (audio.tensor == torch.zeros(10, 10, 3)).all()
def test_audio_shortcut_doc():
class MyDoc(BaseDocument):
audio: Audio
audio2: Audio
audio3: Audio
doc = MyDoc(
audio='http://myurl.wav',
audio2=np.zeros((10, 10, 3)),
audio3=torch.zeros(10, 10, 3),
)
assert doc.audio.url == 'http://myurl.wav'
assert (doc.audio2.tensor == np.zeros((10, 10, 3))).all()
assert (doc.audio3.tensor == torch.zeros(10, 10, 3)).all()
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Dict, List, Tuple, Union
import torch.nn.functional as F
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.utils import ConfigType, OptMultiConfig
@MODELS.register_module()
class BaseSemanticHead(BaseModule, metaclass=ABCMeta):
"""Base module of Semantic Head.
Args:
num_classes (int): the number of classes.
        seg_rescale_factor (float): the rescale factor for ``gt_sem_seg``,
            which equals ``1 / output_strides``. The ``output_strides`` is
            for ``seg_preds``. Defaults to 1 / 4.
        loss_seg (Union[:obj:`ConfigDict`, dict]): the loss of the semantic
            head.
        init_cfg (Optional[Union[:obj:`ConfigDict`, dict]]): the initialization
            config.
"""
def __init__(self,
num_classes: int,
seg_rescale_factor: float = 1 / 4.,
loss_seg: ConfigType = dict(
type='CrossEntropyLoss',
ignore_index=255,
loss_weight=1.0),
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
self.loss_seg = MODELS.build(loss_seg)
self.num_classes = num_classes
self.seg_rescale_factor = seg_rescale_factor
@abstractmethod
def forward(self, x: Union[Tensor, Tuple[Tensor]]) -> Dict[str, Tensor]:
"""Placeholder of forward function.
Args:
x (Tensor): Feature maps.
Returns:
Dict[str, Tensor]: A dictionary, including features
and predicted scores. Required keys: 'seg_preds'
and 'feats'.
"""
pass
@abstractmethod
def loss(self, x: Union[Tensor, Tuple[Tensor]],
batch_data_samples: SampleList) -> Dict[str, Tensor]:
"""
Args:
x (Union[Tensor, Tuple[Tensor]]): Feature maps.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
Dict[str, Tensor]: The loss of semantic head.
"""
pass
def predict(self,
x: Union[Tensor, Tuple[Tensor]],
batch_img_metas: List[dict],
rescale: bool = False) -> List[Tensor]:
"""Test without Augmentation.
Args:
x (Union[Tensor, Tuple[Tensor]]): Feature maps.
batch_img_metas (List[dict]): List of image information.
rescale (bool): Whether to rescale the results.
Defaults to False.
Returns:
list[Tensor]: semantic segmentation logits.
"""
seg_preds = self.forward(x)['seg_preds']
seg_preds = F.interpolate(
seg_preds,
size=batch_img_metas[0]['batch_input_shape'],
mode='bilinear',
align_corners=False)
seg_preds = [seg_preds[i] for i in range(len(batch_img_metas))]
if rescale:
seg_pred_list = []
for i in range(len(batch_img_metas)):
h, w = batch_img_metas[i]['img_shape']
seg_pred = seg_preds[i][:, :h, :w]
h, w = batch_img_metas[i]['ori_shape']
seg_pred = F.interpolate(
seg_pred[None],
size=(h, w),
mode='bilinear',
align_corners=False)[0]
seg_pred_list.append(seg_pred)
else:
seg_pred_list = seg_preds
return seg_pred_list
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Dict, List, Tuple, Union
import torch.nn.functional as F
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.data_elements import SampleList
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptMultiConfig
@MODELS.register_module()
class BaseSemanticHead(BaseModule, metaclass=ABCMeta):
"""Base module of Semantic Head.
Args:
num_classes (int): the number of classes.
        seg_rescale_factor (float): the rescale factor for ``gt_sem_seg``,
            which equals ``1 / output_strides``. The ``output_strides`` is
            for ``seg_preds``. Defaults to 1 / 4.
        loss_seg (Union[:obj:`ConfigDict`, dict]): the loss of the semantic
            head.
        init_cfg (Optional[Union[:obj:`ConfigDict`, dict]]): the initialization
            config.
"""
def __init__(self,
num_classes: int,
seg_rescale_factor: float = 1 / 4.,
loss_seg: ConfigType = dict(
type='CrossEntropyLoss',
ignore_index=255,
loss_weight=1.0),
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
self.loss_seg = MODELS.build(loss_seg)
self.num_classes = num_classes
self.seg_rescale_factor = seg_rescale_factor
@abstractmethod
def forward(self, x: Union[Tensor, Tuple[Tensor]]) -> Dict[str, Tensor]:
"""Placeholder of forward function.
Args:
x (Tensor): Feature maps.
Returns:
Dict[str, Tensor]: A dictionary, including features
and predicted scores. Required keys: 'seg_preds'
and 'feats'.
"""
pass
@abstractmethod
def loss(self, x: Union[Tensor, Tuple[Tensor]],
batch_data_samples: SampleList) -> Dict[str, Tensor]:
"""
Args:
x (Union[Tensor, Tuple[Tensor]]): Feature maps.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
Dict[str, Tensor]: The loss of semantic head.
"""
pass
def predict(self,
x: Union[Tensor, Tuple[Tensor]],
batch_img_metas: List[dict],
rescale: bool = False) -> List[Tensor]:
"""Test without Augmentation.
Args:
x (Union[Tensor, Tuple[Tensor]]): Feature maps.
batch_img_metas (List[dict]): List of image information.
rescale (bool): Whether to rescale the results.
Defaults to False.
Returns:
list[Tensor]: semantic segmentation logits.
"""
seg_preds = self.forward(x)['seg_preds']
seg_preds = F.interpolate(
seg_preds,
size=batch_img_metas[0]['batch_input_shape'],
mode='bilinear',
align_corners=False)
seg_preds = [seg_preds[i] for i in range(len(batch_img_metas))]
if rescale:
seg_pred_list = []
for i in range(len(batch_img_metas)):
h, w = batch_img_metas[i]['img_shape']
seg_pred = seg_preds[i][:, :h, :w]
h, w = batch_img_metas[i]['ori_shape']
seg_pred = F.interpolate(
seg_pred[None],
size=(h, w),
mode='bilinear',
align_corners=False)[0]
seg_pred_list.append(seg_pred)
else:
seg_pred_list = seg_preds
return seg_pred_list
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.utils.dl_utils import TORCH_VERSION
from mmengine.utils.version_utils import digit_version
from .averaged_model import (BaseAveragedModel, ExponentialMovingAverage,
MomentumAnnealingEMA, StochasticWeightAverage)
from .base_model import BaseDataPreprocessor, BaseModel, ImgDataPreprocessor
from .base_module import BaseModule, ModuleDict, ModuleList, Sequential
from .test_time_aug import BaseTTAModel
from .utils import (convert_sync_batchnorm, detect_anomalous_params,
merge_dict, revert_sync_batchnorm, stack_batch)
from .weight_init import (BaseInit, Caffe2XavierInit, ConstantInit,
KaimingInit, NormalInit, PretrainedInit,
TruncNormalInit, UniformInit, XavierInit,
bias_init_with_prob, caffe2_xavier_init,
constant_init, initialize, kaiming_init, normal_init,
trunc_normal_init, uniform_init, update_init_info,
xavier_init)
from .wrappers import (MMDistributedDataParallel,
MMSeparateDistributedDataParallel, is_model_wrapper)
__all__ = [
'MMDistributedDataParallel', 'is_model_wrapper', 'BaseAveragedModel',
'StochasticWeightAverage', 'ExponentialMovingAverage',
'MomentumAnnealingEMA', 'BaseModel', 'BaseDataPreprocessor',
'ImgDataPreprocessor', 'MMSeparateDistributedDataParallel', 'BaseModule',
'stack_batch', 'merge_dict', 'detect_anomalous_params', 'ModuleList',
'ModuleDict', 'Sequential', 'revert_sync_batchnorm', 'update_init_info',
'constant_init', 'xavier_init', 'normal_init', 'trunc_normal_init',
'uniform_init', 'kaiming_init', 'caffe2_xavier_init',
'bias_init_with_prob', 'BaseInit', 'ConstantInit', 'XavierInit',
'NormalInit', 'TruncNormalInit', 'UniformInit', 'KaimingInit',
'Caffe2XavierInit', 'PretrainedInit', 'initialize',
'convert_sync_batchnorm', 'BaseTTAModel'
]
if digit_version(TORCH_VERSION) >= digit_version('1.11.0'):
from .wrappers import MMFullyShardedDataParallel # noqa:F401
__all__.append('MMFullyShardedDataParallel')
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.utils.dl_utils import TORCH_VERSION
from mmengine.utils.version_utils import digit_version
from .averaged_model import (BaseAveragedModel, ExponentialMovingAverage,
MomentumAnnealingEMA, StochasticWeightAverage)
from .base_model import BaseDataPreprocessor, BaseModel, ImgDataPreprocessor
from .base_module import BaseModule, ModuleDict, ModuleList, Sequential
from .utils import (convert_sync_batchnorm, detect_anomalous_params,
merge_dict, revert_sync_batchnorm, stack_batch)
from .weight_init import (BaseInit, Caffe2XavierInit, ConstantInit,
KaimingInit, NormalInit, PretrainedInit,
TruncNormalInit, UniformInit, XavierInit,
bias_init_with_prob, caffe2_xavier_init,
constant_init, initialize, kaiming_init, normal_init,
trunc_normal_init, uniform_init, update_init_info,
xavier_init)
from .wrappers import (BaseTTAModel, MMDistributedDataParallel,
MMSeparateDistributedDataParallel, is_model_wrapper)
__all__ = [
'MMDistributedDataParallel', 'is_model_wrapper', 'BaseAveragedModel',
'StochasticWeightAverage', 'ExponentialMovingAverage',
'MomentumAnnealingEMA', 'BaseModel', 'BaseDataPreprocessor',
'ImgDataPreprocessor', 'MMSeparateDistributedDataParallel', 'BaseModule',
'stack_batch', 'merge_dict', 'detect_anomalous_params', 'ModuleList',
'ModuleDict', 'Sequential', 'revert_sync_batchnorm', 'update_init_info',
'constant_init', 'xavier_init', 'normal_init', 'trunc_normal_init',
'uniform_init', 'kaiming_init', 'caffe2_xavier_init',
'bias_init_with_prob', 'BaseInit', 'ConstantInit', 'XavierInit',
'NormalInit', 'TruncNormalInit', 'UniformInit', 'KaimingInit',
'Caffe2XavierInit', 'PretrainedInit', 'initialize',
'convert_sync_batchnorm', 'BaseTTAModel'
]
if digit_version(TORCH_VERSION) >= digit_version('1.11.0'):
from .wrappers import MMFullyShardedDataParallel # noqa:F401
__all__.append('MMFullyShardedDataParallel')
|
__all__ = ['reduce', 'reduce_all']
from typing import Dict, List, Optional
from docarray import DocList
def reduce(
left: DocList, right: DocList, left_id_map: Optional[Dict] = None
) -> 'DocList':
"""
Reduces left and right DocList into one DocList in-place.
Changes are applied to the left DocList.
    Reducing 2 DocLists consists of adding Documents from the second DocList
    to the first DocList if they do not already exist there.
If a Document exists in both DocLists (identified by ID),
the data properties are merged with priority to the left Document.
Nested DocLists are also reduced in the same way.
:param left: First DocList to be reduced. Changes will be applied to it
in-place
:param right: Second DocList to be reduced
:param left_id_map: Optional parameter to be passed in repeated calls
for optimizations, keeping a map of the Document ID to its offset
in the DocList
:return: Reduced DocList
"""
left_id_map = left_id_map or {doc.id: i for i, doc in enumerate(left)}
for doc in right:
if doc.id in left_id_map:
left[left_id_map[doc.id]].update(doc)
else:
casted = left.doc_type(**doc.__dict__)
left.append(casted)
return left
def reduce_all(docs: List[DocList]) -> DocList:
"""
Reduces a list of DocLists into one DocList.
Changes are applied to the first DocList in-place.
The resulting DocList contains Documents of all DocLists.
    If a Document exists (identified by its ID) in multiple DocLists,
    data properties are merged with priority to the left-most
    DocLists (that is, if a data attribute is set in a Document
    belonging to multiple DocLists, the attribute value of the left-most
    DocList is kept).
    Nested DocLists belonging to multiple DocLists
    are also reduced in the same way.
!!! note
- Nested DocLists order does not follow any specific rule.
You might want to re-sort them in a later step.
- The final result depends on the order of DocLists
when applying reduction.
:param docs: List of DocLists to be reduced
:return: the resulting DocList
"""
if len(docs) <= 1:
raise Exception(
            'In order to reduce DocLists, more than one DocList is required'
)
left = docs[0]
others = docs[1:]
left_id_map = {doc.id: i for i, doc in enumerate(left)}
for other_docs in others:
reduce(left, other_docs, left_id_map)
return left
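# A minimal usage sketch for `reduce_all`, assuming docarray v2 is installed.
# The schema, field values and IDs below are illustrative.
if __name__ == '__main__':
    from docarray import BaseDoc

    class MyDoc(BaseDoc):
        text: str = ''

    left = DocList[MyDoc]([MyDoc(id='a', text='left'), MyDoc(id='b')])
    right = DocList[MyDoc]([MyDoc(id='a', text='right'), MyDoc(id='c')])
    merged = reduce_all([left, right])
    # 'a' is merged (left-most priority, per the docstring); 'c' is appended
    print([doc.id for doc in merged])  # ['a', 'b', 'c']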
|
__all__ = ['reduce', 'reduce_all']
from typing import Dict, List, Optional
from docarray import DocList
def reduce(
left: DocList, right: DocList, left_id_map: Optional[Dict] = None
) -> 'DocList':
"""
Reduces left and right DocList into one DocList in-place.
Changes are applied to the left DocList.
    Reducing 2 DocLists consists of adding Documents from the second DocList
    to the first DocList if they do not already exist there.
If a Document exists in both DocLists (identified by ID),
the data properties are merged with priority to the left Document.
Nested DocLists are also reduced in the same way.
:param left: First DocList to be reduced. Changes will be applied to it
in-place
:param right: Second DocList to be reduced
:param left_id_map: Optional parameter to be passed in repeated calls
for optimizations, keeping a map of the Document ID to its offset
in the DocList
:return: Reduced DocList
"""
left_id_map = left_id_map or {doc.id: i for i, doc in enumerate(left)}
for doc in right:
if doc.id in left_id_map:
left[left_id_map[doc.id]].update(doc)
else:
left.append(doc)
return left
def reduce_all(docs: List[DocList]) -> DocList:
"""
Reduces a list of DocLists into one DocList.
Changes are applied to the first DocList in-place.
The resulting DocList contains Documents of all DocLists.
    If a Document exists (identified by its ID) in multiple DocLists,
    data properties are merged with priority to the left-most
    DocLists (that is, if a data attribute is set in a Document
    belonging to multiple DocLists, the attribute value of the left-most
    DocList is kept).
    Nested DocLists belonging to multiple DocLists
    are also reduced in the same way.
!!! note
- Nested DocLists order does not follow any specific rule.
You might want to re-sort them in a later step.
- The final result depends on the order of DocLists
when applying reduction.
:param docs: List of DocLists to be reduced
:return: the resulting DocList
"""
if len(docs) <= 1:
raise Exception(
            'In order to reduce DocLists, more than one DocList is required'
)
left = docs[0]
others = docs[1:]
left_id_map = {doc.id: i for i, doc in enumerate(left)}
for other_docs in others:
reduce(left, other_docs, left_id_map)
return left
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/voc0712.py',
'../_base_/default_runtime.py'
]
model = dict(bbox_head=dict(num_classes=20))
# training schedule: the VOC dataset is repeated 3 times in
# `_base_/datasets/voc0712.py`, so the actual number of epochs = 4 * 3 = 12
max_epochs = 4
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
param_scheduler = [
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[3],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/voc0712.py',
'../_base_/default_runtime.py'
]
model = dict(bbox_head=dict(num_classes=20))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
# actual epoch = 3 * 3 = 9
lr_config = dict(policy='step', step=[3])
# runtime settings
runner = dict(
type='EpochBasedRunner', max_epochs=4) # actual epoch = 4 * 3 = 12
|
import contextlib
import logging
import typing
import fastapi
import fastapi.responses
import starlette.middleware.cors
import uvicorn
import backend.data.block
import backend.data.db
import backend.data.user
import backend.server.routers.v1
import backend.util.service
import backend.util.settings
settings = backend.util.settings.Settings()
logger = logging.getLogger(__name__)
@contextlib.asynccontextmanager
async def lifespan_context(app: fastapi.FastAPI):
await backend.data.db.connect()
await backend.data.block.initialize_blocks()
await backend.data.user.migrate_and_encrypt_user_integrations()
yield
await backend.data.db.disconnect()
def handle_internal_http_error(status_code: int = 500, log_error: bool = True):
def handler(request: fastapi.Request, exc: Exception):
if log_error:
logger.exception(f"{request.method} {request.url.path} failed: {exc}")
return fastapi.responses.JSONResponse(
content={
"message": f"{request.method} {request.url.path} failed",
"detail": str(exc),
},
status_code=status_code,
)
return handler
docs_url = (
"/docs"
if settings.config.app_env == backend.util.settings.AppEnvironment.LOCAL
else None
)
app = fastapi.FastAPI(
title="AutoGPT Agent Server",
description=(
"This server is used to execute agents that are created by the "
"AutoGPT system."
),
summary="AutoGPT Agent Server",
version="0.1",
lifespan=lifespan_context,
docs_url=docs_url,
)
app.add_exception_handler(ValueError, handle_internal_http_error(400))
app.add_exception_handler(500, handle_internal_http_error(500))
app.include_router(backend.server.routers.v1.v1_router, tags=["v1"])
@app.get(path="/health", tags=["health"], dependencies=[])
async def health():
return {"status": "healthy"}
class AgentServer(backend.util.service.AppProcess):
def run(self):
server_app = starlette.middleware.cors.CORSMiddleware(
app=app,
allow_origins=settings.config.backend_cors_allow_origins,
allow_credentials=True,
allow_methods=["*"], # Allows all methods
allow_headers=["*"], # Allows all headers
)
uvicorn.run(
server_app,
host=backend.util.settings.Config().agent_api_host,
port=backend.util.settings.Config().agent_api_port,
)
@staticmethod
async def test_execute_graph(
graph_id: str, node_input: dict[typing.Any, typing.Any], user_id: str
):
return await backend.server.routers.v1.execute_graph(
graph_id, node_input, user_id
)
@staticmethod
async def test_create_graph(
create_graph: backend.server.routers.v1.CreateGraph,
user_id: str,
is_template=False,
):
return await backend.server.routers.v1.create_new_graph(create_graph, user_id)
@staticmethod
async def test_get_graph_run_status(
graph_id: str, graph_exec_id: str, user_id: str
):
return await backend.server.routers.v1.get_graph_run_status(
graph_id, graph_exec_id, user_id
)
@staticmethod
async def test_get_graph_run_node_execution_results(
graph_id: str, graph_exec_id: str, user_id: str
):
return await backend.server.routers.v1.get_graph_run_node_execution_results(
graph_id, graph_exec_id, user_id
)
@staticmethod
async def test_delete_graph(graph_id: str, user_id: str):
return await backend.server.routers.v1.delete_graph(graph_id, user_id)
def set_test_dependency_overrides(self, overrides: dict):
app.dependency_overrides.update(overrides)
|
import contextlib
import typing
import fastapi
import fastapi.middleware.cors
import fastapi.responses
import uvicorn
import backend.data.block
import backend.data.db
import backend.data.user
import backend.server.routers.v1
import backend.util.service
import backend.util.settings
settings = backend.util.settings.Settings()
@contextlib.asynccontextmanager
async def lifespan_context(app: fastapi.FastAPI):
await backend.data.db.connect()
await backend.data.block.initialize_blocks()
await backend.data.user.migrate_and_encrypt_user_integrations()
yield
await backend.data.db.disconnect()
docs_url = (
"/docs"
if settings.config.app_env == backend.util.settings.AppEnvironment.LOCAL
else None
)
app = fastapi.FastAPI(
title="AutoGPT Agent Server",
description=(
"This server is used to execute agents that are created by the "
"AutoGPT system."
),
summary="AutoGPT Agent Server",
version="0.1",
lifespan=lifespan_context,
docs_url=docs_url,
)
app.include_router(backend.server.routers.v1.v1_router, tags=["v1"])
app.add_middleware(
fastapi.middleware.cors.CORSMiddleware,
allow_origins=settings.config.backend_cors_allow_origins,
allow_credentials=True,
allow_methods=["*"], # Allows all methods
allow_headers=["*"], # Allows all headers
)
@app.get(path="/health", tags=["health"], dependencies=[])
async def health():
return {"status": "healthy"}
@app.exception_handler(Exception)
def handle_internal_http_error(request: fastapi.Request, exc: Exception):
return fastapi.responses.JSONResponse(
content={
"message": f"{request.method} {request.url.path} failed",
"error": str(exc),
},
status_code=500,
)
class AgentServer(backend.util.service.AppProcess):
def run(self):
uvicorn.run(
app,
host=backend.util.settings.Config().agent_api_host,
port=backend.util.settings.Config().agent_api_port,
)
@staticmethod
async def test_execute_graph(
graph_id: str, node_input: dict[typing.Any, typing.Any], user_id: str
):
return await backend.server.routers.v1.execute_graph(
graph_id, node_input, user_id
)
@staticmethod
async def test_create_graph(
create_graph: backend.server.routers.v1.CreateGraph,
user_id: str,
is_template=False,
):
return await backend.server.routers.v1.create_new_graph(create_graph, user_id)
@staticmethod
async def test_get_graph_run_status(
graph_id: str, graph_exec_id: str, user_id: str
):
return await backend.server.routers.v1.get_graph_run_status(
graph_id, graph_exec_id, user_id
)
@staticmethod
async def test_get_graph_run_node_execution_results(
graph_id: str, graph_exec_id: str, user_id: str
):
return await backend.server.routers.v1.get_graph_run_node_execution_results(
graph_id, graph_exec_id, user_id
)
@staticmethod
async def test_delete_graph(graph_id: str, user_id: str):
return await backend.server.routers.v1.delete_graph(graph_id, user_id)
def set_test_dependency_overrides(self, overrides: dict):
app.dependency_overrides.update(overrides)
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/voc0712.py',
'../_base_/default_runtime.py'
]
model = dict(bbox_head=dict(num_classes=20))
# training schedule, voc dataset is repeated 3 times, in
# `_base_/datasets/voc0712.py`, so the actual epoch = 4 * 3 = 12
max_epochs = 4
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
param_scheduler = [
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[3],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/voc0712.py',
'../_base_/default_runtime.py'
]
model = dict(bbox_head=dict(num_classes=20))
# training schedule, voc dataset is repeated 3 times, in
# `_base_/datasets/voc0712.py`, so the actual epoch = 4 * 3 = 12
max_epochs = 4
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
param_scheduler = [
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[3],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
|
"""dad_jokes reader."""
from typing import List
import requests
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class DadJokesReader(BaseReader):
"""
Dad jokes reader.
Reads a random dad joke.
"""
def _get_random_dad_joke(self):
response = requests.get(
"https://icanhazdadjoke.com/", headers={"Accept": "application/json"}
)
response.raise_for_status()
json_data = response.json()
return json_data["joke"]
def load_data(self) -> List[Document]:
"""
Return a random dad joke.
Args:
None.
"""
return [Document(text=self._get_random_dad_joke())]
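# Usage sketch (illustrative): each load_data() call hits the
# icanhazdadjoke.com API and wraps one fresh joke in a Document.
#
#     reader = DadJokesReader()
#     documents = reader.load_data()
#     print(documents[0].text)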
|
"""dad_jokes reader."""
from typing import List
import requests
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class DadJokesReader(BaseReader):
"""Dad jokes reader.
Reads a random dad joke.
"""
def _get_random_dad_joke(self):
response = requests.get(
"https://icanhazdadjoke.com/", headers={"Accept": "application/json"}
)
response.raise_for_status()
json_data = response.json()
return json_data["joke"]
def load_data(self) -> List[Document]:
"""Return a random dad joke.
Args:
None.
"""
return [Document(text=self._get_random_dad_joke())]
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.models.utils import ResLayer, SimplifiedBasicBlock
from mmdet.registry import MODELS
from .fused_semantic_head import FusedSemanticHead
@MODELS.register_module()
class SCNetSemanticHead(FusedSemanticHead):
"""Mask head for `SCNet <https://arxiv.org/abs/2012.10150>`_.
Args:
conv_to_res (bool, optional): if True, change the conv layers to
``SimplifiedBasicBlock``.
"""
def __init__(self, conv_to_res=True, **kwargs):
super(SCNetSemanticHead, self).__init__(**kwargs)
self.conv_to_res = conv_to_res
if self.conv_to_res:
num_res_blocks = self.num_convs // 2
self.convs = ResLayer(
SimplifiedBasicBlock,
self.in_channels,
self.conv_out_channels,
num_res_blocks,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
self.num_convs = num_res_blocks
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.models.builder import HEADS
from mmdet.models.utils import ResLayer, SimplifiedBasicBlock
from .fused_semantic_head import FusedSemanticHead
@HEADS.register_module()
class SCNetSemanticHead(FusedSemanticHead):
"""Mask head for `SCNet <https://arxiv.org/abs/2012.10150>`_.
Args:
conv_to_res (bool, optional): if True, change the conv layers to
``SimplifiedBasicBlock``.
"""
def __init__(self, conv_to_res=True, **kwargs):
super(SCNetSemanticHead, self).__init__(**kwargs)
self.conv_to_res = conv_to_res
if self.conv_to_res:
num_res_blocks = self.num_convs // 2
self.convs = ResLayer(
SimplifiedBasicBlock,
self.in_channels,
self.conv_out_channels,
num_res_blocks,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
self.num_convs = num_res_blocks
|
from __future__ import annotations
from copy import deepcopy
import pytest
from sentence_transformers import SparseEncoder
@pytest.fixture(scope="session")
def _splade_bert_tiny_model() -> SparseEncoder:
model = SparseEncoder("sparse-encoder-testing/splade-bert-tiny-nq")
model.model_card_data.generate_widget_examples = False # Disable widget examples generation for testing
return model
@pytest.fixture()
def splade_bert_tiny_model(_splade_bert_tiny_model: SparseEncoder) -> SparseEncoder:
return deepcopy(_splade_bert_tiny_model)
@pytest.fixture(scope="session")
def _inference_free_splade_bert_tiny_model() -> SparseEncoder:
model = SparseEncoder("sparse-encoder-testing/inference-free-splade-bert-tiny-nq")
model.model_card_data.generate_widget_examples = False # Disable widget examples generation for testing
return model
@pytest.fixture()
def inference_free_splade_bert_tiny_model(_inference_free_splade_bert_tiny_model: SparseEncoder) -> SparseEncoder:
return deepcopy(_inference_free_splade_bert_tiny_model)
@pytest.fixture(scope="session")
def _csr_bert_tiny_model() -> SparseEncoder:
model = SparseEncoder("sentence-transformers-testing/stsb-bert-tiny-safetensors")
model[-1].k = 16
model[-1].k_aux = 32
model.model_card_data.generate_widget_examples = False # Disable widget examples generation for testing
return model
@pytest.fixture()
def csr_bert_tiny_model(_csr_bert_tiny_model: SparseEncoder) -> SparseEncoder:
return deepcopy(_csr_bert_tiny_model)
|
from __future__ import annotations
from copy import deepcopy
import pytest
from sentence_transformers import SparseEncoder
@pytest.fixture(scope="session")
def _splade_bert_tiny_model() -> SparseEncoder:
model = SparseEncoder("sparse-encoder-testing/splade-bert-tiny-nq")
model.model_card_data.generate_widget_examples = False # Disable widget examples generation for testing
return model
@pytest.fixture()
def splade_bert_tiny_model(_splade_bert_tiny_model: SparseEncoder) -> SparseEncoder:
return deepcopy(_splade_bert_tiny_model)
@pytest.fixture(scope="session")
def _inference_free_splade_bert_tiny_model() -> SparseEncoder:
model = SparseEncoder("sparse-encoder-testing/inference-free-splade-bert-tiny-nq")
model.model_card_data.generate_widget_examples = False # Disable widget examples generation for testing
return model
@pytest.fixture()
def inference_free_splade_bert_tiny_model(_inference_free_splade_bert_tiny_model: SparseEncoder) -> SparseEncoder:
return deepcopy(_inference_free_splade_bert_tiny_model)
@pytest.fixture(scope="session")
def _csr_bert_tiny_model() -> SparseEncoder:
model = SparseEncoder("sentence-transformers-testing/stsb-bert-tiny-safetensors")
model.model_card_data.generate_widget_examples = False # Disable widget examples generation for testing
return model
@pytest.fixture()
def csr_bert_tiny_model(_csr_bert_tiny_model: SparseEncoder) -> SparseEncoder:
return deepcopy(_csr_bert_tiny_model)
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# TODO: delete custom_imports after mmcls supports auto import
# please install mmcls>=1.0
# import mmcls.models to trigger register_module in mmcls
custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-tiny_3rdparty_32xb128-noema_in1k_20220301-795e9634.pth' # noqa
model = dict(
backbone=dict(
_delete_=True,
type='mmcls.ConvNeXt',
arch='tiny',
out_indices=[0, 1, 2, 3],
drop_path_rate=0.4,
layer_scale_init_value=1.0,
gap_before_final_norm=False,
init_cfg=dict(
type='Pretrained', checkpoint=checkpoint_file,
prefix='backbone.')),
neck=dict(in_channels=[96, 192, 384, 768]))
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[[
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
scales=[(400, 1333), (500, 1333), (600, 1333)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
keep_ratio=True)
]]),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(
type='AmpOptimWrapper',
constructor='LearningRateDecayOptimizerConstructor',
paramwise_cfg={
'decay_rate': 0.95,
'decay_type': 'layer_wise',
'num_layers': 6
},
optimizer=dict(
_delete_=True,
type='AdamW',
lr=0.0001,
betas=(0.9, 0.999),
weight_decay=0.05,
))
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# TODO: delete custom_imports after mmcls supports auto import
# please install mmcls>=1.0
# import mmcls.models to trigger register_module in mmcls
custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-tiny_3rdparty_32xb128-noema_in1k_20220301-795e9634.pth' # noqa
model = dict(
backbone=dict(
_delete_=True,
type='mmcls.ConvNeXt',
arch='tiny',
out_indices=[0, 1, 2, 3],
drop_path_rate=0.4,
layer_scale_init_value=1.0,
gap_before_final_norm=False,
init_cfg=dict(
type='Pretrained', checkpoint=checkpoint_file,
prefix='backbone.')),
neck=dict(in_channels=[96, 192, 384, 768]))
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[[
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
scales=[(400, 1333), (500, 1333), (600, 1333)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
keep_ratio=True)
]]),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(
type='AmpOptimWrapper',
constructor='LearningRateDecayOptimizerConstructor',
paramwise_cfg={
'decay_rate': 0.95,
'decay_type': 'layer_wise',
'num_layers': 6
},
optimizer=dict(
_delete_=True,
type='AdamW',
lr=0.0001,
betas=(0.9, 0.999),
weight_decay=0.05,
))
|
from backend.blocks.hubspot._auth import (
HubSpotCredentials,
HubSpotCredentialsField,
HubSpotCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import Requests
class HubSpotCompanyBlock(Block):
class Input(BlockSchema):
credentials: HubSpotCredentialsInput = HubSpotCredentialsField()
operation: str = SchemaField(
description="Operation to perform (create, update, get)", default="get"
)
company_data: dict = SchemaField(
description="Company data for create/update operations",
default_factory=dict,
)
domain: str = SchemaField(
description="Company domain for get/update operations", default=""
)
class Output(BlockSchema):
company: dict = SchemaField(description="Company information")
status: str = SchemaField(description="Operation status")
def __init__(self):
super().__init__(
id="3ae02219-d540-47cd-9c78-3ad6c7d9820a",
description="Manages HubSpot companies - create, update, and retrieve company information",
categories={BlockCategory.CRM},
input_schema=HubSpotCompanyBlock.Input,
output_schema=HubSpotCompanyBlock.Output,
)
async def run(
self, input_data: Input, *, credentials: HubSpotCredentials, **kwargs
) -> BlockOutput:
base_url = "https://api.hubapi.com/crm/v3/objects/companies"
headers = {
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
"Content-Type": "application/json",
}
if input_data.operation == "create":
response = await Requests().post(
base_url, headers=headers, json={"properties": input_data.company_data}
)
result = response.json()
yield "company", result
yield "status", "created"
elif input_data.operation == "get":
search_url = f"{base_url}/search"
search_data = {
"filterGroups": [
{
"filters": [
{
"propertyName": "domain",
"operator": "EQ",
"value": input_data.domain,
}
]
}
]
}
search_response = await Requests().post(
search_url, headers=headers, json=search_data
)
search_result = search_response.json()
yield "search_company", search_result.get("results", [{}])[0]
yield "status", "retrieved"
elif input_data.operation == "update":
# First get company ID by domain
search_response = await Requests().post(
f"{base_url}/search",
headers=headers,
json={
"filterGroups": [
{
"filters": [
{
"propertyName": "domain",
"operator": "EQ",
"value": input_data.domain,
}
]
}
]
},
)
search_result = search_response.json()
company_id = search_result.get("results", [{}])[0].get("id")
if company_id:
response = await Requests().patch(
f"{base_url}/{company_id}",
headers=headers,
json={"properties": input_data.company_data},
)
result = response.json()
yield "company", result
yield "status", "updated"
else:
yield "company", {}
yield "status", "company_not_found"
|
from backend.blocks.hubspot._auth import (
HubSpotCredentials,
HubSpotCredentialsField,
HubSpotCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import Requests
class HubSpotCompanyBlock(Block):
class Input(BlockSchema):
credentials: HubSpotCredentialsInput = HubSpotCredentialsField()
operation: str = SchemaField(
description="Operation to perform (create, update, get)", default="get"
)
company_data: dict = SchemaField(
description="Company data for create/update operations",
default_factory=dict,
)
domain: str = SchemaField(
description="Company domain for get/update operations", default=""
)
class Output(BlockSchema):
company: dict = SchemaField(description="Company information")
status: str = SchemaField(description="Operation status")
def __init__(self):
super().__init__(
id="3ae02219-d540-47cd-9c78-3ad6c7d9820a",
description="Manages HubSpot companies - create, update, and retrieve company information",
categories={BlockCategory.CRM},
input_schema=HubSpotCompanyBlock.Input,
output_schema=HubSpotCompanyBlock.Output,
)
def run(
self, input_data: Input, *, credentials: HubSpotCredentials, **kwargs
) -> BlockOutput:
base_url = "https://api.hubapi.com/crm/v3/objects/companies"
headers = {
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
"Content-Type": "application/json",
}
if input_data.operation == "create":
response = Requests().post(
base_url, headers=headers, json={"properties": input_data.company_data}
)
result = response.json()
yield "company", result
yield "status", "created"
elif input_data.operation == "get":
search_url = f"{base_url}/search"
search_data = {
"filterGroups": [
{
"filters": [
{
"propertyName": "domain",
"operator": "EQ",
"value": input_data.domain,
}
]
}
]
}
response = Requests().post(search_url, headers=headers, json=search_data)
result = response.json()
yield "company", result.get("results", [{}])[0]
yield "status", "retrieved"
elif input_data.operation == "update":
# First get company ID by domain
search_response = Requests().post(
f"{base_url}/search",
headers=headers,
json={
"filterGroups": [
{
"filters": [
{
"propertyName": "domain",
"operator": "EQ",
"value": input_data.domain,
}
]
}
]
},
)
company_id = search_response.json().get("results", [{}])[0].get("id")
if company_id:
response = Requests().patch(
f"{base_url}/{company_id}",
headers=headers,
json={"properties": input_data.company_data},
)
result = response.json()
yield "company", result
yield "status", "updated"
else:
yield "company", {}
yield "status", "company_not_found"
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseBinaryClassificationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Initialize the SPLADE model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a dataset with two text columns and a class label column (https://huggingface.co/datasets/sentence-transformers/quora-duplicates)
eval_dataset = load_dataset("sentence-transformers/quora-duplicates", "pair-class", split="train[-1000:]")
# Initialize the evaluator
binary_acc_evaluator = SparseBinaryClassificationEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
labels=eval_dataset["label"],
name="quora_duplicates_dev",
show_progress_bar=True,
similarity_fn_names=["cosine", "dot", "euclidean", "manhattan"],
)
results = binary_acc_evaluator(model)
"""
Accuracy with Cosine-Similarity: 74.90 (Threshold: 0.8668)
F1 with Cosine-Similarity: 67.37 (Threshold: 0.5959)
Precision with Cosine-Similarity: 54.15
Recall with Cosine-Similarity: 89.13
Average Precision with Cosine-Similarity: 67.81
Matthews Correlation with Cosine-Similarity: 49.89
Accuracy with Dot-Product: 76.50 (Threshold: 24.3460)
F1 with Dot-Product: 66.93 (Threshold: 20.0762)
Precision with Dot-Product: 57.62
Recall with Dot-Product: 79.81
Average Precision with Dot-Product: 65.94
Matthews Correlation with Dot-Product: 48.82
Accuracy with Euclidean-Distance: 67.70 (Threshold: -10.0062)
F1 with Euclidean-Distance: 48.60 (Threshold: -0.2346)
Precision with Euclidean-Distance: 32.13
Recall with Euclidean-Distance: 99.69
Average Precision with Euclidean-Distance: 20.52
Matthews Correlation with Euclidean-Distance: -4.59
Accuracy with Manhattan-Distance: 67.70 (Threshold: -103.1993)
F1 with Manhattan-Distance: 48.60 (Threshold: -1.1565)
Precision with Manhattan-Distance: 32.13
Recall with Manhattan-Distance: 99.69
Average Precision with Manhattan-Distance: 21.05
Matthews Correlation with Manhattan-Distance: -4.59
Model Sparsity: Active Dimensions: 63.1, Sparsity Ratio: 0.9979
"""
# Print the results
print(f"Primary metric: {binary_acc_evaluator.primary_metric}")
# => Primary metric: quora_duplicates_dev_max_ap
print(f"Primary metric value: {results[binary_acc_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6781
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseBinaryClassificationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Initialize the SPLADE model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a dataset with two text columns and a class label column (https://huggingface.co/datasets/sentence-transformers/quora-duplicates)
eval_dataset = load_dataset("sentence-transformers/quora-duplicates", "pair-class", split="train[-1000:]")
# Initialize the evaluator
binary_acc_evaluator = SparseBinaryClassificationEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
labels=eval_dataset["label"],
name="quora_duplicates_dev",
show_progress_bar=True,
similarity_fn_names=["cosine", "dot", "euclidean", "manhattan"],
)
results = binary_acc_evaluator(model)
"""
Accuracy with Cosine-Similarity: 74.90 (Threshold: 0.8668)
F1 with Cosine-Similarity: 67.37 (Threshold: 0.5959)
Precision with Cosine-Similarity: 54.15
Recall with Cosine-Similarity: 89.13
Average Precision with Cosine-Similarity: 67.81
Matthews Correlation with Cosine-Similarity: 49.89
Accuracy with Dot-Product: 76.50 (Threshold: 24.3460)
F1 with Dot-Product: 66.93 (Threshold: 20.0762)
Precision with Dot-Product: 57.62
Recall with Dot-Product: 79.81
Average Precision with Dot-Product: 65.94
Matthews Correlation with Dot-Product: 48.82
Accuracy with Euclidean-Distance: 67.70 (Threshold: -10.0062)
F1 with Euclidean-Distance: 48.60 (Threshold: -0.2346)
Precision with Euclidean-Distance: 32.13
Recall with Euclidean-Distance: 99.69
Average Precision with Euclidean-Distance: 20.52
Matthews Correlation with Euclidean-Distance: -4.59
Accuracy with Manhattan-Distance: 67.70 (Threshold: -103.1993)
F1 with Manhattan-Distance: 48.60 (Threshold: -1.1565)
Precision with Manhattan-Distance: 32.13
Recall with Manhattan-Distance: 99.69
Average Precision with Manhattan-Distance: 21.05
Matthews Correlation with Manhattan-Distance: -4.59
Model Sparsity Stats: Row Non-Zero Mean: 63.13884735107422, Row Sparsity Mean: 0.9979313611984253
"""
# Print the results
print(f"Primary metric: {binary_acc_evaluator.primary_metric}")
# => Primary metric: quora_duplicates_dev_max_ap
print(f"Primary metric value: {results[binary_acc_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6781
|
from pathlib import Path
from typing import Dict, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document, ImageDocument
from llama_index.core.utils import infer_torch_device
class ImageCaptionReader(BaseReader):
"""
Image parser.
Caption image using Blip.
"""
def __init__(
self,
parser_config: Optional[Dict] = None,
keep_image: bool = False,
prompt: Optional[str] = None,
):
"""Init params."""
if parser_config is None:
"""Init parser."""
try:
import sentencepiece # noqa
import torch
from PIL import Image # noqa
from transformers import BlipForConditionalGeneration, BlipProcessor
except ImportError:
raise ImportError(
"Please install extra dependencies that are required for "
"the ImageCaptionReader: "
"`pip install torch transformers sentencepiece Pillow`"
)
device = infer_torch_device()
dtype = torch.float16 if torch.cuda.is_available() else torch.float32
processor = BlipProcessor.from_pretrained(
"Salesforce/blip-image-captioning-large"
)
model = BlipForConditionalGeneration.from_pretrained(
"Salesforce/blip-image-captioning-large", torch_dtype=dtype
)
parser_config = {
"processor": processor,
"model": model,
"device": device,
"dtype": dtype,
}
self._parser_config = parser_config
self._keep_image = keep_image
self._prompt = prompt
def load_data(
self, file: Path, extra_info: Optional[Dict] = None
) -> List[Document]:
"""Parse file."""
from llama_index.core.img_utils import img_2_b64
from PIL import Image
# load document image
image = Image.open(file)
if image.mode != "RGB":
image = image.convert("RGB")
# Encode image into base64 string and keep in document
image_str: Optional[str] = None
if self._keep_image:
image_str = img_2_b64(image)
# Parse image into text
model = self._parser_config["model"]
processor = self._parser_config["processor"]
device = self._parser_config["device"]
dtype = self._parser_config["dtype"]
model.to(device)
# unconditional image captioning
inputs = processor(image, self._prompt, return_tensors="pt").to(device, dtype)
out = model.generate(**inputs)
text_str = processor.decode(out[0], skip_special_tokens=True)
return [
ImageDocument(
text=text_str,
image=image_str,
image_path=str(file),
metadata=extra_info or {},
)
]
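# Usage sketch (illustrative; the image path is hypothetical). The first call
# downloads the BLIP checkpoint from the Hugging Face Hub.
#
#     reader = ImageCaptionReader(keep_image=True)
#     documents = reader.load_data(Path("photo.jpg"))
#     print(documents[0].text)  # BLIP-generated caption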
|
from pathlib import Path
from typing import Dict, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document, ImageDocument
from llama_index.core.utils import infer_torch_device
class ImageCaptionReader(BaseReader):
"""Image parser.
Caption image using Blip.
"""
def __init__(
self,
parser_config: Optional[Dict] = None,
keep_image: bool = False,
prompt: Optional[str] = None,
):
"""Init params."""
if parser_config is None:
"""Init parser."""
try:
import sentencepiece # noqa
import torch
from PIL import Image # noqa
from transformers import BlipForConditionalGeneration, BlipProcessor
except ImportError:
raise ImportError(
"Please install extra dependencies that are required for "
"the ImageCaptionReader: "
"`pip install torch transformers sentencepiece Pillow`"
)
device = infer_torch_device()
dtype = torch.float16 if torch.cuda.is_available() else torch.float32
processor = BlipProcessor.from_pretrained(
"Salesforce/blip-image-captioning-large"
)
model = BlipForConditionalGeneration.from_pretrained(
"Salesforce/blip-image-captioning-large", torch_dtype=dtype
)
parser_config = {
"processor": processor,
"model": model,
"device": device,
"dtype": dtype,
}
self._parser_config = parser_config
self._keep_image = keep_image
self._prompt = prompt
def load_data(
self, file: Path, extra_info: Optional[Dict] = None
) -> List[Document]:
"""Parse file."""
from llama_index.core.img_utils import img_2_b64
from PIL import Image
# load document image
image = Image.open(file)
if image.mode != "RGB":
image = image.convert("RGB")
# Encode image into base64 string and keep in document
image_str: Optional[str] = None
if self._keep_image:
image_str = img_2_b64(image)
# Parse image into text
model = self._parser_config["model"]
processor = self._parser_config["processor"]
device = self._parser_config["device"]
dtype = self._parser_config["dtype"]
model.to(device)
# unconditional image captioning
inputs = processor(image, self._prompt, return_tensors="pt").to(device, dtype)
out = model.generate(**inputs)
text_str = processor.decode(out[0], skip_special_tokens=True)
return [
ImageDocument(
text=text_str,
image=image_str,
image_path=str(file),
metadata=extra_info or {},
)
]
|
"""Standard LangChain interface tests"""
from typing import Optional
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessageChunk, BaseMessageChunk
from langchain_core.rate_limiters import InMemoryRateLimiter
from langchain_tests.integration_tests import ( # type: ignore[import-not-found]
ChatModelIntegrationTests, # type: ignore[import-not-found]
)
from langchain_xai import ChatXAI
# Initialize the rate limiter in global scope, so it can be re-used
# across tests.
rate_limiter = InMemoryRateLimiter(
requests_per_second=0.5,
)
# Not using Grok 4 since it doesn't support reasoning params (effort) or return
# reasoning content.
class TestXAIStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatXAI
@property
def chat_model_params(self) -> dict:
# TODO: bump to test new Grok once they implement other features
return {
"model": "grok-3",
"rate_limiter": rate_limiter,
"stream_usage": True,
}
def test_reasoning_content() -> None:
"""Test reasoning content."""
chat_model = ChatXAI(
model="grok-3-mini",
reasoning_effort="low",
)
response = chat_model.invoke("What is 3^3?")
assert response.content
assert response.additional_kwargs["reasoning_content"]
# Test streaming
full: Optional[BaseMessageChunk] = None
for chunk in chat_model.stream("What is 3^3?"):
full = chunk if full is None else full + chunk
assert isinstance(full, AIMessageChunk)
assert full.additional_kwargs["reasoning_content"]
def test_web_search() -> None:
llm = ChatXAI(
model="grok-3",
search_parameters={"mode": "auto", "max_search_results": 3},
)
# Test invoke
response = llm.invoke("Provide me a digest of world news in the last 24 hours.")
assert response.content
assert response.additional_kwargs["citations"]
assert len(response.additional_kwargs["citations"]) <= 3
# Test streaming
full = None
for chunk in llm.stream("Provide me a digest of world news in the last 24 hours."):
full = chunk if full is None else full + chunk
assert isinstance(full, AIMessageChunk)
assert full.additional_kwargs["citations"]
assert len(full.additional_kwargs["citations"]) <= 3
|
"""Standard LangChain interface tests"""
from typing import Optional
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessageChunk, BaseMessageChunk
from langchain_core.rate_limiters import InMemoryRateLimiter
from langchain_tests.integration_tests import ( # type: ignore[import-not-found]
ChatModelIntegrationTests, # type: ignore[import-not-found]
)
from langchain_xai import ChatXAI
# Initialize the rate limiter in global scope, so it can be re-used
# across tests.
rate_limiter = InMemoryRateLimiter(
requests_per_second=0.5,
)
class TestXAIStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatXAI
@property
def chat_model_params(self) -> dict:
return {
"model": "grok-3",
"rate_limiter": rate_limiter,
"stream_usage": True,
}
def test_reasoning_content() -> None:
"""Test reasoning content."""
chat_model = ChatXAI(
model="grok-3-mini-beta",
reasoning_effort="low",
)
response = chat_model.invoke("What is 3^3?")
assert response.content
assert response.additional_kwargs["reasoning_content"]
# Test streaming
full: Optional[BaseMessageChunk] = None
for chunk in chat_model.stream("What is 3^3?"):
full = chunk if full is None else full + chunk
assert isinstance(full, AIMessageChunk)
assert full.additional_kwargs["reasoning_content"]
def test_web_search() -> None:
llm = ChatXAI(
model="grok-3-latest",
search_parameters={"mode": "auto", "max_search_results": 3},
)
# Test invoke
response = llm.invoke("Provide me a digest of world news in the last 24 hours.")
assert response.content
assert response.additional_kwargs["citations"]
assert len(response.additional_kwargs["citations"]) <= 3
# Test streaming
full = None
for chunk in llm.stream("Provide me a digest of world news in the last 24 hours."):
full = chunk if full is None else full + chunk
assert isinstance(full, AIMessageChunk)
assert full.additional_kwargs["citations"]
assert len(full.additional_kwargs["citations"]) <= 3
|
"""Notebook utils."""
from collections import defaultdict
from typing import Any, List, Optional, Tuple
from llama_index.core.evaluation import EvaluationResult
from llama_index.core.evaluation.retrieval.base import RetrievalEvalResult
DEFAULT_METRIC_KEYS = ["hit_rate", "mrr"]
def get_retrieval_results_df(
names: List[str],
results_arr: List[List[RetrievalEvalResult]],
metric_keys: Optional[List[str]] = None,
) -> Any:
"""Display retrieval results."""
try:
import pandas as pd
except ImportError:
raise ImportError(
"pandas is required for this function. Please install it with `pip install pandas`."
)
metric_keys = metric_keys or DEFAULT_METRIC_KEYS
avg_metrics_dict = defaultdict(list)
for name, eval_results in zip(names, results_arr):
metric_dicts = []
for eval_result in eval_results:
metric_dict = eval_result.metric_vals_dict
metric_dicts.append(metric_dict)
results_df = pd.DataFrame(metric_dicts)
for metric_key in metric_keys:
if metric_key not in results_df.columns:
raise ValueError(f"Metric key {metric_key} not in results_df")
avg_metrics_dict[metric_key].append(results_df[metric_key].mean())
return pd.DataFrame({"retrievers": names, **avg_metrics_dict})
def get_eval_results_df(
names: List[str], results_arr: List[EvaluationResult], metric: Optional[str] = None
) -> Tuple[Any, Any]:
"""
Organizes EvaluationResults into a deep dataframe and computes the mean
score.
result:
result_df: pd.DataFrame representing all the evaluation results
mean_df: pd.DataFrame of average scores groupby names
"""
try:
import pandas as pd
except ImportError:
raise ImportError(
"pandas is required for this function. Please install it with `pip install pandas`."
)
if len(names) != len(results_arr):
raise ValueError("names and results_arr must have same length.")
qs = []
ss = []
fs = []
rs = []
cs = []
for res in results_arr:
qs.append(res.query)
ss.append(res.score)
fs.append(res.feedback)
rs.append(res.response)
cs.append(res.contexts)
deep_df = pd.DataFrame(
{
"rag": names,
"query": qs,
"answer": rs,
"contexts": cs,
"scores": ss,
"feedbacks": fs,
}
)
mean_df = pd.DataFrame(deep_df.groupby(["rag"])["scores"].mean()).T
if metric:
mean_df.index = [f"mean_{metric}_score"]
return deep_df, mean_df
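# Usage sketch (illustrative; `bm25_results`, `dense_results`, and
# `eval_results` are hypothetical lists produced by the corresponding
# evaluators):
#
#     retrieval_df = get_retrieval_results_df(
#         ["bm25", "dense"], [bm25_results, dense_results]
#     )
#     deep_df, mean_df = get_eval_results_df(
#         ["rag_a", "rag_b"], eval_results, metric="correctness"
#     )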
|
"""Notebook utils."""
from collections import defaultdict
from typing import Any, List, Optional, Tuple
from llama_index.core.evaluation import EvaluationResult
from llama_index.core.evaluation.retrieval.base import RetrievalEvalResult
DEFAULT_METRIC_KEYS = ["hit_rate", "mrr"]
def get_retrieval_results_df(
names: List[str],
results_arr: List[List[RetrievalEvalResult]],
metric_keys: Optional[List[str]] = None,
) -> Any:
"""Display retrieval results."""
try:
import pandas as pd
except ImportError:
raise ImportError(
"pandas is required for this function. Please install it with `pip install pandas`."
)
metric_keys = metric_keys or DEFAULT_METRIC_KEYS
avg_metrics_dict = defaultdict(list)
for name, eval_results in zip(names, results_arr):
metric_dicts = []
for eval_result in eval_results:
metric_dict = eval_result.metric_vals_dict
metric_dicts.append(metric_dict)
results_df = pd.DataFrame(metric_dicts)
for metric_key in metric_keys:
if metric_key not in results_df.columns:
raise ValueError(f"Metric key {metric_key} not in results_df")
avg_metrics_dict[metric_key].append(results_df[metric_key].mean())
return pd.DataFrame({"retrievers": names, **avg_metrics_dict})
def get_eval_results_df(
names: List[str], results_arr: List[EvaluationResult], metric: Optional[str] = None
) -> Tuple[Any, Any]:
"""Organizes EvaluationResults into a deep dataframe and computes the mean
score.
result:
result_df: pd.DataFrame representing all the evaluation results
mean_df: pd.DataFrame of average scores groupby names
"""
try:
import pandas as pd
except ImportError:
raise ImportError(
"pandas is required for this function. Please install it with `pip install pandas`."
)
if len(names) != len(results_arr):
raise ValueError("names and results_arr must have same length.")
qs = []
ss = []
fs = []
rs = []
cs = []
for res in results_arr:
qs.append(res.query)
ss.append(res.score)
fs.append(res.feedback)
rs.append(res.response)
cs.append(res.contexts)
deep_df = pd.DataFrame(
{
"rag": names,
"query": qs,
"answer": rs,
"contexts": cs,
"scores": ss,
"feedbacks": fs,
}
)
mean_df = pd.DataFrame(deep_df.groupby(["rag"])["scores"].mean()).T
if metric:
mean_df.index = [f"mean_{metric}_score"]
return deep_df, mean_df
|
import functools
import warnings
from collections import defaultdict
from collections.abc import Sequence
from typing import Any, Optional, TypeVar, Union
import torch
from torchvision import tv_tensors
from torchvision.transforms.v2 import Transform
from torchvision.transforms.v2._utils import is_pure_tensor
T = TypeVar("T")
def _default_arg(value: T) -> T:
return value
def _get_defaultdict(default: T) -> dict[Any, T]:
# This weird looking construct only exists, since `lambda`'s cannot be serialized by pickle.
# If it were possible, we could replace this with `defaultdict(lambda: default)`
return defaultdict(functools.partial(_default_arg, default))
class PermuteDimensions(Transform):
_transformed_types = (is_pure_tensor, tv_tensors.Image, tv_tensors.Video)
def __init__(self, dims: Union[Sequence[int], dict[type, Optional[Sequence[int]]]]) -> None:
super().__init__()
if not isinstance(dims, dict):
dims = _get_defaultdict(dims)
if torch.Tensor in dims and any(cls in dims for cls in [tv_tensors.Image, tv_tensors.Video]):
warnings.warn(
"Got `dims` values for `torch.Tensor` and either `tv_tensors.Image` or `tv_tensors.Video`. "
"Note that a plain `torch.Tensor` will *not* be transformed by this (or any other transformation) "
"in case a `tv_tensors.Image` or `tv_tensors.Video` is present in the input."
)
self.dims = dims
def transform(self, inpt: Any, params: dict[str, Any]) -> torch.Tensor:
dims = self.dims[type(inpt)]
if dims is None:
return inpt.as_subclass(torch.Tensor)
return inpt.permute(*dims)
class TransposeDimensions(Transform):
_transformed_types = (is_pure_tensor, tv_tensors.Image, tv_tensors.Video)
def __init__(self, dims: Union[tuple[int, int], dict[type, Optional[tuple[int, int]]]]) -> None:
super().__init__()
if not isinstance(dims, dict):
dims = _get_defaultdict(dims)
if torch.Tensor in dims and any(cls in dims for cls in [tv_tensors.Image, tv_tensors.Video]):
warnings.warn(
"Got `dims` values for `torch.Tensor` and either `tv_tensors.Image` or `tv_tensors.Video`. "
"Note that a plain `torch.Tensor` will *not* be transformed by this (or any other transformation) "
"in case a `tv_tensors.Image` or `tv_tensors.Video` is present in the input."
)
self.dims = dims
def transform(self, inpt: Any, params: dict[str, Any]) -> torch.Tensor:
dims = self.dims[type(inpt)]
if dims is None:
return inpt.as_subclass(torch.Tensor)
return inpt.transpose(*dims)
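# Usage sketch (illustrative): a per-type mapping permutes images while letting
# videos fall back to plain tensors.
#
#     transform = PermuteDimensions({tv_tensors.Image: (2, 0, 1), tv_tensors.Video: None})
#     chw = transform(tv_tensors.Image(torch.rand(8, 8, 3)))  # HWC -> CHW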
|
import functools
import warnings
from collections import defaultdict
from typing import Any, Dict, Optional, Sequence, Tuple, Type, TypeVar, Union
import torch
from torchvision import tv_tensors
from torchvision.transforms.v2 import Transform
from torchvision.transforms.v2._utils import is_pure_tensor
T = TypeVar("T")
def _default_arg(value: T) -> T:
return value
def _get_defaultdict(default: T) -> Dict[Any, T]:
# This weird looking construct only exists, since `lambda`'s cannot be serialized by pickle.
# If it were possible, we could replace this with `defaultdict(lambda: default)`
return defaultdict(functools.partial(_default_arg, default))
class PermuteDimensions(Transform):
_transformed_types = (is_pure_tensor, tv_tensors.Image, tv_tensors.Video)
def __init__(self, dims: Union[Sequence[int], Dict[Type, Optional[Sequence[int]]]]) -> None:
super().__init__()
if not isinstance(dims, dict):
dims = _get_defaultdict(dims)
if torch.Tensor in dims and any(cls in dims for cls in [tv_tensors.Image, tv_tensors.Video]):
warnings.warn(
"Got `dims` values for `torch.Tensor` and either `tv_tensors.Image` or `tv_tensors.Video`. "
"Note that a plain `torch.Tensor` will *not* be transformed by this (or any other transformation) "
"in case a `tv_tensors.Image` or `tv_tensors.Video` is present in the input."
)
self.dims = dims
def transform(self, inpt: Any, params: Dict[str, Any]) -> torch.Tensor:
dims = self.dims[type(inpt)]
if dims is None:
return inpt.as_subclass(torch.Tensor)
return inpt.permute(*dims)
class TransposeDimensions(Transform):
_transformed_types = (is_pure_tensor, tv_tensors.Image, tv_tensors.Video)
def __init__(self, dims: Union[Tuple[int, int], Dict[Type, Optional[Tuple[int, int]]]]) -> None:
super().__init__()
if not isinstance(dims, dict):
dims = _get_defaultdict(dims)
if torch.Tensor in dims and any(cls in dims for cls in [tv_tensors.Image, tv_tensors.Video]):
warnings.warn(
"Got `dims` values for `torch.Tensor` and either `tv_tensors.Image` or `tv_tensors.Video`. "
"Note that a plain `torch.Tensor` will *not* be transformed by this (or any other transformation) "
"in case a `tv_tensors.Image` or `tv_tensors.Video` is present in the input."
)
self.dims = dims
def transform(self, inpt: Any, params: Dict[str, Any]) -> torch.Tensor:
dims = self.dims[type(inpt)]
if dims is None:
return inpt.as_subclass(torch.Tensor)
return inpt.transpose(*dims)
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import unittest
from unittest import TestCase
import torch
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
from mmdet.utils import register_all_modules
class TestTridentRoIHead(TestCase):
def setUp(self):
register_all_modules()
self.roi_head_cfg = get_roi_head_cfg(
'tridentnet/tridentnet_r50_caffe_1x_coco.py')
def test_init(self):
roi_head = MODELS.build(self.roi_head_cfg)
self.assertTrue(roi_head.with_bbox)
self.assertTrue(roi_head.with_shared_head)
def test_trident_roi_head_predict(self):
"""Tests trident roi head predict."""
if not torch.cuda.is_available():
            # RoI pooling is only supported on GPU
return unittest.skip('test requires GPU and torch+cuda')
roi_head_cfg = copy.deepcopy(self.roi_head_cfg)
roi_head = MODELS.build(roi_head_cfg)
roi_head = roi_head.cuda()
s = 256
feats = []
for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):
feats.append(
torch.rand(1, 1024, s // (2**(i + 2)),
s // (2**(i + 2))).to(device='cuda'))
image_shapes = [(3, s, s)]
batch_data_samples = demo_mm_inputs(
batch_size=1,
image_shapes=image_shapes,
num_items=[0],
num_classes=4,
with_mask=True,
device='cuda')['data_samples']
proposals_list = demo_mm_proposals(
image_shapes=image_shapes, num_proposals=100, device='cuda')
# When `test_branch_idx == 1`
roi_head.predict(feats, proposals_list, batch_data_samples)
# When `test_branch_idx == -1`
roi_head_cfg.test_branch_idx = -1
roi_head = MODELS.build(roi_head_cfg)
roi_head = roi_head.cuda()
roi_head.predict(feats, proposals_list, batch_data_samples)
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import unittest
from unittest import TestCase
import torch
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
from mmdet.utils import register_all_modules
class TestTridentRoIHead(TestCase):
def setUp(self):
register_all_modules()
self.roi_head_cfg = get_roi_head_cfg(
'tridentnet/tridentnet_r50_caffe_1x_coco.py')
def test_init(self):
roi_head = MODELS.build(self.roi_head_cfg)
self.assertTrue(roi_head.with_bbox)
self.assertTrue(roi_head.with_shared_head)
def test_trident_roi_head_predict(self):
"""Tests trident roi head predict."""
if not torch.cuda.is_available():
            # RoI pooling is only supported on GPU
return unittest.skip('test requires GPU and torch+cuda')
roi_head_cfg = copy.deepcopy(self.roi_head_cfg)
roi_head = MODELS.build(roi_head_cfg)
roi_head = roi_head.cuda()
s = 256
feats = []
for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):
feats.append(
torch.rand(1, 1024, s // (2**(i + 2)),
s // (2**(i + 2))).to(device='cuda'))
image_shapes = [(3, s, s)]
packed_inputs = demo_mm_inputs(
batch_size=1,
image_shapes=image_shapes,
num_items=[0],
num_classes=4,
with_mask=True)
proposals_list = demo_mm_proposals(
image_shapes=image_shapes, num_proposals=100)
batch_data_samples = []
for i in range(len(packed_inputs)):
batch_data_samples.append(
packed_inputs[i]['data_sample'].to(device='cuda'))
proposals_list[i] = proposals_list[i].to(device='cuda')
# When `test_branch_idx == 1`
roi_head.predict(feats, proposals_list, batch_data_samples)
# When `test_branch_idx == -1`
roi_head_cfg.test_branch_idx = -1
roi_head = MODELS.build(roi_head_cfg)
roi_head = roi_head.cuda()
roi_head.predict(feats, proposals_list, batch_data_samples)
|
"""This modules defines all kinds of exceptions raised in Jina."""
from typing import Set, Union
import grpc.aio
class BaseJinaException(BaseException):
"""A base class for all exceptions raised by Jina"""
class RuntimeFailToStart(SystemError, BaseJinaException):
"""When pod/deployment is failed to started."""
class RuntimeTerminated(KeyboardInterrupt, BaseJinaException):
"""The event loop of BasePod ends."""
class FlowTopologyError(Exception, BaseJinaException):
"""Flow exception when the topology is ambiguous."""
class FlowMissingDeploymentError(Exception, BaseJinaException):
"""Flow exception when a deployment can not be found in the flow."""
class FlowBuildLevelError(Exception, BaseJinaException):
"""Flow exception when required build level is higher than the current build level."""
class BadConfigSource(FileNotFoundError, BaseJinaException):
"""The yaml config file is bad, not loadable or not exist."""
class BadServerFlow(Exception, BaseJinaException):
"""A wrongly defined Flow on the server side"""
class BadClient(Exception, BaseJinaException):
"""A wrongly defined client, can not communicate with jina server correctly."""
class BadServer(Exception, BaseJinaException):
"""Error happens on the server side."""
class BadClientCallback(BadClient, BaseJinaException):
"""Error in the callback function on the client side."""
class BadClientInput(BadClient, BaseJinaException):
"""Error in the request generator function on the client side."""
class BadRequestType(TypeError, BaseJinaException):
"""Exception when can not construct a request object from given data."""
class BadImageNameError(Exception, BaseJinaException):
"""Exception when an image name can not be found either local & remote"""
class BadYAMLVersion(Exception, BaseJinaException):
"""Exception when YAML config specifies a wrong version number."""
class NotSupportedError(Exception, BaseJinaException):
"""Exception when user accidentally using a retired argument."""
class RuntimeRunForeverEarlyError(Exception, BaseJinaException):
"""Raised when an error occurs when starting the run_forever of Runtime"""
class DockerVersionError(SystemError, BaseJinaException):
"""Raised when the docker version is incompatible"""
class NoContainerizedError(Exception, BaseJinaException):
"""Raised when trying to use non-containerized Executor in K8s or Docker Compose"""
class PortAlreadyUsed(RuntimeError, BaseJinaException):
"""Raised when trying to use a port which is already used"""
class EstablishGrpcConnectionError(Exception, BaseJinaException):
"""Raised when Exception occurs when establishing or resetting gRPC connection"""
class InternalNetworkError(grpc.aio.AioRpcError, BaseJinaException):
"""
Raised when communication between microservices fails.
Needed to propagate information about the root cause event, such as request_id and dest_addr.
"""
def __init__(
self,
og_exception: grpc.aio.AioRpcError,
request_id: str = '',
dest_addr: Union[str, Set[str]] = {''},
details: str = '',
):
"""
:param og_exception: the original exception that caused the network error
:param request_id: id of the request that caused the error
:param dest_addr: destination (microservice) address(es) of the problematic network call(s)
:param details: details of the error
"""
self.og_exception = og_exception
self.request_id = request_id
self.dest_addr = dest_addr
self._details = details
super().__init__(
og_exception.code(),
og_exception.initial_metadata(),
og_exception.trailing_metadata(),
self.details(),
og_exception.debug_error_string(),
)
def __str__(self):
return self.details()
def __repr__(self):
return self.__str__()
def code(self):
"""
:return: error code of this exception
"""
return self.og_exception.code()
def details(self):
"""
:return: details of this exception
"""
return self._details if self._details else self.og_exception.details()
|
"""This modules defines all kinds of exceptions raised in Jina."""
from typing import Set, Union
import grpc.aio
class BaseJinaException(BaseException):
"""A base class for all exceptions raised by Jina"""
class RuntimeFailToStart(SystemError, BaseJinaException):
"""When pod/deployment is failed to started."""
class RuntimeTerminated(KeyboardInterrupt, BaseJinaException):
"""The event loop of BasePod ends."""
class FlowTopologyError(Exception, BaseJinaException):
"""Flow exception when the topology is ambiguous."""
class FlowMissingDeploymentError(Exception, BaseJinaException):
"""Flow exception when a deployment can not be found in the flow."""
class FlowBuildLevelError(Exception, BaseJinaException):
"""Flow exception when required build level is higher than the current build level."""
class BadConfigSource(FileNotFoundError, BaseJinaException):
"""The yaml config file is bad, not loadable or not exist."""
class BadClient(Exception, BaseJinaException):
"""A wrongly defined client, can not communicate with jina server correctly."""
class BadServer(Exception, BaseJinaException):
"""Error happens on the server side."""
class BadClientCallback(BadClient, BaseJinaException):
"""Error in the callback function on the client side."""
class BadClientInput(BadClient, BaseJinaException):
"""Error in the request generator function on the client side."""
class BadRequestType(TypeError, BaseJinaException):
"""Exception when can not construct a request object from given data."""
class BadImageNameError(Exception, BaseJinaException):
"""Exception when an image name can not be found either local & remote"""
class BadYAMLVersion(Exception, BaseJinaException):
"""Exception when YAML config specifies a wrong version number."""
class NotSupportedError(Exception, BaseJinaException):
"""Exception when user accidentally using a retired argument."""
class RuntimeRunForeverEarlyError(Exception, BaseJinaException):
"""Raised when an error occurs when starting the run_forever of Runtime"""
class DockerVersionError(SystemError, BaseJinaException):
"""Raised when the docker version is incompatible"""
class NoContainerizedError(Exception, BaseJinaException):
"""Raised when trying to use non-containerized Executor in K8s or Docker Compose"""
class PortAlreadyUsed(RuntimeError, BaseJinaException):
"""Raised when trying to use a port which is already used"""
class EstablishGrpcConnectionError(Exception, BaseJinaException):
"""Raised when Exception occurs when establishing or resetting gRPC connection"""
class InternalNetworkError(grpc.aio.AioRpcError, BaseJinaException):
"""
Raised when communication between microservices fails.
Needed to propagate information about the root cause event, such as request_id and dest_addr.
"""
def __init__(
self,
og_exception: grpc.aio.AioRpcError,
request_id: str = '',
dest_addr: Union[str, Set[str]] = {''},
details: str = '',
):
"""
:param og_exception: the original exception that caused the network error
:param request_id: id of the request that caused the error
:param dest_addr: destination (microservice) address(es) of the problematic network call(s)
:param details: details of the error
"""
self.og_exception = og_exception
self.request_id = request_id
self.dest_addr = dest_addr
self._details = details
super().__init__(
og_exception.code(),
og_exception.initial_metadata(),
og_exception.trailing_metadata(),
self.details(),
og_exception.debug_error_string(),
)
def __str__(self):
return self.details()
def __repr__(self):
return self.__str__()
def code(self):
"""
:return: error code of this exception
"""
return self.og_exception.code()
def details(self):
"""
:return: details of this exception
"""
return self._details if self._details else self.og_exception.details()
|
from pathlib import Path
import pytest
from jina import Document, DocumentArray, Executor
from sentencizer import Sentencizer
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.min_sent_len == 1
@pytest.mark.parametrize('traversal_paths', [('r',), ('c',)])
def test_executor(traversal_paths):
ex = Sentencizer(traversal_paths=traversal_paths)
doc = Document(text='Hello. World! Go? Back')
if 'c' in traversal_paths:
da = DocumentArray([Document(chunks=[doc])])
else:
da = DocumentArray([doc])
ex.segment(da, {})
flattened_docs = da.traverse_flat(traversal_paths)
assert len(flattened_docs) == 1
assert len(flattened_docs[0].chunks) == 4
assert flattened_docs[0].chunks[0].text == 'Hello.'
assert flattened_docs[0].chunks[1].text == 'World!'
assert flattened_docs[0].chunks[2].text == 'Go?'
assert flattened_docs[0].chunks[3].text == 'Back'
def test_executor_with_punct_chars():
ex = Sentencizer(punct_chars=['.'])
da = DocumentArray([Document(text='Hello. World! Go? Back')])
ex.segment(da, {})
assert len(da) == 1
assert len(da[0].chunks) == 2
assert da[0].chunks[0].text == 'Hello.'
assert da[0].chunks[1].text == 'World! Go? Back'
def test_executor_with_max_sent_length():
ex = Sentencizer(punct_chars=['.'], max_sent_len=3)
da = DocumentArray([Document(text='Hello. World')])
ex.segment(da, {})
assert len(da) == 1
assert len(da[0].chunks) == 2
assert da[0].chunks[0].text == 'Hel'
assert da[0].chunks[1].text == 'Wor'
def test_executor_empty_input():
ex = Sentencizer()
da = DocumentArray()
ex.segment(da, {})
assert len(da) == 0
def test_executor_none_input():
ex = Sentencizer()
ex.segment(None, {})
|
from pathlib import Path
from jina import Document, DocumentArray, Executor
from sentencizer import Sentencizer
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.min_sent_len == 1
def test_executor():
ex = Sentencizer()
input = DocumentArray([Document(text='Hello. World.')])
ex.segment(input, {})
assert input[0].chunks[0].text == 'Hello.'
assert input[0].chunks[1].text == 'World.'
|
"""Test volc engine maas LLM model."""
from typing import Generator
from langchain_core.outputs import LLMResult
from pydantic import SecretStr
from pytest import CaptureFixture
from langchain_community.llms.volcengine_maas import (
VolcEngineMaasBase,
VolcEngineMaasLLM,
)
def test_api_key_is_string() -> None:
llm = VolcEngineMaasBase(
volc_engine_maas_ak="secret-volc-ak", # type: ignore[arg-type]
volc_engine_maas_sk="secret-volc-sk", # type: ignore[arg-type]
)
assert isinstance(llm.volc_engine_maas_ak, SecretStr)
assert isinstance(llm.volc_engine_maas_sk, SecretStr)
def test_api_key_masked_when_passed_via_constructor(
capsys: CaptureFixture,
) -> None:
llm = VolcEngineMaasBase(
volc_engine_maas_ak="secret-volc-ak", # type: ignore[arg-type]
volc_engine_maas_sk="secret-volc-sk", # type: ignore[arg-type]
)
print(llm.volc_engine_maas_ak, end="") # noqa: T201
captured = capsys.readouterr()
assert captured.out == "**********"
def test_default_call() -> None:
"""Test valid call to volc engine."""
llm = VolcEngineMaasLLM()
output = llm.invoke("tell me a joke")
assert isinstance(output, str)
def test_generate() -> None:
"""Test valid call to volc engine."""
llm = VolcEngineMaasLLM()
output = llm.generate(["tell me a joke"])
assert isinstance(output, LLMResult)
assert isinstance(output.generations, list)
def test_generate_stream() -> None:
"""Test valid call to volc engine."""
llm = VolcEngineMaasLLM(streaming=True)
output = llm.stream("tell me a joke")
assert isinstance(output, Generator)
|
"""Test volc engine maas LLM model."""
from typing import Generator
from langchain_core.outputs import LLMResult
from pydantic import SecretStr
from pytest import CaptureFixture
from langchain_community.llms.volcengine_maas import (
VolcEngineMaasBase,
VolcEngineMaasLLM,
)
def test_api_key_is_string() -> None:
llm = VolcEngineMaasBase( # type: ignore[call-arg]
volc_engine_maas_ak="secret-volc-ak", # type: ignore[arg-type]
volc_engine_maas_sk="secret-volc-sk", # type: ignore[arg-type]
)
assert isinstance(llm.volc_engine_maas_ak, SecretStr)
assert isinstance(llm.volc_engine_maas_sk, SecretStr)
def test_api_key_masked_when_passed_via_constructor(
capsys: CaptureFixture,
) -> None:
llm = VolcEngineMaasBase( # type: ignore[call-arg]
volc_engine_maas_ak="secret-volc-ak", # type: ignore[arg-type]
volc_engine_maas_sk="secret-volc-sk", # type: ignore[arg-type]
)
print(llm.volc_engine_maas_ak, end="") # noqa: T201
captured = capsys.readouterr()
assert captured.out == "**********"
def test_default_call() -> None:
"""Test valid call to volc engine."""
llm = VolcEngineMaasLLM() # type: ignore[call-arg]
output = llm.invoke("tell me a joke")
assert isinstance(output, str)
def test_generate() -> None:
"""Test valid call to volc engine."""
llm = VolcEngineMaasLLM() # type: ignore[call-arg]
output = llm.generate(["tell me a joke"])
assert isinstance(output, LLMResult)
assert isinstance(output.generations, list)
def test_generate_stream() -> None:
"""Test valid call to volc engine."""
llm = VolcEngineMaasLLM(streaming=True) # type: ignore[call-arg]
output = llm.stream("tell me a joke")
assert isinstance(output, Generator)
|
from langchain_core.runnables.config import (
EmptyDict,
RunnableConfig,
acall_func_with_variable_args,
call_func_with_variable_args,
ensure_config,
get_async_callback_manager_for_config,
get_callback_manager_for_config,
get_config_list,
get_executor_for_config,
merge_configs,
patch_config,
)
__all__ = [
"EmptyDict",
"RunnableConfig",
"acall_func_with_variable_args",
"call_func_with_variable_args",
"ensure_config",
"get_async_callback_manager_for_config",
"get_callback_manager_for_config",
"get_config_list",
"get_executor_for_config",
"merge_configs",
"patch_config",
]
|
from langchain_core.runnables.config import (
EmptyDict,
RunnableConfig,
acall_func_with_variable_args,
call_func_with_variable_args,
ensure_config,
get_async_callback_manager_for_config,
get_callback_manager_for_config,
get_config_list,
get_executor_for_config,
merge_configs,
patch_config,
)
__all__ = [
"EmptyDict",
"RunnableConfig",
"ensure_config",
"get_config_list",
"patch_config",
"merge_configs",
"acall_func_with_variable_args",
"call_func_with_variable_args",
"get_callback_manager_for_config",
"get_async_callback_manager_for_config",
"get_executor_for_config",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .gaussian_target import (gather_feat, gaussian_radius,
gen_gaussian_target, get_local_maximum,
get_topk_from_heatmap, transpose_and_gather_feat)
from .image import imrenormalize
from .make_divisible import make_divisible
from .misc import (aligned_bilinear, center_of_mass, empty_instances,
filter_gt_instances, filter_scores_and_topk, flip_tensor,
generate_coordinate, images_to_levels, interpolate_as,
levels_to_images, mask2ndarray, multi_apply,
relative_coordinate_maps, rename_loss_dict,
reweight_loss_dict, samplelist_boxtype2tensor,
select_single_mlvl, sigmoid_geometric_mean,
unfold_wo_center, unmap, unpack_gt_instances)
from .panoptic_gt_processing import preprocess_panoptic_gt
from .point_sample import (get_uncertain_point_coords_with_randomness,
get_uncertainty)
from .vlfuse_helper import BertEncoderLayer, VLFuse, permute_and_flatten
__all__ = [
'gaussian_radius', 'gen_gaussian_target', 'make_divisible',
'get_local_maximum', 'get_topk_from_heatmap', 'transpose_and_gather_feat',
'interpolate_as', 'sigmoid_geometric_mean', 'gather_feat',
'preprocess_panoptic_gt', 'get_uncertain_point_coords_with_randomness',
'get_uncertainty', 'unpack_gt_instances', 'empty_instances',
'center_of_mass', 'filter_scores_and_topk', 'flip_tensor',
'generate_coordinate', 'levels_to_images', 'mask2ndarray', 'multi_apply',
'select_single_mlvl', 'unmap', 'images_to_levels',
'samplelist_boxtype2tensor', 'filter_gt_instances', 'rename_loss_dict',
'reweight_loss_dict', 'relative_coordinate_maps', 'aligned_bilinear',
'unfold_wo_center', 'imrenormalize', 'VLFuse', 'permute_and_flatten',
'BertEncoderLayer'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .gaussian_target import (gather_feat, gaussian_radius,
gen_gaussian_target, get_local_maximum,
get_topk_from_heatmap, transpose_and_gather_feat)
from .image import imrenormalize
from .make_divisible import make_divisible
from .misc import (aligned_bilinear, center_of_mass, empty_instances,
filter_gt_instances, filter_scores_and_topk, flip_tensor,
generate_coordinate, images_to_levels, interpolate_as,
levels_to_images, mask2ndarray, multi_apply,
relative_coordinate_maps, rename_loss_dict,
reweight_loss_dict, samplelist_boxtype2tensor,
select_single_mlvl, sigmoid_geometric_mean,
unfold_wo_center, unmap, unpack_gt_instances)
from .panoptic_gt_processing import preprocess_panoptic_gt
from .point_sample import (get_uncertain_point_coords_with_randomness,
get_uncertainty)
__all__ = [
'gaussian_radius', 'gen_gaussian_target', 'make_divisible',
'get_local_maximum', 'get_topk_from_heatmap', 'transpose_and_gather_feat',
'interpolate_as', 'sigmoid_geometric_mean', 'gather_feat',
'preprocess_panoptic_gt', 'get_uncertain_point_coords_with_randomness',
'get_uncertainty', 'unpack_gt_instances', 'empty_instances',
'center_of_mass', 'filter_scores_and_topk', 'flip_tensor',
'generate_coordinate', 'levels_to_images', 'mask2ndarray', 'multi_apply',
'select_single_mlvl', 'unmap', 'images_to_levels',
'samplelist_boxtype2tensor', 'filter_gt_instances', 'rename_loss_dict',
'reweight_loss_dict', 'relative_coordinate_maps', 'aligned_bilinear',
'unfold_wo_center', 'imrenormalize'
]
|
"""Standard LangChain interface tests"""
from langchain_core.language_models import BaseChatModel
from langchain_tests.unit_tests import ( # type: ignore[import-not-found]
ChatModelUnitTests, # type: ignore[import-not-found]
)
from langchain_xai import ChatXAI
class TestXAIStandard(ChatModelUnitTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatXAI
@property
def chat_model_params(self) -> dict:
return {"model": "grok-beta"}
@property
def init_from_env_params(self) -> tuple[dict, dict, dict]:
return (
{
"XAI_API_KEY": "api_key",
},
{
"model": "grok-beta",
},
{
"xai_api_key": "api_key",
"xai_api_base": "https://api.x.ai/v1/",
},
)
|
"""Standard LangChain interface tests"""
from typing import Tuple, Type
from langchain_core.language_models import BaseChatModel
from langchain_tests.unit_tests import ( # type: ignore[import-not-found]
ChatModelUnitTests, # type: ignore[import-not-found]
)
from langchain_xai import ChatXAI
class TestXAIStandard(ChatModelUnitTests):
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return ChatXAI
@property
def chat_model_params(self) -> dict:
return {"model": "grok-beta"}
@property
def init_from_env_params(self) -> Tuple[dict, dict, dict]:
return (
{
"XAI_API_KEY": "api_key",
},
{
"model": "grok-beta",
},
{
"xai_api_key": "api_key",
"xai_api_base": "https://api.x.ai/v1/",
},
)
|
from typing import Union
from docarray.typing.tensor.ndarray import NdArray
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
from docarray.typing.tensor.torch_tensor import TorchTensor # noqa: F401
tf_available = is_tf_available()
if tf_available:
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor # noqa: F401
if torch_available and tf_available:
AnyTensor = Union[NdArray, TorchTensor, TensorFlowTensor]
elif torch_available:
AnyTensor = Union[NdArray, TorchTensor] # type: ignore
elif tf_available:
AnyTensor = Union[NdArray, TensorFlowTensor] # type: ignore
else:
AnyTensor = Union[NdArray] # type: ignore
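# Usage sketch (illustrative comments; `MyDoc` is hypothetical): because
# AnyTensor is resolved at import time, a single annotation accepts whichever
# tensor flavors are installed in the current environment.
#
# from docarray import BaseDoc
#
# class MyDoc(BaseDoc):
#     embedding: AnyTensor  # NdArray always; TorchTensor/TensorFlowTensor if available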
|
from typing import Union
from docarray.typing.tensor.ndarray import NdArray
try:
import torch # noqa: F401
from docarray.typing.tensor.torch_tensor import TorchTensor # noqa: F401
is_torch_available = True
except ImportError:
is_torch_available = False
try:
import tensorflow as tf # type: ignore # noqa: F401
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor # noqa: F401
is_tf_available = True
except (ImportError, TypeError):
is_tf_available = False
if is_torch_available and is_tf_available:
AnyTensor = Union[NdArray, TorchTensor, TensorFlowTensor]
elif is_torch_available:
AnyTensor = Union[NdArray, TorchTensor] # type: ignore
elif is_tf_available:
AnyTensor = Union[NdArray, TensorFlowTensor] # type: ignore
else:
AnyTensor = Union[NdArray] # type: ignore
|
# Copyright (c) OpenMMLab. All rights reserved.
from .approx_max_iou_assigner import ApproxMaxIoUAssigner
from .assign_result import AssignResult
from .atss_assigner import ATSSAssigner
from .base_assigner import BaseAssigner
from .center_region_assigner import CenterRegionAssigner
from .dynamic_soft_label_assigner import DynamicSoftLabelAssigner
from .grid_assigner import GridAssigner
from .hungarian_assigner import HungarianAssigner
from .iou2d_calculator import BboxOverlaps2D, BboxOverlaps2D_GLIP
from .match_cost import (BBoxL1Cost, BinaryFocalLossCost, ClassificationCost,
CrossEntropyLossCost, DiceCost, FocalLossCost,
IoUCost)
from .max_iou_assigner import MaxIoUAssigner
from .multi_instance_assigner import MultiInstanceAssigner
from .point_assigner import PointAssigner
from .region_assigner import RegionAssigner
from .sim_ota_assigner import SimOTAAssigner
from .task_aligned_assigner import TaskAlignedAssigner
from .topk_hungarian_assigner import TopkHungarianAssigner
from .uniform_assigner import UniformAssigner
__all__ = [
'BaseAssigner', 'BinaryFocalLossCost', 'MaxIoUAssigner',
'ApproxMaxIoUAssigner', 'AssignResult', 'PointAssigner', 'ATSSAssigner',
'CenterRegionAssigner', 'GridAssigner', 'HungarianAssigner',
'RegionAssigner', 'UniformAssigner', 'SimOTAAssigner',
'TaskAlignedAssigner', 'TopkHungarianAssigner', 'BBoxL1Cost',
'ClassificationCost', 'CrossEntropyLossCost', 'DiceCost', 'FocalLossCost',
'IoUCost', 'BboxOverlaps2D', 'DynamicSoftLabelAssigner',
'MultiInstanceAssigner', 'BboxOverlaps2D_GLIP'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .approx_max_iou_assigner import ApproxMaxIoUAssigner
from .assign_result import AssignResult
from .atss_assigner import ATSSAssigner
from .base_assigner import BaseAssigner
from .center_region_assigner import CenterRegionAssigner
from .dynamic_soft_label_assigner import DynamicSoftLabelAssigner
from .grid_assigner import GridAssigner
from .hungarian_assigner import HungarianAssigner
from .iou2d_calculator import BboxOverlaps2D, BboxOverlaps2D_GLIP
from .match_cost import (BBoxL1Cost, ClassificationCost, CrossEntropyLossCost,
DiceCost, FocalLossCost, IoUCost)
from .max_iou_assigner import MaxIoUAssigner
from .multi_instance_assigner import MultiInstanceAssigner
from .point_assigner import PointAssigner
from .region_assigner import RegionAssigner
from .sim_ota_assigner import SimOTAAssigner
from .task_aligned_assigner import TaskAlignedAssigner
from .topk_hungarian_assigner import TopkHungarianAssigner
from .uniform_assigner import UniformAssigner
__all__ = [
'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult',
'PointAssigner', 'ATSSAssigner', 'CenterRegionAssigner', 'GridAssigner',
'HungarianAssigner', 'RegionAssigner', 'UniformAssigner', 'SimOTAAssigner',
'TaskAlignedAssigner', 'TopkHungarianAssigner', 'BBoxL1Cost',
'ClassificationCost', 'CrossEntropyLossCost', 'DiceCost', 'FocalLossCost',
'IoUCost', 'BboxOverlaps2D', 'DynamicSoftLabelAssigner',
'MultiInstanceAssigner', 'BboxOverlaps2D_GLIP'
]
|
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.utils import _log_api_usage_once
from ._utils import _get_kernel, _register_explicit_noop, _register_kernel_internal, is_simple_tensor
@_register_explicit_noop(
PIL.Image.Image, datapoints.Image, datapoints.BoundingBoxes, datapoints.Mask, warn_passthrough=True
)
def uniform_temporal_subsample(inpt: datapoints._VideoTypeJIT, num_samples: int) -> datapoints._VideoTypeJIT:
if not torch.jit.is_scripting():
_log_api_usage_once(uniform_temporal_subsample)
if torch.jit.is_scripting() or is_simple_tensor(inpt):
return uniform_temporal_subsample_video(inpt, num_samples)
elif isinstance(inpt, datapoints.Datapoint):
kernel = _get_kernel(uniform_temporal_subsample, type(inpt))
return kernel(inpt, num_samples)
else:
raise TypeError(
f"Input can either be a plain tensor or any TorchVision datapoint, but got {type(inpt)} instead."
)
@_register_kernel_internal(uniform_temporal_subsample, datapoints.Video)
def uniform_temporal_subsample_video(video: torch.Tensor, num_samples: int) -> torch.Tensor:
# Reference: https://github.com/facebookresearch/pytorchvideo/blob/a0a131e/pytorchvideo/transforms/functional.py#L19
t_max = video.shape[-4] - 1
indices = torch.linspace(0, t_max, num_samples, device=video.device).long()
return torch.index_select(video, -4, indices)
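# Usage sketch (illustrative comments; this module's relative imports keep it
# from running as a standalone script): evenly subsample 4 frames from an
# 8-frame video; the temporal axis is expected at dim -4.
#
# video = torch.rand(8, 3, 16, 16)  # [T, C, H, W]
# clip = uniform_temporal_subsample_video(video, num_samples=4)
# assert clip.shape == (4, 3, 16, 16)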
|
import torch
from torchvision import datapoints
from torchvision.utils import _log_api_usage_once
from ._utils import is_simple_tensor
def uniform_temporal_subsample_video(video: torch.Tensor, num_samples: int) -> torch.Tensor:
# Reference: https://github.com/facebookresearch/pytorchvideo/blob/a0a131e/pytorchvideo/transforms/functional.py#L19
t_max = video.shape[-4] - 1
indices = torch.linspace(0, t_max, num_samples, device=video.device).long()
return torch.index_select(video, -4, indices)
def uniform_temporal_subsample(inpt: datapoints._VideoTypeJIT, num_samples: int) -> datapoints._VideoTypeJIT:
if not torch.jit.is_scripting():
_log_api_usage_once(uniform_temporal_subsample)
if torch.jit.is_scripting() or is_simple_tensor(inpt):
return uniform_temporal_subsample_video(inpt, num_samples)
elif isinstance(inpt, datapoints.Video):
output = uniform_temporal_subsample_video(inpt.as_subclass(torch.Tensor), num_samples)
return datapoints.Video.wrap_like(inpt, output)
else:
raise TypeError(f"Input can either be a plain tensor or a `Video` datapoint, but got {type(inpt)} instead.")
|
"""Interface for tools."""
from typing import Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool, tool
class InvalidTool(BaseTool): # type: ignore[override]
"""Tool that is run when invalid tool name is encountered by agent."""
name: str = "invalid_tool"
"""Name of the tool."""
description: str = "Called when tool name is invalid. Suggests valid tool names."
"""Description of the tool."""
def _run(
self,
requested_tool_name: str,
available_tool_names: list[str],
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
available_tool_names_str = ", ".join([tool for tool in available_tool_names])
return (
f"{requested_tool_name} is not a valid tool, "
f"try one of [{available_tool_names_str}]."
)
async def _arun(
self,
requested_tool_name: str,
available_tool_names: list[str],
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool asynchronously."""
available_tool_names_str = ", ".join([tool for tool in available_tool_names])
return (
f"{requested_tool_name} is not a valid tool, "
f"try one of [{available_tool_names_str}]."
)
__all__ = ["InvalidTool", "tool"]
|
"""Interface for tools."""
from typing import List, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool, tool
class InvalidTool(BaseTool): # type: ignore[override]
"""Tool that is run when invalid tool name is encountered by agent."""
name: str = "invalid_tool"
"""Name of the tool."""
description: str = "Called when tool name is invalid. Suggests valid tool names."
"""Description of the tool."""
def _run(
self,
requested_tool_name: str,
available_tool_names: List[str],
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
available_tool_names_str = ", ".join([tool for tool in available_tool_names])
return (
f"{requested_tool_name} is not a valid tool, "
f"try one of [{available_tool_names_str}]."
)
async def _arun(
self,
requested_tool_name: str,
available_tool_names: List[str],
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool asynchronously."""
available_tool_names_str = ", ".join([tool for tool in available_tool_names])
return (
f"{requested_tool_name} is not a valid tool, "
f"try one of [{available_tool_names_str}]."
)
__all__ = ["InvalidTool", "tool"]
|
"""
This file contains deprecated code that can only be used with the old `model.fit`-style Sentence Transformers v2.X training.
It exists for backwards compatibility with the `model.old_fit` method, but will be removed in a future version.
Nowadays, with Sentence Transformers v3+, it is recommended to use the `SentenceTransformerTrainer` class to train models.
See https://www.sbert.net/docs/sentence_transformer/training_overview.html for more information.
Instead, you should create a `datasets` `Dataset` for training: https://huggingface.co/docs/datasets/create_dataset
"""
from __future__ import annotations
import csv
import gzip
import os
from . import InputExample
class STSDataReader:
"""Reads in the STS dataset. Each line contains two sentences (s1_col_idx, s2_col_idx) and one label (score_col_idx)
    The default values expect a tab-separated file with the sentence pair in the first and second columns and the score (0...1) in the third. The default config normalizes scores from 0...5 to 0...1.
"""
def __init__(
self,
dataset_folder,
s1_col_idx=0,
s2_col_idx=1,
score_col_idx=2,
delimiter="\t",
quoting=csv.QUOTE_NONE,
normalize_scores=True,
min_score=0,
max_score=5,
):
self.dataset_folder = dataset_folder
self.score_col_idx = score_col_idx
self.s1_col_idx = s1_col_idx
self.s2_col_idx = s2_col_idx
self.delimiter = delimiter
self.quoting = quoting
self.normalize_scores = normalize_scores
self.min_score = min_score
self.max_score = max_score
def get_examples(self, filename, max_examples=0):
"""filename specified which data split to use (train.csv, dev.csv, test.csv)."""
filepath = os.path.join(self.dataset_folder, filename)
with (
gzip.open(filepath, "rt", encoding="utf8")
if filename.endswith(".gz")
else open(filepath, encoding="utf-8")
) as fIn:
data = csv.reader(fIn, delimiter=self.delimiter, quoting=self.quoting)
examples = []
for id, row in enumerate(data):
score = float(row[self.score_col_idx])
if self.normalize_scores: # Normalize to a 0...1 value
score = (score - self.min_score) / (self.max_score - self.min_score)
s1 = row[self.s1_col_idx]
s2 = row[self.s2_col_idx]
examples.append(InputExample(guid=filename + str(id), texts=[s1, s2], label=score))
if max_examples > 0 and len(examples) >= max_examples:
break
return examples
class STSBenchmarkDataReader(STSDataReader):
"""Reader especially for the STS benchmark dataset. There, the sentences are in column 5 and 6, the score is in column 4.
Scores are normalized from 0...5 to 0...1
"""
def __init__(
self,
dataset_folder,
s1_col_idx=5,
s2_col_idx=6,
score_col_idx=4,
delimiter="\t",
quoting=csv.QUOTE_NONE,
normalize_scores=True,
min_score=0,
max_score=5,
):
super().__init__(
dataset_folder=dataset_folder,
s1_col_idx=s1_col_idx,
s2_col_idx=s2_col_idx,
score_col_idx=score_col_idx,
delimiter=delimiter,
quoting=quoting,
normalize_scores=normalize_scores,
min_score=min_score,
max_score=max_score,
)
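# Usage sketch (illustrative comments; the folder and filename below are
# hypothetical): read the STS benchmark train split into InputExample objects
# with scores normalized from 0...5 to 0...1.
#
# reader = STSBenchmarkDataReader("datasets/stsbenchmark")
# train_examples = reader.get_examples("sts-train.csv", max_examples=100)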
|
"""
This file contains deprecated code that can only be used with the old `model.fit`-style Sentence Transformers v2.X training.
It exists for backwards compatibility with the `model.old_fit` method, but will be removed in a future version.
Nowadays, with Sentence Transformers v3+, it is recommended to use the `SentenceTransformerTrainer` class to train models.
See https://www.sbert.net/docs/sentence_transformer/training_overview.html for more information.
Instead, you should create a `datasets` `Dataset` for training: https://huggingface.co/docs/datasets/create_dataset
"""
from __future__ import annotations
import csv
import gzip
import os
from . import InputExample
class STSDataReader:
"""Reads in the STS dataset. Each line contains two sentences (s1_col_idx, s2_col_idx) and one label (score_col_idx)
    The default values expect a tab-separated file with the sentence pair in the first and second columns and the score (0...1) in the third. The default config normalizes scores from 0...5 to 0...1.
"""
def __init__(
self,
dataset_folder,
s1_col_idx=0,
s2_col_idx=1,
score_col_idx=2,
delimiter="\t",
quoting=csv.QUOTE_NONE,
normalize_scores=True,
min_score=0,
max_score=5,
):
self.dataset_folder = dataset_folder
self.score_col_idx = score_col_idx
self.s1_col_idx = s1_col_idx
self.s2_col_idx = s2_col_idx
self.delimiter = delimiter
self.quoting = quoting
self.normalize_scores = normalize_scores
self.min_score = min_score
self.max_score = max_score
def get_examples(self, filename, max_examples=0):
"""filename specified which data split to use (train.csv, dev.csv, test.csv)."""
filepath = os.path.join(self.dataset_folder, filename)
with (
gzip.open(filepath, "rt", encoding="utf8")
if filename.endswith(".gz")
else open(filepath, encoding="utf-8") as fIn
):
data = csv.reader(fIn, delimiter=self.delimiter, quoting=self.quoting)
examples = []
for id, row in enumerate(data):
score = float(row[self.score_col_idx])
if self.normalize_scores: # Normalize to a 0...1 value
score = (score - self.min_score) / (self.max_score - self.min_score)
s1 = row[self.s1_col_idx]
s2 = row[self.s2_col_idx]
examples.append(InputExample(guid=filename + str(id), texts=[s1, s2], label=score))
if max_examples > 0 and len(examples) >= max_examples:
break
return examples
class STSBenchmarkDataReader(STSDataReader):
"""Reader especially for the STS benchmark dataset. There, the sentences are in column 5 and 6, the score is in column 4.
Scores are normalized from 0...5 to 0...1
"""
def __init__(
self,
dataset_folder,
s1_col_idx=5,
s2_col_idx=6,
score_col_idx=4,
delimiter="\t",
quoting=csv.QUOTE_NONE,
normalize_scores=True,
min_score=0,
max_score=5,
):
super().__init__(
dataset_folder=dataset_folder,
s1_col_idx=s1_col_idx,
s2_col_idx=s2_col_idx,
score_col_idx=score_col_idx,
delimiter=delimiter,
quoting=quoting,
normalize_scores=normalize_scores,
min_score=min_score,
max_score=max_score,
)
|
_base_ = ['faster_rcnn_r50_fpn_32x2_1x_openimages.py']
model = dict(
roi_head=dict(bbox_head=dict(num_classes=500)),
test_cfg=dict(rcnn=dict(score_thr=0.01)))
# dataset settings
dataset_type = 'OpenImagesChallengeDataset'
data_root = 'data/OpenImages/'
data = dict(
train=dict(
type=dataset_type,
ann_file=data_root +
'challenge2019/challenge-2019-train-detection-bbox.txt',
img_prefix=data_root + 'OpenImages/',
label_file=data_root + 'challenge2019/cls-label-description.csv',
hierarchy_file=data_root + 'challenge2019/class_label_tree.np'),
val=dict(
type=dataset_type,
ann_file=data_root +
'challenge2019/challenge-2019-validation-detection-bbox.txt',
img_prefix=data_root + 'OpenImages/',
label_file=data_root + 'challenge2019/cls-label-description.csv',
hierarchy_file=data_root + 'challenge2019/class_label_tree.np',
meta_file=data_root +
'challenge2019/challenge-2019-validation-metas.pkl',
image_level_ann_file=data_root +
'challenge2019/challenge-2019-validation-detection-'
'human-imagelabels.csv'),
test=dict(
type=dataset_type,
ann_file=data_root +
'challenge2019/challenge-2019-validation-detection-bbox.txt',
img_prefix=data_root + 'OpenImages/',
label_file=data_root + 'challenge2019/cls-label-description.csv',
hierarchy_file=data_root + 'challenge2019/class_label_tree.np',
meta_file=data_root +
'challenge2019/challenge-2019-validation-metas.pkl',
image_level_ann_file=data_root +
'challenge2019/challenge-2019-validation-detection-'
'human-imagelabels.csv'))
evaluation = dict(interval=1, metric='mAP')
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = ['faster_rcnn_r50_fpn_32x2_1x_openimages.py']
model = dict(
roi_head=dict(bbox_head=dict(num_classes=500)),
test_cfg=dict(rcnn=dict(score_thr=0.01)))
# dataset settings
dataset_type = 'OpenImagesChallengeDataset'
data_root = 'data/OpenImages/'
data = dict(
train=dict(
type=dataset_type,
ann_file=data_root +
'challenge2019/challenge-2019-train-detection-bbox.txt',
img_prefix=data_root + 'OpenImages/',
label_file=data_root + 'challenge2019/cls-label-description.csv',
hierarchy_file=data_root + 'challenge2019/class_label_tree.np'),
val=dict(
type=dataset_type,
ann_file=data_root +
'challenge2019/challenge-2019-validation-detection-bbox.txt',
img_prefix=data_root + 'OpenImages/',
label_file=data_root + 'challenge2019/cls-label-description.csv',
hierarchy_file=data_root + 'challenge2019/class_label_tree.np',
meta_file=data_root +
'challenge2019/challenge-2019-validation-metas.pkl',
image_level_ann_file=data_root +
'challenge2019/challenge-2019-validation-detection-'
'human-imagelabels.csv'),
test=dict(
type=dataset_type,
ann_file=data_root +
'challenge2019/challenge-2019-validation-detection-bbox.txt',
img_prefix=data_root + 'OpenImages/',
label_file=data_root + 'challenge2019/cls-label-description.csv',
hierarchy_file=data_root + 'challenge2019/class_label_tree.np',
meta_file=data_root +
'challenge2019/challenge-2019-validation-metas.pkl',
image_level_ann_file=data_root +
'challenge2019/challenge-2019-validation-detection-'
'human-imagelabels.csv'))
evaluation = dict(interval=1, metric='mAP')
|
_base_ = './faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
roi_head=dict(
bbox_head=dict(
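            # reg_decoded_bbox=True applies the bbox coder to decode the
            # regression deltas into absolute boxes before computing the loss,
            # which IoU-based losses require.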
reg_decoded_bbox=True,
loss_bbox=dict(type='IoULoss', loss_weight=10.0))))
|
_base_ = './faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
roi_head=dict(
bbox_head=dict(
reg_decoded_bbox=True,
loss_bbox=dict(type='IoULoss', loss_weight=10.0))))
|
# This is different from the TTA of official CenterNet.
tta_model = dict(
type='DetTTAModel',
tta_cfg=dict(nms=dict(type='nms', iou_threshold=0.5), max_per_img=100))
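# TestTimeAug takes one transform from each inner list below and enumerates
# all combinations, so this pipeline yields 2 augmented views per image
# (flipped and unflipped), which DetTTAModel then merges via NMS.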
tta_pipeline = [
dict(type='LoadImageFromFile', to_float32=True, backend_args=None),
dict(
type='TestTimeAug',
transforms=[
[
# ``RandomFlip`` must be placed before ``RandomCenterCropPad``,
# otherwise bounding box coordinates after flipping cannot be
# recovered correctly.
dict(type='RandomFlip', prob=1.),
dict(type='RandomFlip', prob=0.)
],
[
dict(
type='RandomCenterCropPad',
ratios=None,
border=None,
mean=[0, 0, 0],
std=[1, 1, 1],
to_rgb=True,
test_mode=True,
test_pad_mode=['logical_or', 31],
test_pad_add_pix=1),
],
[dict(type='LoadAnnotations', with_bbox=True)],
[
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'flip', 'flip_direction', 'border'))
]
])
]
|
# This is different from the TTA of official CenterNet.
tta_model = dict(
type='DetTTAModel',
tta_cfg=dict(nms=dict(type='nms', iou_threshold=0.5), max_per_img=100))
tta_pipeline = [
dict(
type='LoadImageFromFile',
to_float32=True,
file_client_args=dict(backend='disk')),
dict(
type='TestTimeAug',
transforms=[
[
# ``RandomFlip`` must be placed before ``RandomCenterCropPad``,
# otherwise bounding box coordinates after flipping cannot be
# recovered correctly.
dict(type='RandomFlip', prob=1.),
dict(type='RandomFlip', prob=0.)
],
[
dict(
type='RandomCenterCropPad',
ratios=None,
border=None,
mean=[0, 0, 0],
std=[1, 1, 1],
to_rgb=True,
test_mode=True,
test_pad_mode=['logical_or', 31],
test_pad_add_pix=1),
],
[dict(type='LoadAnnotations', with_bbox=True)],
[
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'flip', 'flip_direction', 'border'))
]
])
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet import * # noqa
from mmdet.data_elements import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestKDSingleStageDetector(TestCase):
def setUp(self):
register_all_modules()
@parameterized.expand(['ld/ld_r18_gflv1_r101_fpn_coco_1x.py'])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
self.assertTrue(detector.backbone)
self.assertTrue(detector.neck)
self.assertTrue(detector.bbox_head)
@parameterized.expand([('ld/ld_r18_gflv1_r101_fpn_coco_1x.py', ('cpu',
'cuda'))])
def test_single_stage_forward_train(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, True)
# Test forward train
losses = detector.forward(batch_inputs, data_samples, mode='loss')
self.assertIsInstance(losses, dict)
@parameterized.expand([('ld/ld_r18_gflv1_r101_fpn_coco_1x.py', ('cpu',
'cuda'))])
def test_single_stage_forward_test(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(
batch_inputs, data_samples, mode='predict')
self.assertEqual(len(batch_results), 2)
self.assertIsInstance(batch_results[0], DetDataSample)
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet import * # noqa
from mmdet.core import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestKDSingleStageDetector(TestCase):
def setUp(self):
register_all_modules()
@parameterized.expand(['ld/ld_r18_gflv1_r101_fpn_coco_1x.py'])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
self.assertTrue(detector.backbone)
self.assertTrue(detector.neck)
self.assertTrue(detector.bbox_head)
@parameterized.expand([('ld/ld_r18_gflv1_r101_fpn_coco_1x.py', ('cpu',
'cuda'))])
def test_single_stage_forward_train(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, True)
# Test forward train
losses = detector.forward(batch_inputs, data_samples, mode='loss')
self.assertIsInstance(losses, dict)
@parameterized.expand([('ld/ld_r18_gflv1_r101_fpn_coco_1x.py', ('cpu',
'cuda'))])
def test_single_stage_forward_test(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(
batch_inputs, data_samples, mode='predict')
self.assertEqual(len(batch_results), 2)
self.assertIsInstance(batch_results[0], DetDataSample)
|
"""System message."""
from typing import Any, Literal, Union
from langchain_core.messages.base import BaseMessage, BaseMessageChunk
class SystemMessage(BaseMessage):
"""Message for priming AI behavior.
The system message is usually passed in as the first of a sequence
of input messages.
Example:
.. code-block:: python
from langchain_core.messages import HumanMessage, SystemMessage
messages = [
SystemMessage(
content="You are a helpful assistant! Your name is Bob."
),
HumanMessage(
content="What is your name?"
)
]
# Define a chat model and invoke it with the messages
print(model.invoke(messages))
"""
type: Literal["system"] = "system"
"""The type of the message (used for serialization). Defaults to "system"."""
def __init__(
self, content: Union[str, list[Union[str, dict]]], **kwargs: Any
) -> None:
"""Pass in content as positional arg.
Args:
content: The string contents of the message.
kwargs: Additional fields to pass to the message.
"""
super().__init__(content=content, **kwargs)
class SystemMessageChunk(SystemMessage, BaseMessageChunk):
"""System Message chunk."""
# Ignoring mypy re-assignment here since we're overriding the value
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["SystemMessageChunk"] = "SystemMessageChunk" # type: ignore[assignment]
"""The type of the message (used for serialization).
Defaults to "SystemMessageChunk"."""
|
"""System message."""
from typing import Any, Literal, Union
from langchain_core.messages.base import BaseMessage, BaseMessageChunk
class SystemMessage(BaseMessage):
"""Message for priming AI behavior.
The system message is usually passed in as the first of a sequence
of input messages.
Example:
.. code-block:: python
from langchain_core.messages import HumanMessage, SystemMessage
messages = [
SystemMessage(
content="You are a helpful assistant! Your name is Bob."
),
HumanMessage(
content="What is your name?"
)
]
# Define a chat model and invoke it with the messages
print(model.invoke(messages))
"""
type: Literal["system"] = "system"
"""The type of the message (used for serialization). Defaults to "system"."""
def __init__(
self, content: Union[str, list[Union[str, dict]]], **kwargs: Any
) -> None:
"""Pass in content as positional arg.
Args:
content: The string contents of the message.
kwargs: Additional fields to pass to the message.
"""
super().__init__(content=content, **kwargs)
SystemMessage.model_rebuild()
class SystemMessageChunk(SystemMessage, BaseMessageChunk):
"""System Message chunk."""
# Ignoring mypy re-assignment here since we're overriding the value
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["SystemMessageChunk"] = "SystemMessageChunk" # type: ignore[assignment]
"""The type of the message (used for serialization).
Defaults to "SystemMessageChunk"."""
|
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.documents import ImageDoc
from docarray.typing import ImageBytes
from docarray.utils._internal.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
REMOTE_JPG = (
'https://upload.wikimedia.org/wikipedia/commons/8/80/'
'Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg'
)
@pytest.mark.slow
@pytest.mark.internet
def test_image():
image = ImageDoc(url=REMOTE_JPG)
image.tensor = image.url.load()
assert isinstance(image.tensor, np.ndarray)
def test_image_str():
image = parse_obj_as(ImageDoc, 'http://myurl.jpg')
assert image.url == 'http://myurl.jpg'
def test_image_np():
image = parse_obj_as(ImageDoc, np.zeros((10, 10, 3)))
assert (image.tensor == np.zeros((10, 10, 3))).all()
def test_image_torch():
image = parse_obj_as(ImageDoc, torch.zeros(10, 10, 3))
assert (image.tensor == torch.zeros(10, 10, 3)).all()
@pytest.mark.tensorflow
def test_image_tensorflow():
image = ImageDoc(tensor=tf.zeros((10, 10, 3)))
assert tnp.allclose(image.tensor.tensor, tf.zeros((10, 10, 3)))
def test_image_shortcut_doc():
class MyDoc(BaseDoc):
image: ImageDoc
image2: ImageDoc
image3: ImageDoc
doc = MyDoc(
image='http://myurl.jpg',
image2=np.zeros((10, 10, 3)),
image3=torch.zeros(10, 10, 3),
)
assert doc.image.url == 'http://myurl.jpg'
assert (doc.image2.tensor == np.zeros((10, 10, 3))).all()
assert (doc.image3.tensor == torch.zeros(10, 10, 3)).all()
@pytest.mark.slow
@pytest.mark.internet
def test_byte():
img = ImageDoc(url=REMOTE_JPG)
img.bytes_ = img.url.load_bytes()
assert isinstance(img.bytes_, ImageBytes)
@pytest.mark.slow
@pytest.mark.internet
def test_byte_from_tensor():
img = ImageDoc(url=REMOTE_JPG)
img.tensor = img.url.load()
img.bytes_ = img.tensor.to_bytes()
assert isinstance(img.bytes_, bytes)
assert isinstance(img.bytes_, ImageBytes)
assert len(img.bytes_) > 0
|
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.documents import ImageDoc
from docarray.typing import ImageBytes
from docarray.utils._internal.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
REMOTE_JPG = (
'https://upload.wikimedia.org/wikipedia/commons/8/80/'
'Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg'
)
@pytest.mark.slow
@pytest.mark.internet
def test_image():
image = ImageDoc(url=REMOTE_JPG)
image.tensor = image.url.load()
assert isinstance(image.tensor, np.ndarray)
def test_image_str():
image = parse_obj_as(ImageDoc, 'http://myurl.jpg')
assert image.url == 'http://myurl.jpg'
def test_image_np():
image = parse_obj_as(ImageDoc, np.zeros((10, 10, 3)))
assert (image.tensor == np.zeros((10, 10, 3))).all()
def test_image_torch():
image = parse_obj_as(ImageDoc, torch.zeros(10, 10, 3))
assert (image.tensor == torch.zeros(10, 10, 3)).all()
@pytest.mark.tensorflow
def test_image_tensorflow():
image = ImageDoc(tensor=tf.zeros((10, 10, 3)))
assert tnp.allclose(image.tensor.tensor, tf.zeros((10, 10, 3)))
def test_image_shortcut_doc():
class MyDoc(BaseDoc):
image: ImageDoc
image2: ImageDoc
image3: ImageDoc
doc = MyDoc(
image='http://myurl.jpg',
image2=np.zeros((10, 10, 3)),
image3=torch.zeros(10, 10, 3),
)
assert doc.image.url == 'http://myurl.jpg'
assert (doc.image2.tensor == np.zeros((10, 10, 3))).all()
assert (doc.image3.tensor == torch.zeros(10, 10, 3)).all()
@pytest.mark.slow
@pytest.mark.internet
def test_byte():
img = ImageDoc(url=REMOTE_JPG)
img.bytes_ = img.url.load_bytes()
assert isinstance(img.bytes_, ImageBytes)
@pytest.mark.slow
@pytest.mark.internet
def test_byte_from_tensor():
img = ImageDoc(url=REMOTE_JPG)
img.tensor = img.url.load()
img.bytes_ = img.tensor.to_bytes()
assert isinstance(img.bytes_, bytes)
assert isinstance(img.bytes_, ImageBytes)
assert len(img.bytes_) > 0
|
"""Test text splitting functionality using NLTK and Spacy based sentence splitters."""
from typing import Any
import nltk
import pytest
from langchain_core.documents import Document
from langchain_text_splitters.nltk import NLTKTextSplitter
from langchain_text_splitters.spacy import SpacyTextSplitter
def setup_module() -> None:
nltk.download("punkt_tab")
@pytest.fixture()
def spacy() -> Any:
try:
import spacy
except ImportError:
pytest.skip("Spacy not installed.")
spacy.cli.download("en_core_web_sm") # type: ignore[attr-defined,operator,unused-ignore]
return spacy
def test_nltk_text_splitting_args() -> None:
"""Test invalid arguments."""
with pytest.raises(ValueError):
NLTKTextSplitter(chunk_size=2, chunk_overlap=4)
def test_spacy_text_splitting_args(spacy: Any) -> None:
"""Test invalid arguments."""
with pytest.raises(ValueError):
SpacyTextSplitter(chunk_size=2, chunk_overlap=4)
def test_nltk_text_splitter() -> None:
"""Test splitting by sentence using NLTK."""
text = "This is sentence one. And this is sentence two."
separator = "|||"
splitter = NLTKTextSplitter(separator=separator)
output = splitter.split_text(text)
expected_output = [f"This is sentence one.{separator}And this is sentence two."]
assert output == expected_output
@pytest.mark.parametrize("pipeline", ["sentencizer", "en_core_web_sm"])
def test_spacy_text_splitter(pipeline: str, spacy: Any) -> None:
"""Test splitting by sentence using Spacy."""
text = "This is sentence one. And this is sentence two."
separator = "|||"
splitter = SpacyTextSplitter(separator=separator, pipeline=pipeline)
output = splitter.split_text(text)
expected_output = [f"This is sentence one.{separator}And this is sentence two."]
assert output == expected_output
@pytest.mark.parametrize("pipeline", ["sentencizer", "en_core_web_sm"])
def test_spacy_text_splitter_strip_whitespace(pipeline: str, spacy: Any) -> None:
"""Test splitting by sentence using Spacy."""
text = "This is sentence one. And this is sentence two."
separator = "|||"
splitter = SpacyTextSplitter(
separator=separator, pipeline=pipeline, strip_whitespace=False
)
output = splitter.split_text(text)
expected_output = [f"This is sentence one. {separator}And this is sentence two."]
assert output == expected_output
def test_nltk_text_splitter_args() -> None:
"""Test invalid arguments for NLTKTextSplitter."""
with pytest.raises(ValueError):
NLTKTextSplitter(
chunk_size=80,
chunk_overlap=0,
separator="\n\n",
use_span_tokenize=True,
)
def test_nltk_text_splitter_with_add_start_index() -> None:
splitter = NLTKTextSplitter(
chunk_size=80,
chunk_overlap=0,
separator="",
use_span_tokenize=True,
add_start_index=True,
)
txt = (
"Innovation drives our success. "
"Collaboration fosters creative solutions. "
"Efficiency enhances data management."
)
docs = [Document(txt)]
chunks = splitter.split_documents(docs)
assert len(chunks) == 2
for chunk in chunks:
s_i = chunk.metadata["start_index"]
assert chunk.page_content == txt[s_i : s_i + len(chunk.page_content)]
|
"""Test text splitting functionality using NLTK and Spacy based sentence splitters."""
from typing import Any
import nltk
import pytest
from langchain_core.documents import Document
from langchain_text_splitters.nltk import NLTKTextSplitter
from langchain_text_splitters.spacy import SpacyTextSplitter
def setup_module() -> None:
nltk.download("punkt_tab")
@pytest.fixture()
def spacy() -> Any:
try:
import spacy
except ImportError:
pytest.skip("Spacy not installed.")
spacy.cli.download("en_core_web_sm") # type: ignore
return spacy
def test_nltk_text_splitting_args() -> None:
"""Test invalid arguments."""
with pytest.raises(ValueError):
NLTKTextSplitter(chunk_size=2, chunk_overlap=4)
def test_spacy_text_splitting_args(spacy: Any) -> None:
"""Test invalid arguments."""
with pytest.raises(ValueError):
SpacyTextSplitter(chunk_size=2, chunk_overlap=4)
def test_nltk_text_splitter() -> None:
"""Test splitting by sentence using NLTK."""
text = "This is sentence one. And this is sentence two."
separator = "|||"
splitter = NLTKTextSplitter(separator=separator)
output = splitter.split_text(text)
expected_output = [f"This is sentence one.{separator}And this is sentence two."]
assert output == expected_output
@pytest.mark.parametrize("pipeline", ["sentencizer", "en_core_web_sm"])
def test_spacy_text_splitter(pipeline: str, spacy: Any) -> None:
"""Test splitting by sentence using Spacy."""
text = "This is sentence one. And this is sentence two."
separator = "|||"
splitter = SpacyTextSplitter(separator=separator, pipeline=pipeline)
output = splitter.split_text(text)
expected_output = [f"This is sentence one.{separator}And this is sentence two."]
assert output == expected_output
@pytest.mark.parametrize("pipeline", ["sentencizer", "en_core_web_sm"])
def test_spacy_text_splitter_strip_whitespace(pipeline: str, spacy: Any) -> None:
"""Test splitting by sentence using Spacy."""
text = "This is sentence one. And this is sentence two."
separator = "|||"
splitter = SpacyTextSplitter(
separator=separator, pipeline=pipeline, strip_whitespace=False
)
output = splitter.split_text(text)
expected_output = [f"This is sentence one. {separator}And this is sentence two."]
assert output == expected_output
def test_nltk_text_splitter_args() -> None:
"""Test invalid arguments for NLTKTextSplitter."""
with pytest.raises(ValueError):
NLTKTextSplitter(
chunk_size=80,
chunk_overlap=0,
separator="\n\n",
use_span_tokenize=True,
)
def test_nltk_text_splitter_with_add_start_index() -> None:
splitter = NLTKTextSplitter(
chunk_size=80,
chunk_overlap=0,
separator="",
use_span_tokenize=True,
add_start_index=True,
)
txt = (
"Innovation drives our success. "
"Collaboration fosters creative solutions. "
"Efficiency enhances data management."
)
docs = [Document(txt)]
chunks = splitter.split_documents(docs)
assert len(chunks) == 2
for chunk in chunks:
s_i = chunk.metadata["start_index"]
assert chunk.page_content == txt[s_i : s_i + len(chunk.page_content)]
|
import os
from typing import Optional
import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from tests import TOYDATA_DIR
@pytest.fixture()
def nested_doc_cls():
class MyDoc(BaseDoc):
count: Optional[int]
text: str
class MyDocNested(MyDoc):
image: ImageDoc
image2: ImageDoc
return MyDocNested
def test_to_from_csv(tmpdir, nested_doc_cls):
da = DocList[nested_doc_cls](
[
nested_doc_cls(
count=0,
text='hello',
image=ImageDoc(url='aux.png'),
image2=ImageDoc(url='aux.png'),
),
nested_doc_cls(text='hello world', image=ImageDoc(), image2=ImageDoc()),
]
)
tmp_file = str(tmpdir / 'tmp.csv')
da.to_csv(tmp_file)
assert os.path.isfile(tmp_file)
da_from = DocList[nested_doc_cls].from_csv(tmp_file)
for doc1, doc2 in zip(da, da_from):
assert doc1 == doc2
def test_from_csv_nested(nested_doc_cls):
da = DocList[nested_doc_cls].from_csv(
file_path=str(TOYDATA_DIR / 'docs_nested.csv')
)
assert len(da) == 3
for i, doc in enumerate(da):
assert doc.count.__class__ == int
assert doc.count == int(f'{i}{i}{i}')
assert doc.text.__class__ == str
assert doc.text == f'hello {i}'
assert doc.image.__class__ == ImageDoc
assert doc.image.tensor is None
assert doc.image.embedding is None
assert doc.image.bytes_ is None
assert doc.image2.__class__ == ImageDoc
assert doc.image2.tensor is None
assert doc.image2.embedding is None
assert doc.image2.bytes_ is None
assert da[0].image2.url == 'image_10.png'
assert da[1].image2.url is None
assert da[2].image2.url is None
@pytest.fixture()
def nested_doc():
class Inner(BaseDoc):
img: Optional[ImageDoc]
class Middle(BaseDoc):
img: Optional[ImageDoc]
inner: Optional[Inner]
class Outer(BaseDoc):
img: Optional[ImageDoc]
middle: Optional[Middle]
doc = Outer(
img=ImageDoc(), middle=Middle(img=ImageDoc(), inner=Inner(img=ImageDoc()))
)
return doc
def test_from_csv_without_schema_raise_exception():
with pytest.raises(TypeError, match='no document schema defined'):
DocList.from_csv(file_path=str(TOYDATA_DIR / 'docs_nested.csv'))
def test_from_csv_with_wrong_schema_raise_exception(nested_doc):
with pytest.raises(ValueError, match='Column names do not match the schema'):
DocList[nested_doc.__class__].from_csv(file_path=str(TOYDATA_DIR / 'docs.csv'))
|
import os
from typing import Optional
import pytest
from docarray import BaseDoc, DocArray
from docarray.documents import ImageDoc
from tests import TOYDATA_DIR
@pytest.fixture()
def nested_doc_cls():
class MyDoc(BaseDoc):
count: Optional[int]
text: str
class MyDocNested(MyDoc):
image: ImageDoc
image2: ImageDoc
return MyDocNested
def test_to_from_csv(tmpdir, nested_doc_cls):
da = DocArray[nested_doc_cls](
[
nested_doc_cls(
count=0,
text='hello',
image=ImageDoc(url='aux.png'),
image2=ImageDoc(url='aux.png'),
),
nested_doc_cls(text='hello world', image=ImageDoc(), image2=ImageDoc()),
]
)
tmp_file = str(tmpdir / 'tmp.csv')
da.to_csv(tmp_file)
assert os.path.isfile(tmp_file)
da_from = DocArray[nested_doc_cls].from_csv(tmp_file)
for doc1, doc2 in zip(da, da_from):
assert doc1 == doc2
def test_from_csv_nested(nested_doc_cls):
da = DocArray[nested_doc_cls].from_csv(
file_path=str(TOYDATA_DIR / 'docs_nested.csv')
)
assert len(da) == 3
for i, doc in enumerate(da):
assert doc.count.__class__ == int
assert doc.count == int(f'{i}{i}{i}')
assert doc.text.__class__ == str
assert doc.text == f'hello {i}'
assert doc.image.__class__ == ImageDoc
assert doc.image.tensor is None
assert doc.image.embedding is None
assert doc.image.bytes_ is None
assert doc.image2.__class__ == ImageDoc
assert doc.image2.tensor is None
assert doc.image2.embedding is None
assert doc.image2.bytes_ is None
assert da[0].image2.url == 'image_10.png'
assert da[1].image2.url is None
assert da[2].image2.url is None
@pytest.fixture()
def nested_doc():
class Inner(BaseDoc):
img: Optional[ImageDoc]
class Middle(BaseDoc):
img: Optional[ImageDoc]
inner: Optional[Inner]
class Outer(BaseDoc):
img: Optional[ImageDoc]
middle: Optional[Middle]
doc = Outer(
img=ImageDoc(), middle=Middle(img=ImageDoc(), inner=Inner(img=ImageDoc()))
)
return doc
def test_from_csv_without_schema_raise_exception():
with pytest.raises(TypeError, match='no document schema defined'):
DocArray.from_csv(file_path=str(TOYDATA_DIR / 'docs_nested.csv'))
def test_from_csv_with_wrong_schema_raise_exception(nested_doc):
with pytest.raises(ValueError, match='Column names do not match the schema'):
DocArray[nested_doc.__class__].from_csv(file_path=str(TOYDATA_DIR / 'docs.csv'))
|
import hashlib
import secrets
from typing import NamedTuple
class APIKeyContainer(NamedTuple):
"""Container for API key parts."""
raw: str
prefix: str
postfix: str
hash: str
class APIKeyManager:
PREFIX: str = "agpt_"
PREFIX_LENGTH: int = 8
POSTFIX_LENGTH: int = 8
def generate_api_key(self) -> APIKeyContainer:
"""Generate a new API key with all its parts."""
raw_key = f"{self.PREFIX}{secrets.token_urlsafe(32)}"
return APIKeyContainer(
raw=raw_key,
prefix=raw_key[: self.PREFIX_LENGTH],
postfix=raw_key[-self.POSTFIX_LENGTH :],
hash=hashlib.sha256(raw_key.encode()).hexdigest(),
)
def verify_api_key(self, provided_key: str, stored_hash: str) -> bool:
"""Verify if a provided API key matches the stored hash."""
if not provided_key.startswith(self.PREFIX):
return False
return hashlib.sha256(provided_key.encode()).hexdigest() == stored_hash
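# Usage sketch (an added illustration, not part of the original module): hand
# the raw key to the user once, persist only prefix/postfix/hash, and verify
# later submissions against the stored hash.
if __name__ == "__main__":
    manager = APIKeyManager()
    key = manager.generate_api_key()
    assert key.raw.startswith(APIKeyManager.PREFIX)
    assert manager.verify_api_key(key.raw, key.hash)
    assert not manager.verify_api_key("not_agpt_prefixed", key.hash)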
|
from typing import NamedTuple
import secrets
import hashlib
class APIKeyContainer(NamedTuple):
"""Container for API key parts."""
raw: str
prefix: str
postfix: str
hash: str
class APIKeyManager:
PREFIX: str = "agpt_"
PREFIX_LENGTH: int = 8
POSTFIX_LENGTH: int = 8
def generate_api_key(self) -> APIKeyContainer:
"""Generate a new API key with all its parts."""
raw_key = f"{self.PREFIX}{secrets.token_urlsafe(32)}"
return APIKeyContainer(
raw=raw_key,
prefix=raw_key[:self.PREFIX_LENGTH],
postfix=raw_key[-self.POSTFIX_LENGTH:],
hash=hashlib.sha256(raw_key.encode()).hexdigest()
)
def verify_api_key(self, provided_key: str, stored_hash: str) -> bool:
"""Verify if a provided API key matches the stored hash."""
if not provided_key.startswith(self.PREFIX):
return False
return hashlib.sha256(provided_key.encode()).hexdigest() == stored_hash
|
import numpy as np
from keras.src import backend
from keras.src import ops
from keras.src import testing
from keras.src.backend.common.stateless_scope import StatelessScope
class TestStatelessScope(testing.TestCase):
def test_basic_flow(self):
var1 = backend.Variable(np.zeros((2,)))
var2 = backend.Variable(np.zeros((2,)))
var_out = backend.Variable(np.zeros((2,)))
value1 = ops.ones(shape=(2,))
value2 = ops.ones(shape=(2,))
with StatelessScope(
state_mapping=[(var1, value1), (var2, value2)]
) as scope:
out = var1 + var2
var_out.assign(out)
var_out_value = var_out + 0.0
# Inside scope: new value is used.
self.assertAllClose(var_out_value, 2 * np.ones((2,)))
# Out of scope: old value is used.
var_out_value = var_out + 0.0
self.assertAllClose(var_out_value, np.zeros((2,)))
# Updates are tracked.
var_out_value = scope.get_current_value(var_out)
self.assertAllClose(var_out_value, 2 * np.ones((2,)))
# Updates can be reapplied.
var_out.assign(scope.get_current_value(var_out))
self.assertAllClose(var_out_value, 2 * np.ones((2,)))
def test_invalid_key_in_state_mapping(self):
# var1 = backend.Variable(np.zeros((2,)))
invalid_key = "not_a_keras_variable"
value1 = ops.ones(shape=(2,))
with self.assertRaisesRegex(
ValueError, "all keys in argument `mapping` must be Variable"
):
StatelessScope(state_mapping=[(invalid_key, value1)])
def test_invalid_value_shape_in_state_mapping(self):
var1 = backend.Variable(np.zeros((2,)))
invalid_value = ops.ones(shape=(3,)) # Incorrect shape
with self.assertRaisesRegex(
ValueError, "all values in argument `mapping` must be tensors with"
):
StatelessScope(state_mapping=[(var1, invalid_value)])
|
import numpy as np
from keras.src import backend
from keras.src import ops
from keras.src import testing
from keras.src.backend.common.stateless_scope import StatelessScope
class TestStatelessScope(testing.TestCase):
def test_basic_flow(self):
var1 = backend.Variable(np.zeros((2,)))
var2 = backend.Variable(np.zeros((2,)))
var_out = backend.Variable(np.zeros((2,)))
value1 = ops.ones(shape=(2,))
value2 = ops.ones(shape=(2,))
with StatelessScope(
state_mapping=[(var1, value1), (var2, value2)]
) as scope:
out = var1 + var2
var_out.assign(out)
var_out_value = var_out + 0.0
# Inside scope: new value is used.
self.assertAllClose(var_out_value, 2 * np.ones((2,)))
# Out of scope: old value is used.
var_out_value = var_out + 0.0
self.assertAllClose(var_out_value, np.zeros((2,)))
# Updates are tracked.
var_out_value = scope.get_current_value(var_out)
self.assertAllClose(var_out_value, 2 * np.ones((2,)))
# Updates can be reapplied.
var_out.assign(scope.get_current_value(var_out))
self.assertAllClose(var_out_value, 2 * np.ones((2,)))
def test_invalid_key_in_state_mapping(self):
# var1 = backend.Variable(np.zeros((2,)))
invalid_key = "not_a_keras_variable"
value1 = ops.ones(shape=(2,))
with self.assertRaisesRegex(
ValueError, "all keys in argument `mapping` must be KerasVariable"
):
StatelessScope(state_mapping=[(invalid_key, value1)])
def test_invalid_value_shape_in_state_mapping(self):
var1 = backend.Variable(np.zeros((2,)))
invalid_value = ops.ones(shape=(3,)) # Incorrect shape
with self.assertRaisesRegex(
ValueError, "all values in argument `mapping` must be tensors with"
):
StatelessScope(state_mapping=[(var1, invalid_value)])
|
import os
# When using jax.experimental.enable_x64 in unit test, we want to keep the
# default dtype with 32 bits, aligning it with Keras's default.
os.environ["JAX_DEFAULT_DTYPE_BITS"] = "32"
try:
# When using torch and tensorflow, torch needs to be imported first,
# otherwise it will segfault upon import. This should force the torch
# import to happen first for all tests.
import torch # noqa: F401
except ImportError:
pass
import pytest # noqa: E402
from keras.src.backend import backend # noqa: E402
def pytest_configure(config):
config.addinivalue_line(
"markers",
"requires_trainable_backend: mark test for trainable backend only",
)
def pytest_collection_modifyitems(config, items):
requires_trainable_backend = pytest.mark.skipif(
backend() == "numpy" or backend() == "openvino",
reason="Trainer not implemented for NumPy and OpenVINO backend.",
)
for item in items:
if "requires_trainable_backend" in item.keywords:
item.add_marker(requires_trainable_backend)
def skip_if_backend(given_backend, reason):
return pytest.mark.skipif(backend() == given_backend, reason=reason)
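# Hedged usage sketch (comment-only, since conftest code runs at collection
# time): test modules can opt out of non-trainable backends with the marker
# registered above, or skip a single backend with the helper. Hypothetical
# example tests:
#
#   @pytest.mark.requires_trainable_backend
#   def test_fit_runs():
#       ...
#
#   @skip_if_backend("openvino", "inference-only backend")
#   def test_training_step():
#       ...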
|
import os
# When using jax.experimental.enable_x64 in unit test, we want to keep the
# default dtype with 32 bits, aligning it with Keras's default.
os.environ["JAX_DEFAULT_DTYPE_BITS"] = "32"
try:
# When using torch and tensorflow, torch needs to be imported first,
# otherwise it will segfault upon import. This should force the torch
# import to happen first for all tests.
import torch # noqa: F401
except ImportError:
pass
import pytest # noqa: E402
from keras.src.backend import backend # noqa: E402
def pytest_configure(config):
config.addinivalue_line(
"markers",
"requires_trainable_backend: mark test for trainable backend only",
)
def pytest_collection_modifyitems(config, items):
requires_trainable_backend = pytest.mark.skipif(
backend() == "numpy",
reason="Trainer not implemented for NumPy backend.",
)
for item in items:
if "requires_trainable_backend" in item.keywords:
item.add_marker(requires_trainable_backend)
|
from torchvision.transforms import InterpolationMode # usort: skip
from ._utils import is_simple_tensor # usort: skip
from ._meta import (
clamp_bounding_box,
convert_format_bounding_box,
convert_dtype_image_tensor,
convert_dtype,
convert_dtype_video,
convert_image_dtype,
get_dimensions_image_tensor,
get_dimensions_image_pil,
get_dimensions,
get_num_frames_video,
get_num_frames,
get_image_num_channels,
get_num_channels_image_tensor,
get_num_channels_image_pil,
get_num_channels_video,
get_num_channels,
get_spatial_size_bounding_box,
get_spatial_size_image_tensor,
get_spatial_size_image_pil,
get_spatial_size_mask,
get_spatial_size_video,
get_spatial_size,
) # usort: skip
from ._augment import erase, erase_image_pil, erase_image_tensor, erase_video
from ._color import (
adjust_brightness,
adjust_brightness_image_pil,
adjust_brightness_image_tensor,
adjust_brightness_video,
adjust_contrast,
adjust_contrast_image_pil,
adjust_contrast_image_tensor,
adjust_contrast_video,
adjust_gamma,
adjust_gamma_image_pil,
adjust_gamma_image_tensor,
adjust_gamma_video,
adjust_hue,
adjust_hue_image_pil,
adjust_hue_image_tensor,
adjust_hue_video,
adjust_saturation,
adjust_saturation_image_pil,
adjust_saturation_image_tensor,
adjust_saturation_video,
adjust_sharpness,
adjust_sharpness_image_pil,
adjust_sharpness_image_tensor,
adjust_sharpness_video,
autocontrast,
autocontrast_image_pil,
autocontrast_image_tensor,
autocontrast_video,
equalize,
equalize_image_pil,
equalize_image_tensor,
equalize_video,
invert,
invert_image_pil,
invert_image_tensor,
invert_video,
posterize,
posterize_image_pil,
posterize_image_tensor,
posterize_video,
rgb_to_grayscale,
rgb_to_grayscale_image_pil,
rgb_to_grayscale_image_tensor,
solarize,
solarize_image_pil,
solarize_image_tensor,
solarize_video,
)
from ._geometry import (
affine,
affine_bounding_box,
affine_image_pil,
affine_image_tensor,
affine_mask,
affine_video,
center_crop,
center_crop_bounding_box,
center_crop_image_pil,
center_crop_image_tensor,
center_crop_mask,
center_crop_video,
crop,
crop_bounding_box,
crop_image_pil,
crop_image_tensor,
crop_mask,
crop_video,
elastic,
elastic_bounding_box,
elastic_image_pil,
elastic_image_tensor,
elastic_mask,
elastic_transform,
elastic_video,
five_crop,
five_crop_image_pil,
five_crop_image_tensor,
five_crop_video,
hflip, # TODO: Consider moving all pure alias definitions at the bottom of the file
horizontal_flip,
horizontal_flip_bounding_box,
horizontal_flip_image_pil,
horizontal_flip_image_tensor,
horizontal_flip_mask,
horizontal_flip_video,
pad,
pad_bounding_box,
pad_image_pil,
pad_image_tensor,
pad_mask,
pad_video,
perspective,
perspective_bounding_box,
perspective_image_pil,
perspective_image_tensor,
perspective_mask,
perspective_video,
resize,
resize_bounding_box,
resize_image_pil,
resize_image_tensor,
resize_mask,
resize_video,
resized_crop,
resized_crop_bounding_box,
resized_crop_image_pil,
resized_crop_image_tensor,
resized_crop_mask,
resized_crop_video,
rotate,
rotate_bounding_box,
rotate_image_pil,
rotate_image_tensor,
rotate_mask,
rotate_video,
ten_crop,
ten_crop_image_pil,
ten_crop_image_tensor,
ten_crop_video,
vertical_flip,
vertical_flip_bounding_box,
vertical_flip_image_pil,
vertical_flip_image_tensor,
vertical_flip_mask,
vertical_flip_video,
vflip,
)
from ._misc import (
gaussian_blur,
gaussian_blur_image_pil,
gaussian_blur_image_tensor,
gaussian_blur_video,
normalize,
normalize_image_tensor,
normalize_video,
)
from ._temporal import uniform_temporal_subsample, uniform_temporal_subsample_video
from ._type_conversion import pil_to_tensor, to_image_pil, to_image_tensor, to_pil_image
from ._deprecated import get_image_size, to_grayscale, to_tensor # usort: skip
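# Hedged usage sketch (comment-only, since this is a package __init__; module
# path assumed): the flat functional namespace re-exported above is meant to
# be called directly, and `hflip` / `vflip` appear to be pure aliases of
# `horizontal_flip` / `vertical_flip` per the TODO note above.
#
#   import torch
#   from torchvision.prototype.transforms import functional as F
#
#   img = torch.randint(0, 256, (3, 32, 32), dtype=torch.uint8)
#   flipped = F.horizontal_flip(img)  # same dispatcher as F.hflip(img)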
|
# TODO: Add _log_api_usage_once() in all mid-level kernels. If they remain not jit-scriptable we can use decorators
from torchvision.transforms import InterpolationMode # usort: skip
from ._utils import is_simple_tensor # usort: skip
from ._meta import (
clamp_bounding_box,
convert_format_bounding_box,
convert_dtype_image_tensor,
convert_dtype,
convert_dtype_video,
convert_image_dtype,
get_dimensions_image_tensor,
get_dimensions_image_pil,
get_dimensions,
get_num_frames_video,
get_num_frames,
get_image_num_channels,
get_num_channels_image_tensor,
get_num_channels_image_pil,
get_num_channels_video,
get_num_channels,
get_spatial_size_bounding_box,
get_spatial_size_image_tensor,
get_spatial_size_image_pil,
get_spatial_size_mask,
get_spatial_size_video,
get_spatial_size,
) # usort: skip
from ._augment import erase, erase_image_pil, erase_image_tensor, erase_video
from ._color import (
adjust_brightness,
adjust_brightness_image_pil,
adjust_brightness_image_tensor,
adjust_brightness_video,
adjust_contrast,
adjust_contrast_image_pil,
adjust_contrast_image_tensor,
adjust_contrast_video,
adjust_gamma,
adjust_gamma_image_pil,
adjust_gamma_image_tensor,
adjust_gamma_video,
adjust_hue,
adjust_hue_image_pil,
adjust_hue_image_tensor,
adjust_hue_video,
adjust_saturation,
adjust_saturation_image_pil,
adjust_saturation_image_tensor,
adjust_saturation_video,
adjust_sharpness,
adjust_sharpness_image_pil,
adjust_sharpness_image_tensor,
adjust_sharpness_video,
autocontrast,
autocontrast_image_pil,
autocontrast_image_tensor,
autocontrast_video,
equalize,
equalize_image_pil,
equalize_image_tensor,
equalize_video,
invert,
invert_image_pil,
invert_image_tensor,
invert_video,
posterize,
posterize_image_pil,
posterize_image_tensor,
posterize_video,
rgb_to_grayscale,
rgb_to_grayscale_image_pil,
rgb_to_grayscale_image_tensor,
solarize,
solarize_image_pil,
solarize_image_tensor,
solarize_video,
)
from ._geometry import (
affine,
affine_bounding_box,
affine_image_pil,
affine_image_tensor,
affine_mask,
affine_video,
center_crop,
center_crop_bounding_box,
center_crop_image_pil,
center_crop_image_tensor,
center_crop_mask,
center_crop_video,
crop,
crop_bounding_box,
crop_image_pil,
crop_image_tensor,
crop_mask,
crop_video,
elastic,
elastic_bounding_box,
elastic_image_pil,
elastic_image_tensor,
elastic_mask,
elastic_transform,
elastic_video,
five_crop,
five_crop_image_pil,
five_crop_image_tensor,
five_crop_video,
hflip, # TODO: Consider moving all pure alias definitions at the bottom of the file
horizontal_flip,
horizontal_flip_bounding_box,
horizontal_flip_image_pil,
horizontal_flip_image_tensor,
horizontal_flip_mask,
horizontal_flip_video,
pad,
pad_bounding_box,
pad_image_pil,
pad_image_tensor,
pad_mask,
pad_video,
perspective,
perspective_bounding_box,
perspective_image_pil,
perspective_image_tensor,
perspective_mask,
perspective_video,
resize,
resize_bounding_box,
resize_image_pil,
resize_image_tensor,
resize_mask,
resize_video,
resized_crop,
resized_crop_bounding_box,
resized_crop_image_pil,
resized_crop_image_tensor,
resized_crop_mask,
resized_crop_video,
rotate,
rotate_bounding_box,
rotate_image_pil,
rotate_image_tensor,
rotate_mask,
rotate_video,
ten_crop,
ten_crop_image_pil,
ten_crop_image_tensor,
ten_crop_video,
vertical_flip,
vertical_flip_bounding_box,
vertical_flip_image_pil,
vertical_flip_image_tensor,
vertical_flip_mask,
vertical_flip_video,
vflip,
)
from ._misc import (
gaussian_blur,
gaussian_blur_image_pil,
gaussian_blur_image_tensor,
gaussian_blur_video,
normalize,
normalize_image_tensor,
normalize_video,
)
from ._temporal import uniform_temporal_subsample, uniform_temporal_subsample_video
from ._type_conversion import pil_to_tensor, to_image_pil, to_image_tensor, to_pil_image
from ._deprecated import get_image_size, to_grayscale, to_tensor # usort: skip
|
# Copyright (c) OpenMMLab. All rights reserved.
from .augment_wrappers import AutoAugment, RandAugment
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
Contrast, Equalize, Invert, Posterize, Sharpness,
Solarize, SolarizeAdd)
from .formatting import (ImageToTensor, PackDetInputs, PackReIDInputs,
PackTrackInputs, ToTensor, Transpose)
from .frame_sampling import BaseFrameSample, UniformRefFrameSample
from .geometric import (GeomTransform, Rotate, ShearX, ShearY, TranslateX,
TranslateY)
from .instaboost import InstaBoost
from .loading import (FilterAnnotations, InferencerLoader, LoadAnnotations,
LoadEmptyAnnotations, LoadImageFromNDArray,
LoadMultiChannelImageFromFiles, LoadPanopticAnnotations,
LoadProposals, LoadSemSegAnnotations,
LoadTrackAnnotations)
from .transforms import (Albu, CachedMixUp, CachedMosaic, CopyPaste, CutOut,
Expand, FixScaleResize, FixShapeResize,
MinIoURandomCrop, MixUp, Mosaic, Pad,
PhotoMetricDistortion, RandomAffine,
RandomCenterCropPad, RandomCrop, RandomErasing,
RandomFlip, RandomShift, Resize, SegRescale,
YOLOXHSVRandomAug)
from .wrappers import MultiBranch, ProposalBroadcaster, RandomOrder
__all__ = [
'PackDetInputs', 'ToTensor', 'ImageToTensor', 'Transpose',
'LoadImageFromNDArray', 'LoadAnnotations', 'LoadPanopticAnnotations',
'LoadMultiChannelImageFromFiles', 'LoadProposals', 'Resize', 'RandomFlip',
'RandomCrop', 'SegRescale', 'MinIoURandomCrop', 'Expand',
'PhotoMetricDistortion', 'Albu', 'InstaBoost', 'RandomCenterCropPad',
'AutoAugment', 'CutOut', 'ShearX', 'ShearY', 'Rotate', 'Color', 'Equalize',
'Brightness', 'Contrast', 'TranslateX', 'TranslateY', 'RandomShift',
'Mosaic', 'MixUp', 'RandomAffine', 'YOLOXHSVRandomAug', 'CopyPaste',
'FilterAnnotations', 'Pad', 'GeomTransform', 'ColorTransform',
'RandAugment', 'Sharpness', 'Solarize', 'SolarizeAdd', 'Posterize',
'AutoContrast', 'Invert', 'MultiBranch', 'RandomErasing',
'LoadEmptyAnnotations', 'RandomOrder', 'CachedMosaic', 'CachedMixUp',
'FixShapeResize', 'ProposalBroadcaster', 'InferencerLoader',
'LoadTrackAnnotations', 'BaseFrameSample', 'UniformRefFrameSample',
'PackTrackInputs', 'PackReIDInputs', 'FixScaleResize',
'LoadSemSegAnnotations'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .augment_wrappers import AutoAugment, RandAugment
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
Contrast, Equalize, Invert, Posterize, Sharpness,
Solarize, SolarizeAdd)
from .formatting import (ImageToTensor, PackDetInputs, PackReIDInputs,
PackTrackInputs, ToTensor, Transpose)
from .frame_sampling import BaseFrameSample, UniformRefFrameSample
from .geometric import (GeomTransform, Rotate, ShearX, ShearY, TranslateX,
TranslateY)
from .instaboost import InstaBoost
from .loading import (FilterAnnotations, InferencerLoader, LoadAnnotations,
LoadEmptyAnnotations, LoadImageFromNDArray,
LoadMultiChannelImageFromFiles, LoadPanopticAnnotations,
LoadProposals, LoadTrackAnnotations)
from .transforms import (Albu, CachedMixUp, CachedMosaic, CopyPaste, CutOut,
Expand, FixScaleResize, FixShapeResize,
MinIoURandomCrop, MixUp, Mosaic, Pad,
PhotoMetricDistortion, RandomAffine,
RandomCenterCropPad, RandomCrop, RandomErasing,
RandomFlip, RandomShift, Resize, SegRescale,
YOLOXHSVRandomAug)
from .wrappers import MultiBranch, ProposalBroadcaster, RandomOrder
__all__ = [
'PackDetInputs', 'ToTensor', 'ImageToTensor', 'Transpose',
'LoadImageFromNDArray', 'LoadAnnotations', 'LoadPanopticAnnotations',
'LoadMultiChannelImageFromFiles', 'LoadProposals', 'Resize', 'RandomFlip',
'RandomCrop', 'SegRescale', 'MinIoURandomCrop', 'Expand',
'PhotoMetricDistortion', 'Albu', 'InstaBoost', 'RandomCenterCropPad',
'AutoAugment', 'CutOut', 'ShearX', 'ShearY', 'Rotate', 'Color', 'Equalize',
'Brightness', 'Contrast', 'TranslateX', 'TranslateY', 'RandomShift',
'Mosaic', 'MixUp', 'RandomAffine', 'YOLOXHSVRandomAug', 'CopyPaste',
'FilterAnnotations', 'Pad', 'GeomTransform', 'ColorTransform',
'RandAugment', 'Sharpness', 'Solarize', 'SolarizeAdd', 'Posterize',
'AutoContrast', 'Invert', 'MultiBranch', 'RandomErasing',
'LoadEmptyAnnotations', 'RandomOrder', 'CachedMosaic', 'CachedMixUp',
'FixShapeResize', 'ProposalBroadcaster', 'InferencerLoader',
'LoadTrackAnnotations', 'BaseFrameSample', 'UniformRefFrameSample',
'PackTrackInputs', 'PackReIDInputs', 'FixScaleResize'
]
|
from typing import Any, Union
from langchain_core.utils.json import parse_json_markdown
from typing_extensions import override
from langchain.evaluation.schema import StringEvaluator
class JsonSchemaEvaluator(StringEvaluator):
"""An evaluator that validates a JSON prediction against a JSON schema reference.
This evaluator checks if a given JSON prediction conforms to the provided JSON schema.
If the prediction is valid, the score is True (no errors). Otherwise, the score is False (error occurred).
Attributes:
requires_input (bool): Whether the evaluator requires input.
requires_reference (bool): Whether the evaluator requires reference.
evaluation_name (str): The name of the evaluation.
Examples:
evaluator = JsonSchemaEvaluator()
result = evaluator.evaluate_strings(
prediction='{"name": "John", "age": 30}',
reference={
"type": "object",
"properties": {
"name": {"type": "string"},
"age": {"type": "integer"}
}
}
)
assert result["score"] is not None
""" # noqa: E501
def __init__(self, **kwargs: Any) -> None:
"""Initializes the JsonSchemaEvaluator.
Args:
kwargs: Additional keyword arguments.
Raises:
ImportError: If the jsonschema package is not installed.
"""
super().__init__()
try:
import jsonschema # noqa: F401
except ImportError as e:
msg = (
"The JsonSchemaEvaluator requires the jsonschema package."
" Please install it with `pip install jsonschema`."
)
raise ImportError(msg) from e
@property
def requires_input(self) -> bool:
"""Returns whether the evaluator requires input."""
return False
@property
def requires_reference(self) -> bool:
"""Returns whether the evaluator requires reference."""
return True
@property
def evaluation_name(self) -> str:
"""Returns the name of the evaluation."""
return "json_schema_validation"
def _parse_json(self, node: Any) -> Union[dict, list, None, float, bool, int, str]:
if isinstance(node, str):
return parse_json_markdown(node)
if hasattr(node, "schema") and callable(node.schema):
# Pydantic model
return node.schema()
return node
def _validate(self, prediction: Any, schema: Any) -> dict:
from jsonschema import ValidationError, validate
try:
validate(instance=prediction, schema=schema)
return {
"score": True,
}
except ValidationError as e:
return {"score": False, "reasoning": repr(e)}
@override
def _evaluate_strings(
self,
prediction: Union[str, Any],
input: Union[str, Any] = None,
reference: Union[str, Any] = None,
**kwargs: Any,
) -> dict:
parsed_prediction = self._parse_json(prediction)
schema = self._parse_json(reference)
return self._validate(parsed_prediction, schema)
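if __name__ == "__main__":
    # Hedged usage sketch mirroring the docstring example; requires the
    # optional `jsonschema` dependency to be installed.
    evaluator = JsonSchemaEvaluator()
    result = evaluator.evaluate_strings(
        prediction='{"name": "John", "age": "thirty"}',
        reference={
            "type": "object",
            "properties": {"name": {"type": "string"}, "age": {"type": "integer"}},
        },
    )
    print(result["score"])  # False: "thirty" is not an integer
    print(result.get("reasoning", ""))  # the ValidationError repr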
|
from typing import Any, Union
from langchain_core.utils.json import parse_json_markdown
from typing_extensions import override
from langchain.evaluation.schema import StringEvaluator
class JsonSchemaEvaluator(StringEvaluator):
"""An evaluator that validates a JSON prediction against a JSON schema reference.
This evaluator checks if a given JSON prediction conforms to the provided JSON schema.
If the prediction is valid, the score is True (no errors). Otherwise, the score is False (error occurred).
Attributes:
requires_input (bool): Whether the evaluator requires input.
requires_reference (bool): Whether the evaluator requires reference.
evaluation_name (str): The name of the evaluation.
Examples:
evaluator = JsonSchemaEvaluator()
result = evaluator.evaluate_strings(
prediction='{"name": "John", "age": 30}',
reference={
"type": "object",
"properties": {
"name": {"type": "string"},
"age": {"type": "integer"}
}
}
)
assert result["score"] is not None
""" # noqa: E501
def __init__(self, **kwargs: Any) -> None:
"""Initializes the JsonSchemaEvaluator.
Args:
kwargs: Additional keyword arguments.
Raises:
ImportError: If the jsonschema package is not installed.
"""
super().__init__()
try:
import jsonschema # noqa: F401
except ImportError:
msg = (
"The JsonSchemaEvaluator requires the jsonschema package."
" Please install it with `pip install jsonschema`."
)
raise ImportError(msg)
@property
def requires_input(self) -> bool:
"""Returns whether the evaluator requires input."""
return False
@property
def requires_reference(self) -> bool:
"""Returns whether the evaluator requires reference."""
return True
@property
def evaluation_name(self) -> str:
"""Returns the name of the evaluation."""
return "json_schema_validation"
def _parse_json(self, node: Any) -> Union[dict, list, None, float, bool, int, str]:
if isinstance(node, str):
return parse_json_markdown(node)
if hasattr(node, "schema") and callable(getattr(node, "schema")):
# Pydantic model
return getattr(node, "schema")()
return node
def _validate(self, prediction: Any, schema: Any) -> dict:
from jsonschema import ValidationError, validate
try:
validate(instance=prediction, schema=schema)
return {
"score": True,
}
except ValidationError as e:
return {"score": False, "reasoning": repr(e)}
@override
def _evaluate_strings(
self,
prediction: Union[str, Any],
input: Union[str, Any] = None,
reference: Union[str, Any] = None,
**kwargs: Any,
) -> dict:
parsed_prediction = self._parse_json(prediction)
schema = self._parse_json(reference)
return self._validate(parsed_prediction, schema)
|
"""**Messages** are objects used in prompts and chat conversations.
**Class hierarchy:**
.. code-block::
BaseMessage --> SystemMessage, AIMessage, HumanMessage, ChatMessage, FunctionMessage, ToolMessage
--> BaseMessageChunk --> SystemMessageChunk, AIMessageChunk, HumanMessageChunk, ChatMessageChunk, FunctionMessageChunk, ToolMessageChunk
**Main helpers:**
.. code-block::
ChatPromptTemplate
""" # noqa: E501
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from langchain_core.messages.ai import (
AIMessage,
AIMessageChunk,
)
from langchain_core.messages.base import (
BaseMessage,
BaseMessageChunk,
merge_content,
message_to_dict,
messages_to_dict,
)
from langchain_core.messages.chat import ChatMessage, ChatMessageChunk
from langchain_core.messages.content_blocks import (
convert_to_openai_image_block,
is_data_content_block,
)
from langchain_core.messages.function import FunctionMessage, FunctionMessageChunk
from langchain_core.messages.human import HumanMessage, HumanMessageChunk
from langchain_core.messages.modifier import RemoveMessage
from langchain_core.messages.system import SystemMessage, SystemMessageChunk
from langchain_core.messages.tool import (
InvalidToolCall,
ToolCall,
ToolCallChunk,
ToolMessage,
ToolMessageChunk,
)
from langchain_core.messages.utils import (
AnyMessage,
MessageLikeRepresentation,
_message_from_dict,
convert_to_messages,
convert_to_openai_messages,
filter_messages,
get_buffer_string,
merge_message_runs,
message_chunk_to_message,
messages_from_dict,
trim_messages,
)
__all__ = (
"AIMessage",
"AIMessageChunk",
"AnyMessage",
"BaseMessage",
"BaseMessageChunk",
"ChatMessage",
"ChatMessageChunk",
"FunctionMessage",
"FunctionMessageChunk",
"HumanMessage",
"HumanMessageChunk",
"InvalidToolCall",
"MessageLikeRepresentation",
"SystemMessage",
"SystemMessageChunk",
"ToolCall",
"ToolCallChunk",
"ToolMessage",
"ToolMessageChunk",
"RemoveMessage",
"_message_from_dict",
"convert_to_openai_image_block",
"convert_to_messages",
"get_buffer_string",
"is_data_content_block",
"merge_content",
"message_chunk_to_message",
"message_to_dict",
"messages_from_dict",
"messages_to_dict",
"filter_messages",
"merge_message_runs",
"trim_messages",
"convert_to_openai_messages",
)
_dynamic_imports = {
"AIMessage": "ai",
"AIMessageChunk": "ai",
"BaseMessage": "base",
"BaseMessageChunk": "base",
"merge_content": "base",
"message_to_dict": "base",
"messages_to_dict": "base",
"ChatMessage": "chat",
"ChatMessageChunk": "chat",
"FunctionMessage": "function",
"FunctionMessageChunk": "function",
"HumanMessage": "human",
"HumanMessageChunk": "human",
"RemoveMessage": "modifier",
"SystemMessage": "system",
"SystemMessageChunk": "system",
"InvalidToolCall": "tool",
"ToolCall": "tool",
"ToolCallChunk": "tool",
"ToolMessage": "tool",
"ToolMessageChunk": "tool",
"AnyMessage": "utils",
"MessageLikeRepresentation": "utils",
"_message_from_dict": "utils",
"convert_to_messages": "utils",
"convert_to_openai_image_block": "content_blocks",
"convert_to_openai_messages": "utils",
"filter_messages": "utils",
"get_buffer_string": "utils",
"is_data_content_block": "content_blocks",
"merge_message_runs": "utils",
"message_chunk_to_message": "utils",
"messages_from_dict": "utils",
"trim_messages": "utils",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
result = import_attr(attr_name, module_name, __spec__.parent)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
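# Hedged usage sketch (comment-only, since this is a package __init__): the
# `__getattr__` hook above implements PEP 562 lazy imports. The first
# attribute access resolves the owning submodule via `_dynamic_imports`,
# imports the symbol, and caches it in `globals()`, so later lookups bypass
# `__getattr__` entirely.
#
#   from langchain_core import messages
#
#   msg = messages.HumanMessage("hello")  # triggers import of .human
#   msg = messages.HumanMessage("again")  # served from module globals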
|
"""**Messages** are objects used in prompts and chat conversations.
**Class hierarchy:**
.. code-block::
BaseMessage --> SystemMessage, AIMessage, HumanMessage, ChatMessage, FunctionMessage, ToolMessage
--> BaseMessageChunk --> SystemMessageChunk, AIMessageChunk, HumanMessageChunk, ChatMessageChunk, FunctionMessageChunk, ToolMessageChunk
**Main helpers:**
.. code-block::
ChatPromptTemplate
""" # noqa: E501
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.messages.ai import (
AIMessage,
AIMessageChunk,
)
from langchain_core.messages.base import (
BaseMessage,
BaseMessageChunk,
merge_content,
message_to_dict,
messages_to_dict,
)
from langchain_core.messages.chat import ChatMessage, ChatMessageChunk
from langchain_core.messages.content_blocks import (
convert_to_openai_image_block,
is_data_content_block,
)
from langchain_core.messages.function import FunctionMessage, FunctionMessageChunk
from langchain_core.messages.human import HumanMessage, HumanMessageChunk
from langchain_core.messages.modifier import RemoveMessage
from langchain_core.messages.system import SystemMessage, SystemMessageChunk
from langchain_core.messages.tool import (
InvalidToolCall,
ToolCall,
ToolCallChunk,
ToolMessage,
ToolMessageChunk,
)
from langchain_core.messages.utils import (
AnyMessage,
MessageLikeRepresentation,
_message_from_dict,
convert_to_messages,
convert_to_openai_messages,
filter_messages,
get_buffer_string,
merge_message_runs,
message_chunk_to_message,
messages_from_dict,
trim_messages,
)
__all__ = [
"AIMessage",
"AIMessageChunk",
"AnyMessage",
"BaseMessage",
"BaseMessageChunk",
"ChatMessage",
"ChatMessageChunk",
"FunctionMessage",
"FunctionMessageChunk",
"HumanMessage",
"HumanMessageChunk",
"InvalidToolCall",
"MessageLikeRepresentation",
"SystemMessage",
"SystemMessageChunk",
"ToolCall",
"ToolCallChunk",
"ToolMessage",
"ToolMessageChunk",
"RemoveMessage",
"_message_from_dict",
"convert_to_openai_image_block",
"convert_to_messages",
"get_buffer_string",
"is_data_content_block",
"merge_content",
"message_chunk_to_message",
"message_to_dict",
"messages_from_dict",
"messages_to_dict",
"filter_messages",
"merge_message_runs",
"trim_messages",
"convert_to_openai_messages",
]
_dynamic_imports = {
"AIMessage": "ai",
"AIMessageChunk": "ai",
"BaseMessage": "base",
"BaseMessageChunk": "base",
"merge_content": "base",
"message_to_dict": "base",
"messages_to_dict": "base",
"ChatMessage": "chat",
"ChatMessageChunk": "chat",
"FunctionMessage": "function",
"FunctionMessageChunk": "function",
"HumanMessage": "human",
"HumanMessageChunk": "human",
"RemoveMessage": "modifier",
"SystemMessage": "system",
"SystemMessageChunk": "system",
"InvalidToolCall": "tool",
"ToolCall": "tool",
"ToolCallChunk": "tool",
"ToolMessage": "tool",
"ToolMessageChunk": "tool",
"AnyMessage": "utils",
"MessageLikeRepresentation": "utils",
"_message_from_dict": "utils",
"convert_to_messages": "utils",
"convert_to_openai_image_block": "content_blocks",
"convert_to_openai_messages": "utils",
"filter_messages": "utils",
"get_buffer_string": "utils",
"is_data_content_block": "content_blocks",
"merge_message_runs": "utils",
"message_chunk_to_message": "utils",
"messages_from_dict": "utils",
"trim_messages": "utils",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
from typing import Any
from langchain_core.callbacks import (
UsageMetadataCallbackHandler,
get_usage_metadata_callback,
)
from langchain_core.language_models import GenericFakeChatModel
from langchain_core.messages import AIMessage
from langchain_core.messages.ai import (
InputTokenDetails,
OutputTokenDetails,
UsageMetadata,
add_usage,
)
from langchain_core.outputs import ChatResult
usage1 = UsageMetadata(
input_tokens=1,
output_tokens=2,
total_tokens=3,
)
usage2 = UsageMetadata(
input_tokens=4,
output_tokens=5,
total_tokens=9,
)
usage3 = UsageMetadata(
input_tokens=10,
output_tokens=20,
total_tokens=30,
input_token_details=InputTokenDetails(audio=5),
output_token_details=OutputTokenDetails(reasoning=10),
)
usage4 = UsageMetadata(
input_tokens=5,
output_tokens=10,
total_tokens=15,
input_token_details=InputTokenDetails(audio=3),
output_token_details=OutputTokenDetails(reasoning=5),
)
messages = [
AIMessage("Response 1", usage_metadata=usage1),
AIMessage("Response 2", usage_metadata=usage2),
AIMessage("Response 3", usage_metadata=usage3),
AIMessage("Response 4", usage_metadata=usage4),
]
class FakeChatModelWithResponseMetadata(GenericFakeChatModel):
model_name: str
def _generate(self, *args: Any, **kwargs: Any) -> ChatResult:
result = super()._generate(*args, **kwargs)
result.generations[0].message.response_metadata = {
"model_name": self.model_name
}
return result
def test_usage_callback() -> None:
llm = FakeChatModelWithResponseMetadata(
messages=iter(messages), model_name="test_model"
)
# Test context manager
with get_usage_metadata_callback() as cb:
_ = llm.invoke("Message 1")
_ = llm.invoke("Message 2")
total_1_2 = add_usage(usage1, usage2)
assert cb.usage_metadata == {"test_model": total_1_2}
_ = llm.invoke("Message 3")
_ = llm.invoke("Message 4")
total_3_4 = add_usage(usage3, usage4)
assert cb.usage_metadata == {"test_model": add_usage(total_1_2, total_3_4)}
# Test via config
llm = FakeChatModelWithResponseMetadata(
messages=iter(messages[:2]), model_name="test_model"
)
callback = UsageMetadataCallbackHandler()
_ = llm.batch(["Message 1", "Message 2"], config={"callbacks": [callback]})
assert callback.usage_metadata == {"test_model": total_1_2}
# Test multiple models
llm_1 = FakeChatModelWithResponseMetadata(
messages=iter(messages[:2]), model_name="test_model_1"
)
llm_2 = FakeChatModelWithResponseMetadata(
messages=iter(messages[2:4]), model_name="test_model_2"
)
callback = UsageMetadataCallbackHandler()
_ = llm_1.batch(["Message 1", "Message 2"], config={"callbacks": [callback]})
_ = llm_2.batch(["Message 3", "Message 4"], config={"callbacks": [callback]})
assert callback.usage_metadata == {
"test_model_1": total_1_2,
"test_model_2": total_3_4,
}
async def test_usage_callback_async() -> None:
llm = FakeChatModelWithResponseMetadata(
messages=iter(messages), model_name="test_model"
)
# Test context manager
with get_usage_metadata_callback() as cb:
_ = await llm.ainvoke("Message 1")
_ = await llm.ainvoke("Message 2")
total_1_2 = add_usage(usage1, usage2)
assert cb.usage_metadata == {"test_model": total_1_2}
_ = await llm.ainvoke("Message 3")
_ = await llm.ainvoke("Message 4")
total_3_4 = add_usage(usage3, usage4)
assert cb.usage_metadata == {"test_model": add_usage(total_1_2, total_3_4)}
# Test via config
llm = FakeChatModelWithResponseMetadata(
messages=iter(messages[:2]), model_name="test_model"
)
callback = UsageMetadataCallbackHandler()
_ = await llm.abatch(["Message 1", "Message 2"], config={"callbacks": [callback]})
assert callback.usage_metadata == {"test_model": total_1_2}
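if __name__ == "__main__":
    # Small illustrative check (assuming `add_usage` sums counts field-wise,
    # including nested token details): merging usage3 and usage4 above should
    # give 15 input / 30 output / 45 total tokens, plus 8 audio input tokens
    # and 15 reasoning output tokens in the nested detail dicts.
    merged = add_usage(usage3, usage4)
    assert merged["input_tokens"] == 15
    assert merged["output_tokens"] == 30
    assert merged["total_tokens"] == 45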
|
from itertools import cycle
from langchain_core.callbacks import (
UsageMetadataCallbackHandler,
get_usage_metadata_callback,
)
from langchain_core.language_models import GenericFakeChatModel
from langchain_core.messages import AIMessage
from langchain_core.messages.ai import (
InputTokenDetails,
OutputTokenDetails,
UsageMetadata,
add_usage,
)
usage1 = UsageMetadata(
input_tokens=1,
output_tokens=2,
total_tokens=3,
)
usage2 = UsageMetadata(
input_tokens=4,
output_tokens=5,
total_tokens=9,
)
usage3 = UsageMetadata(
input_tokens=10,
output_tokens=20,
total_tokens=30,
input_token_details=InputTokenDetails(audio=5),
output_token_details=OutputTokenDetails(reasoning=10),
)
usage4 = UsageMetadata(
input_tokens=5,
output_tokens=10,
total_tokens=15,
input_token_details=InputTokenDetails(audio=3),
output_token_details=OutputTokenDetails(reasoning=5),
)
messages = [
AIMessage("Response 1", usage_metadata=usage1),
AIMessage("Response 2", usage_metadata=usage2),
AIMessage("Response 3", usage_metadata=usage3),
AIMessage("Response 4", usage_metadata=usage4),
]
def test_usage_callback() -> None:
llm = GenericFakeChatModel(messages=cycle(messages))
# Test context manager
with get_usage_metadata_callback() as cb:
_ = llm.invoke("Message 1")
_ = llm.invoke("Message 2")
total_1_2 = add_usage(usage1, usage2)
assert cb.usage_metadata == total_1_2
_ = llm.invoke("Message 3")
_ = llm.invoke("Message 4")
total_3_4 = add_usage(usage3, usage4)
assert cb.usage_metadata == add_usage(total_1_2, total_3_4)
# Test via config
callback = UsageMetadataCallbackHandler()
_ = llm.batch(["Message 1", "Message 2"], config={"callbacks": [callback]})
assert callback.usage_metadata == total_1_2
async def test_usage_callback_async() -> None:
llm = GenericFakeChatModel(messages=cycle(messages))
# Test context manager
with get_usage_metadata_callback() as cb:
_ = await llm.ainvoke("Message 1")
_ = await llm.ainvoke("Message 2")
total_1_2 = add_usage(usage1, usage2)
assert cb.usage_metadata == total_1_2
_ = await llm.ainvoke("Message 3")
_ = await llm.ainvoke("Message 4")
total_3_4 = add_usage(usage3, usage4)
assert cb.usage_metadata == add_usage(total_1_2, total_3_4)
# Test via config
callback = UsageMetadataCallbackHandler()
_ = await llm.abatch(["Message 1", "Message 2"], config={"callbacks": [callback]})
assert callback.usage_metadata == total_1_2
|
# Copyright 2025 HiDream-ai Team and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import List, Union
import numpy as np
import PIL.Image
from ...utils import BaseOutput
@dataclass
class HiDreamImagePipelineOutput(BaseOutput):
"""
Output class for HiDreamImage pipelines.
Args:
        images (`List[PIL.Image.Image]` or `np.ndarray`):
            List of denoised PIL images of length `batch_size` or a numpy array of shape `(batch_size, height, width,
            num_channels)`. The PIL images or numpy array represent the denoised images of the diffusion pipeline.
"""
images: Union[List[PIL.Image.Image], np.ndarray]
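if __name__ == "__main__":
    # Hedged usage sketch: the output class is a thin, dict-like dataclass, so
    # it can be constructed directly from a list of PIL images (or an ndarray
    # batch) outside of a pipeline run.
    placeholder = [PIL.Image.new("RGB", (64, 64))]
    out = HiDreamImagePipelineOutput(images=placeholder)
    print(type(out.images[0]))  # <class 'PIL.Image.Image'>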
|
from dataclasses import dataclass
from typing import List, Union
import numpy as np
import PIL.Image
from ...utils import BaseOutput
@dataclass
class HiDreamImagePipelineOutput(BaseOutput):
"""
Output class for HiDreamImage pipelines.
Args:
        images (`List[PIL.Image.Image]` or `np.ndarray`):
            List of denoised PIL images of length `batch_size` or a numpy array of shape `(batch_size, height, width,
            num_channels)`. The PIL images or numpy array represent the denoised images of the diffusion pipeline.
"""
images: Union[List[PIL.Image.Image], np.ndarray]
|
_base_ = './rtmdet_l_8xb32-300e_coco.py'
checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e.pth' # noqa
model = dict(
backbone=dict(
deepen_factor=0.33,
widen_factor=0.5,
init_cfg=dict(
type='Pretrained', prefix='backbone.', checkpoint=checkpoint)),
neck=dict(in_channels=[128, 256, 512], out_channels=128, num_csp_blocks=1),
bbox_head=dict(in_channels=128, feat_channels=128, exp_on_reg=False))
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='CachedMosaic', img_scale=(640, 640), pad_val=114.0),
dict(
type='RandomResize',
scale=(1280, 1280),
ratio_range=(0.5, 2.0),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(640, 640)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(
type='CachedMixUp',
img_scale=(640, 640),
ratio_range=(1.0, 1.0),
max_cached_images=20,
pad_val=(114, 114, 114)),
dict(type='PackDetInputs')
]
train_pipeline_stage2 = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=(640, 640),
ratio_range=(0.5, 2.0),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(640, 640)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
custom_hooks = [
dict(
type='EMAHook',
ema_type='ExpMomentumEMA',
momentum=0.0002,
update_buffers=True,
priority=49),
dict(
type='PipelineSwitchHook',
switch_epoch=280,
switch_pipeline=train_pipeline_stage2)
]
|
_base_ = './rtmdet_l_8xb32-300e_coco.py'
checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e.pth' # noqa
model = dict(
backbone=dict(
deepen_factor=0.33,
widen_factor=0.5,
init_cfg=dict(
type='Pretrained', prefix='backbone.', checkpoint=checkpoint)),
neck=dict(in_channels=[128, 256, 512], out_channels=128, num_csp_blocks=1),
bbox_head=dict(in_channels=128, feat_channels=128, exp_on_reg=False))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='CachedMosaic', img_scale=(640, 640), pad_val=114.0),
dict(
type='RandomResize',
scale=(1280, 1280),
ratio_range=(0.5, 2.0),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(640, 640)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(
type='CachedMixUp',
img_scale=(640, 640),
ratio_range=(1.0, 1.0),
max_cached_images=20,
pad_val=(114, 114, 114)),
dict(type='PackDetInputs')
]
train_pipeline_stage2 = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=(640, 640),
ratio_range=(0.5, 2.0),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(640, 640)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
custom_hooks = [
dict(
type='EMAHook',
ema_type='ExpMomentumEMA',
momentum=0.0002,
update_buffers=True,
priority=49),
dict(
type='PipelineSwitchHook',
switch_epoch=280,
switch_pipeline=train_pipeline_stage2)
]
|
"""
Separation of concerns:
DataAdapter:
- x, y
- sample_weight
- class_weight
- shuffle
- batch_size
- steps, as it relates to batch_size for array data
EpochIterator:
- whether to yield numpy or tf data
- steps
- most argument validation
Trainer:
- steps_per_execution
- validation_split
- validation_data
- callbacks
- validation_freq
- epochs
- initial_epoch
- any backend-specific concern such as distribution
PyDataset:
- num_workers
- use_multiprocessing
- max_queue_size
EpochIterator steps:
1. Look at data type and select correct DataHandler
2. Instantiate DataHandler with correct arguments
3. Raise or warn on unused arguments
4. in __iter__, iterate, either for a fixed number of steps
or until there is no data
"""
import contextlib
import warnings
from keras.src.trainers import data_adapters
class EpochIterator:
def __init__(
self,
x,
y=None,
sample_weight=None,
batch_size=None,
steps_per_epoch=None,
shuffle=False,
class_weight=None,
steps_per_execution=1,
):
self.steps_per_epoch = steps_per_epoch
self.steps_per_execution = steps_per_execution
self._current_iterator = None
self._epoch_iterator = None
self._steps_seen = 0
self.data_adapter = data_adapters.get_data_adapter(
x=x,
y=y,
sample_weight=sample_weight,
batch_size=batch_size,
steps_per_epoch=steps_per_epoch,
shuffle=shuffle,
class_weight=class_weight,
)
self._num_batches = self.data_adapter.num_batches
def _get_iterator(self):
return self.data_adapter.get_numpy_iterator()
def _interrupted_warning(self):
warnings.warn(
"Your input ran out of data; interrupting training. "
"Make sure that your dataset or generator can generate "
"at least `steps_per_epoch * epochs` batches. "
"You may need to use the `.repeat()` "
"function when building your dataset.",
stacklevel=2,
)
def reset(self):
self._current_iterator = None
self._num_batches = self.data_adapter.num_batches
self._steps_seen = 0
self._epoch_iterator = None
self.data_adapter.on_epoch_end()
def _enumerate_iterator(self):
self.data_adapter.on_epoch_begin()
steps_per_epoch = self.steps_per_epoch or self._num_batches or -1
if steps_per_epoch > 0:
if self._current_iterator is None or self.steps_per_epoch is None:
self._current_iterator = iter(self._get_iterator())
self._steps_seen = 0
for step in range(0, steps_per_epoch, self.steps_per_execution):
if self._num_batches and self._steps_seen >= self._num_batches:
if self.steps_per_epoch:
self._interrupted_warning()
break
self._steps_seen += self.steps_per_execution
yield step, self._current_iterator
if self._num_batches and self._steps_seen >= self._num_batches:
self._current_iterator = iter(self._get_iterator())
self._steps_seen = 0
else:
iterator = iter(self._get_iterator())
step = -self.steps_per_execution
while True:
step += self.steps_per_execution
self._steps_seen = step + self.steps_per_execution
yield step, iterator
self.data_adapter.on_epoch_end()
def __iter__(self):
self._epoch_iterator = self._enumerate_iterator()
return self
def __next__(self):
buffer = []
step, iterator = next(self._epoch_iterator)
with self.catch_stop_iteration():
for _ in range(self.steps_per_execution):
data = next(iterator)
buffer.append(data)
return step, buffer
if buffer:
return step, buffer
raise StopIteration
def enumerate_epoch(self):
for step, data in self:
yield step, data
@contextlib.contextmanager
def catch_stop_iteration(self):
"""Catches errors when an iterator runs out of data."""
try:
yield
except StopIteration:
if self._num_batches is None:
self._num_batches = self._steps_seen
self._interrupted_warning()
self._current_iterator = None
self.data_adapter.on_epoch_end()
@property
def num_batches(self):
if self.steps_per_epoch:
return self.steps_per_epoch
# Either copied from the data_adapter, or
# inferred at the end of an iteration.
return self._num_batches
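if __name__ == "__main__":
    # Hedged usage sketch: iterate one epoch over in-memory arrays. Assumes
    # array inputs are accepted by the data adapter and converted to a numpy
    # iterator; each yielded `batch` is a list of up to `steps_per_execution`
    # batches.
    import numpy as np

    x = np.random.rand(10, 3)
    y = np.random.rand(10, 1)
    epoch_iterator = EpochIterator(x=x, y=y, batch_size=4)
    for step, batch in epoch_iterator:
        print(step, [tuple(t.shape for t in b) for b in batch])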
|
"""
Separation of concerns:
DataAdapter:
- x, y
- sample_weight
- class_weight
- shuffle
- batch_size
- steps, as it relates to batch_size for array data
EpochIterator:
- whether to yield numpy or tf data
- steps
- most argument validation
Trainer:
- steps_per_execution
- validation_split
- validation_data
- callbacks
- validation_freq
- epochs
- initial_epoch
- any backend-specific concern such as distribution
PyDataset:
- num_workers
- use_multiprocessing
- max_queue_size
EpochIterator steps:
1. Look at data type and select correct DataHandler
2. Instantiate DataHandler with correct arguments
3. Raise or warn on unused arguments
4. in __iter__, iterate, either for a fixed number of steps
or until there is no data
"""
import contextlib
import warnings
from keras.src.trainers import data_adapters
class EpochIterator:
def __init__(
self,
x,
y=None,
sample_weight=None,
batch_size=None,
steps_per_epoch=None,
shuffle=False,
class_weight=None,
steps_per_execution=1,
):
self.steps_per_epoch = steps_per_epoch
self.steps_per_execution = steps_per_execution
self._current_iterator = None
self._epoch_iterator = None
self._steps_seen = 0
self.data_adapter = data_adapters.get_data_adapter(
x=x,
y=y,
sample_weight=sample_weight,
batch_size=batch_size,
steps_per_epoch=steps_per_epoch,
shuffle=shuffle,
class_weight=class_weight,
)
self._num_batches = self.data_adapter.num_batches
def _get_iterator(self):
return self.data_adapter.get_numpy_iterator()
def _interrupted_warning(self):
warnings.warn(
"Your input ran out of data; interrupting training. "
"Make sure that your dataset or generator can generate "
"at least `steps_per_epoch * epochs` batches. "
"You may need to use the `.repeat()` "
"function when building your dataset.",
stacklevel=2,
)
def reset(self):
self._current_iterator = None
self._num_batches = self.data_adapter.num_batches
self._steps_seen = 0
self._epoch_iterator = None
def _enumerate_iterator(self):
self.data_adapter.on_epoch_begin()
steps_per_epoch = self.steps_per_epoch or self._num_batches or -1
if steps_per_epoch > 0:
if self._current_iterator is None or self.steps_per_epoch is None:
self._current_iterator = iter(self._get_iterator())
self._steps_seen = 0
for step in range(0, steps_per_epoch, self.steps_per_execution):
if self._num_batches and self._steps_seen >= self._num_batches:
if self.steps_per_epoch:
self._interrupted_warning()
break
self._steps_seen += self.steps_per_execution
yield step, self._current_iterator
if self._num_batches and self._steps_seen >= self._num_batches:
self._current_iterator = iter(self._get_iterator())
self._steps_seen = 0
else:
iterator = iter(self._get_iterator())
step = -self.steps_per_execution
while True:
step += self.steps_per_execution
self._steps_seen = step + self.steps_per_execution
yield step, iterator
self.data_adapter.on_epoch_end()
def __iter__(self):
self._epoch_iterator = self._enumerate_iterator()
return self
def __next__(self):
buffer = []
step, iterator = next(self._epoch_iterator)
with self.catch_stop_iteration():
for _ in range(self.steps_per_execution):
data = next(iterator)
buffer.append(data)
return step, buffer
if buffer:
return step, buffer
raise StopIteration
def enumerate_epoch(self):
for step, data in self:
yield step, data
@contextlib.contextmanager
def catch_stop_iteration(self):
"""Catches errors when an iterator runs out of data."""
try:
yield
except StopIteration:
if self._num_batches is None:
self._num_batches = self._steps_seen
self._interrupted_warning()
self._current_iterator = None
self.data_adapter.on_epoch_end()
@property
def num_batches(self):
if self.steps_per_epoch:
return self.steps_per_epoch
# Either copied from the data_adapter, or
# inferred at the end of an iteration.
return self._num_batches
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '2.21.0'
short_version = __version__
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
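# Worked examples of the parser above:
#   parse_version_info('2.21.0')    -> (2, 21, 0)
#   parse_version_info('2.21.0rc1') -> (2, 21, 0, 'rc1')
# ('0rc1' splits on 'rc' into the patch number 0 and the suffix 'rc1').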
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '2.20.0'
short_version = __version__
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain.agents import AgentOutputParser
class XMLAgentOutputParser(AgentOutputParser):
"""Parses tool invocations and final answers in XML format.
Expects output to be in one of two formats.
If the output signals that an action should be taken,
    it should be in the format below. This will result in an AgentAction
being returned.
```
<tool>search</tool>
<tool_input>what is 2 + 2</tool_input>
```
If the output signals that a final answer should be given,
    it should be in the format below. This will result in an AgentFinish
being returned.
```
<final_answer>Foo</final_answer>
```
"""
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
if "</tool>" in text:
tool, tool_input = text.split("</tool>")
_tool = tool.split("<tool>")[1]
_tool_input = tool_input.split("<tool_input>")[1]
if "</tool_input>" in _tool_input:
_tool_input = _tool_input.split("</tool_input>")[0]
return AgentAction(tool=_tool, tool_input=_tool_input, log=text)
if "<final_answer>" in text:
_, answer = text.split("<final_answer>")
if "</final_answer>" in answer:
answer = answer.split("</final_answer>")[0]
return AgentFinish(return_values={"output": answer}, log=text)
raise ValueError
def get_format_instructions(self) -> str:
raise NotImplementedError
@property
def _type(self) -> str:
return "xml-agent"
|
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain.agents import AgentOutputParser
class XMLAgentOutputParser(AgentOutputParser):
"""Parses tool invocations and final answers in XML format.
Expects output to be in one of two formats.
If the output signals that an action should be taken,
    it should be in the format below. This will result in an AgentAction
being returned.
```
<tool>search</tool>
<tool_input>what is 2 + 2</tool_input>
```
If the output signals that a final answer should be given,
    it should be in the format below. This will result in an AgentFinish
being returned.
```
<final_answer>Foo</final_answer>
```
"""
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
if "</tool>" in text:
tool, tool_input = text.split("</tool>")
_tool = tool.split("<tool>")[1]
_tool_input = tool_input.split("<tool_input>")[1]
if "</tool_input>" in _tool_input:
_tool_input = _tool_input.split("</tool_input>")[0]
return AgentAction(tool=_tool, tool_input=_tool_input, log=text)
elif "<final_answer>" in text:
_, answer = text.split("<final_answer>")
if "</final_answer>" in answer:
answer = answer.split("</final_answer>")[0]
return AgentFinish(return_values={"output": answer}, log=text)
else:
raise ValueError
def get_format_instructions(self) -> str:
raise NotImplementedError
@property
def _type(self) -> str:
return "xml-agent"
|
from llama_index.core import PromptTemplate
ZERO_SHOT_COMPLETION_TEMPLATE = (
"{instruction}\n{label_heading}: {label}\n{text_heading}: {synthetic_text}"
)
zero_shot_completion_template = PromptTemplate(ZERO_SHOT_COMPLETION_TEMPLATE)
SINGLE_EXAMPLE_TEMPLATE = (
"{label_heading}: {example_label}\n{text_heading}: {example_text}\n\n"
)
single_example_template = PromptTemplate(SINGLE_EXAMPLE_TEMPLATE)
FEW_SHOT_COMPLETION_TEMPLATE = (
"{instruction}\n\n"
"{few_shot_examples}"
"{label_heading}: {label}\n{text_heading}: {synthetic_text}"
)
few_shot_completion_template = PromptTemplate(FEW_SHOT_COMPLETION_TEMPLATE)
THREE_SHOT_COMPLETION_TEMPLATE = (
"{instruction}\n"
"{label_heading}: {example_label}\n{text_heading}: {example_text}\n\n"
"{label_heading}: {second_example_label}\n{text_heading}: {second_example_text}\n\n"
"{label_heading}: {third_example_label}\n{text_heading}: {third_example_text}\n\n"
"{label_heading}: {label}\n{text_heading}: {synthetic_text}"
)
three_shot_completion_template = PromptTemplate(THREE_SHOT_COMPLETION_TEMPLATE)
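if __name__ == "__main__":
    # Hedged usage sketch: render the zero-shot template with placeholder
    # values (all field names below come from the template string itself).
    rendered = zero_shot_completion_template.format(
        instruction="Write a movie review matching the label.",
        label_heading="Sentiment",
        label="positive",
        text_heading="Review",
        synthetic_text="",
    )
    print(rendered)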
|
from llama_index.core import PromptTemplate
ZERO_SHOT_COMPLETION_TEMPLATE = (
"{instruction}\n" "{label_heading}: {label}\n{text_heading}: {synthetic_text}"
)
zero_shot_completion_template = PromptTemplate(ZERO_SHOT_COMPLETION_TEMPLATE)
SINGLE_EXAMPLE_TEMPLATE = (
"{label_heading}: {example_label}\n{text_heading}: {example_text}\n\n"
)
single_example_template = PromptTemplate(SINGLE_EXAMPLE_TEMPLATE)
FEW_SHOT_COMPLETION_TEMPLATE = (
"{instruction}\n\n"
"{few_shot_examples}"
"{label_heading}: {label}\n{text_heading}: {synthetic_text}"
)
few_shot_completion_template = PromptTemplate(FEW_SHOT_COMPLETION_TEMPLATE)
THREE_SHOT_COMPLETION_TEMPLATE = (
"{instruction}\n"
"{label_heading}: {example_label}\n{text_heading}: {example_text}\n\n"
"{label_heading}: {second_example_label}\n{text_heading}: {second_example_text}\n\n"
"{label_heading}: {third_example_label}\n{text_heading}: {third_example_text}\n\n"
"{label_heading}: {label}\n{text_heading}: {synthetic_text}"
)
three_shot_completion_template = PromptTemplate(THREE_SHOT_COMPLETION_TEMPLATE)
|
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Processor class for Granite Speech."""
from typing import Union
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...tokenization_utils import PreTokenizedInput, TextInput
from ...utils import is_torch_available, logging
from ...utils.import_utils import requires_backends
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class GraniteSpeechProcessor(ProcessorMixin):
attributes = ["audio_processor", "tokenizer"]
audio_processor_class = "GraniteSpeechFeatureExtractor"
tokenizer_class = "AutoTokenizer"
def __init__(
self,
audio_processor,
tokenizer,
audio_token="<|audio|>",
chat_template=None,
):
self.audio_token = tokenizer.audio_token if hasattr(tokenizer, "audio_token") else audio_token
super().__init__(audio_processor, tokenizer, chat_template=chat_template)
def __call__(
self,
text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]],
audio: Union["torch.Tensor", list["torch.Tensor"]] = None,
device: str = "cpu",
images=None,
videos=None,
**kwargs,
) -> BatchFeature:
requires_backends(self, ["torch"])
text = self._get_validated_text(text)
prompt_strings = text
if audio is not None:
# NOTE - we intentionally avoid throwing for potentially misaligned
# text / audio inputs here because some inference engines will
# trigger the conditions due to the way they call multimodal
# processors, e.g., vLLM.
audio_inputs = self.audio_processor(audio, device=device)
# TODO (@alex-jw-brooks); we should add a util to get_num_audio_tokens
# from feature lengths and call it here, rather than returning it
# from the feature extractor.
audio_embed_sizes = audio_inputs.pop("audio_embed_sizes")
# Expand the audio placeholders to match the feature dims; this
# is similar to how many VLMs handle image tokens, e.g., llava next
prompt_strings = []
num_replaced = 0
for sample in text:
while self.audio_token in sample:
sample = sample.replace(
self.audio_token,
"<placeholder>" * audio_embed_sizes[num_replaced],
1,
)
num_replaced += 1
prompt_strings.append(sample)
prompt_strings = [sample.replace("<placeholder>", self.audio_token) for sample in prompt_strings]
else:
audio_inputs = {}
if "padding" not in kwargs:
kwargs["padding"] = True
text_inputs = self.tokenizer(prompt_strings, **kwargs)
return BatchFeature(data={**text_inputs, **audio_inputs})
def _get_validated_text(self, text: Union[str, list]) -> list[str]:
if isinstance(text, str):
return [text]
elif isinstance(text, list) and isinstance(text[0], str):
return text
raise TypeError("Invalid text provided! Text should be a string or list of strings.")
__all__ = ["GraniteSpeechProcessor"]
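if __name__ == "__main__":
    # Dependency-free sketch (illustrative only) of the placeholder expansion
    # performed in `__call__`: each audio token is widened to its embed size
    # via a temporary placeholder, then swapped back to the real token string.
    audio_token = "<|audio|>"
    audio_embed_sizes = [3, 2]
    sample = f"transcribe: {audio_token} then {audio_token}"
    num_replaced = 0
    while audio_token in sample:
        sample = sample.replace(
            audio_token, "<placeholder>" * audio_embed_sizes[num_replaced], 1
        )
        num_replaced += 1
    sample = sample.replace("<placeholder>", audio_token)
    print(sample)  # the two tokens expand to 3 and 2 copies respectively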
|
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Processor class for Granite Speech."""
from typing import Union
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...tokenization_utils import PreTokenizedInput, TextInput
from ...utils import is_torch_available, logging
from ...utils.import_utils import requires_backends
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class GraniteSpeechProcessor(ProcessorMixin):
attributes = ["audio_processor", "tokenizer"]
audio_processor_class = "GraniteSpeechFeatureExtractor"
tokenizer_class = "AutoTokenizer"
def __init__(
self,
audio_processor,
tokenizer,
audio_token="<|audio|>",
chat_template=None,
):
self.audio_token = tokenizer.audio_token if hasattr(tokenizer, "audio_token") else audio_token
super().__init__(audio_processor, tokenizer, chat_template=chat_template)
def __call__(
self,
text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]],
audio: Union["torch.Tensor", list["torch.Tensor"]] = None,
device: str = "cpu",
images=None,
videos=None,
**kwargs,
) -> BatchFeature:
requires_backends(self, ["torch"])
text = self._get_validated_text(text)
prompt_strings = text
if audio is not None:
# NOTE - we intentionally avoid throwing for potentially misaligned
# text / audio inputs here because some inference engines will
# trigger the conditions due to the way they call multimodal
# processors, e.g., vLLM.
audio_inputs = self.audio_processor(audio, device=device)
# TODO (@alex-jw-brooks); we should add a util to get_num_audio_tokens
# from feature lengths and call it here, rather than returning it
# from the feature extractor.
audio_embed_sizes = audio_inputs.pop("audio_embed_sizes")
# Expand the audio placeholders to match the feature dims; this
# is similar to how many VLMs handle image tokens, e.g., llava next
prompt_strings = []
num_replaced = 0
for sample in text:
while self.audio_token in sample:
sample = sample.replace(
self.audio_token,
"<placeholder>" * audio_embed_sizes[num_replaced],
1,
)
num_replaced += 1
prompt_strings.append(sample)
prompt_strings = [sample.replace("<placeholder>", self.audio_token) for sample in prompt_strings]
else:
audio_inputs = {}
text_inputs = self.tokenizer(prompt_strings, padding=True, **kwargs)
return BatchFeature(data={**text_inputs, **audio_inputs})
def _get_validated_text(self, text: Union[str, list]) -> list[str]:
if isinstance(text, str):
return [text]
elif isinstance(text, list) and isinstance(text[0], str):
return text
raise TypeError("Invalid text provided! Text should be a string or list of strings.")
__all__ = ["GraniteSpeechProcessor"]
|
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.ndarray import NdArray
@_register_proto(proto_type_name='audio_ndarray')
class AudioNdArray(AbstractAudioTensor, NdArray):
"""
Subclass of [`NdArray`][docarray.typing.NdArray], to represent an audio tensor.
Adds audio-specific features to the tensor.
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import AudioBytes, AudioNdArray, AudioUrl
import numpy as np
class MyAudioDoc(BaseDoc):
title: str
audio_tensor: Optional[AudioNdArray] = None
url: Optional[AudioUrl] = None
bytes_: Optional[AudioBytes] = None
# from tensor
doc_1 = MyAudioDoc(
title='my_first_audio_doc',
audio_tensor=np.random.rand(1000, 2),
)
# doc_1.audio_tensor.save(file_path='/tmp/file_1.wav')
doc_1.bytes_ = doc_1.audio_tensor.to_bytes()
# from url
doc_2 = MyAudioDoc(
title='my_second_audio_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.audio_tensor, _ = doc_2.url.load()
# doc_2.audio_tensor.save(file_path='/tmp/file_2.wav')
    doc_2.bytes_ = doc_2.audio_tensor.to_bytes()
```
---
"""
...
|
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.ndarray import NdArray
@_register_proto(proto_type_name='audio_ndarray')
class AudioNdArray(AbstractAudioTensor, NdArray):
"""
Subclass of [`NdArray`][docarray.typing.NdArray], to represent an audio tensor.
Adds audio-specific features to the tensor.
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import AudioBytes, AudioNdArray, AudioUrl
import numpy as np
class MyAudioDoc(BaseDoc):
title: str
audio_tensor: Optional[AudioNdArray]
url: Optional[AudioUrl]
bytes_: Optional[AudioBytes]
# from tensor
doc_1 = MyAudioDoc(
title='my_first_audio_doc',
audio_tensor=np.random.rand(1000, 2),
)
# doc_1.audio_tensor.save(file_path='/tmp/file_1.wav')
doc_1.bytes_ = doc_1.audio_tensor.to_bytes()
# from url
doc_2 = MyAudioDoc(
title='my_second_audio_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.audio_tensor, _ = doc_2.url.load()
# doc_2.audio_tensor.save(file_path='/tmp/file_2.wav')
    doc_2.bytes_ = doc_2.audio_tensor.to_bytes()
```
---
"""
...
|
"""
This file evaluates CrossEncoder on the TREC 2019 Deep Learning (DL) Track: https://arxiv.org/abs/2003.07820
TREC 2019 DL is based on the corpus of MS MARCO. MS MARCO provides sparse annotations, i.e., usually only a single
passage is marked as relevant for a given query. Many other highly relevant passages are not annotated and are hence
counted as errors if a model ranks them highly.
TREC DL instead annotated up to 200 passages per query for their relevance, which makes it better suited to estimating
model performance on the reranking task in Information Retrieval.
Run:
python eval_cross-encoder-trec-dl.py cross-encoder-model-name
"""
import gzip
import logging
import os
import sys
from collections import defaultdict
import numpy as np
import pytrec_eval
import tqdm
from sentence_transformers import CrossEncoder, util
data_folder = "trec2019-data"
os.makedirs(data_folder, exist_ok=True)
# Read test queries
queries = {}
queries_filepath = os.path.join(data_folder, "msmarco-test2019-queries.tsv.gz")
if not os.path.exists(queries_filepath):
logging.info("Download " + os.path.basename(queries_filepath))
util.http_get(
"https://msmarco.z22.web.core.windows.net/msmarcoranking/msmarco-test2019-queries.tsv.gz", queries_filepath
)
with gzip.open(queries_filepath, "rt", encoding="utf8") as fIn:
for line in fIn:
qid, query = line.strip().split("\t")
queries[qid] = query
# Read which passages are relevant
relevant_docs = defaultdict(lambda: defaultdict(int))
qrels_filepath = os.path.join(data_folder, "2019qrels-pass.txt")
if not os.path.exists(qrels_filepath):
logging.info("Download " + os.path.basename(qrels_filepath))
util.http_get("https://trec.nist.gov/data/deep/2019qrels-pass.txt", qrels_filepath)
with open(qrels_filepath) as fIn:
for line in fIn:
qid, _, pid, score = line.strip().split()
score = int(score)
if score > 0:
relevant_docs[qid][pid] = score
# Only use queries that have at least one relevant passage
relevant_qid = []
for qid in queries:
if len(relevant_docs[qid]) > 0:
relevant_qid.append(qid)
# Read the top 1000 passages that are supposed to be re-ranked
passage_filepath = os.path.join(data_folder, "msmarco-passagetest2019-top1000.tsv.gz")
if not os.path.exists(passage_filepath):
logging.info("Download " + os.path.basename(passage_filepath))
util.http_get(
"https://msmarco.z22.web.core.windows.net/msmarcoranking/msmarco-passagetest2019-top1000.tsv.gz",
passage_filepath,
)
passage_cand = {}
with gzip.open(passage_filepath, "rt", encoding="utf8") as fIn:
for line in fIn:
qid, pid, query, passage = line.strip().split("\t")
if qid not in passage_cand:
passage_cand[qid] = []
passage_cand[qid].append([pid, passage])
logging.info(f"Queries: {len(queries)}")
queries_result_list = []
run = {}
model = CrossEncoder(sys.argv[1], max_length=512)
for qid in tqdm.tqdm(relevant_qid):
query = queries[qid]
cand = passage_cand[qid]
pids = [c[0] for c in cand]
corpus_sentences = [c[1] for c in cand]
cross_inp = [[query, sent] for sent in corpus_sentences]
    if model.config.num_labels > 1:  # Cross-Encoders that predict more than one score: use the last score and apply softmax
cross_scores = model.predict(cross_inp, apply_softmax=True)[:, 1].tolist()
else:
cross_scores = model.predict(cross_inp).tolist()
cross_scores_sparse = {}
for idx, pid in enumerate(pids):
cross_scores_sparse[pid] = cross_scores[idx]
sparse_scores = cross_scores_sparse
run[qid] = {}
for pid in sparse_scores:
run[qid][pid] = float(sparse_scores[pid])
evaluator = pytrec_eval.RelevanceEvaluator(relevant_docs, {"ndcg_cut.10"})
scores = evaluator.evaluate(run)
print("Queries:", len(relevant_qid))
print("NDCG@10: {:.2f}".format(np.mean([ele["ndcg_cut_10"] for ele in scores.values()]) * 100))
|
"""
This file evaluates CrossEncoder on the TREC 2019 Deep Learning (DL) Track: https://arxiv.org/abs/2003.07820
TREC 2019 DL is based on the corpus of MS MARCO. MS MARCO provides sparse annotations, i.e., usually only a single
passage is marked as relevant for a given query. Many other highly relevant passages are not annotated and are hence
counted as errors if a model ranks them highly.
TREC DL instead annotated up to 200 passages per query for their relevance, which makes it better suited to estimating
model performance on the reranking task in Information Retrieval.
Run:
python eval_cross-encoder-trec-dl.py cross-encoder-model-name
"""
import gzip
import logging
import os
import sys
from collections import defaultdict
import numpy as np
import pytrec_eval
import tqdm
from sentence_transformers import CrossEncoder, util
data_folder = "trec2019-data"
os.makedirs(data_folder, exist_ok=True)
# Read test queries
queries = {}
queries_filepath = os.path.join(data_folder, "msmarco-test2019-queries.tsv.gz")
if not os.path.exists(queries_filepath):
logging.info("Download " + os.path.basename(queries_filepath))
util.http_get(
"https://msmarco.z22.web.core.windows.net/msmarcoranking/msmarco-test2019-queries.tsv.gz", queries_filepath
)
with gzip.open(queries_filepath, "rt", encoding="utf8") as fIn:
for line in fIn:
qid, query = line.strip().split("\t")
queries[qid] = query
# Read which passages are relevant
relevant_docs = defaultdict(lambda: defaultdict(int))
qrels_filepath = os.path.join(data_folder, "2019qrels-pass.txt")
if not os.path.exists(qrels_filepath):
logging.info("Download " + os.path.basename(qrels_filepath))
util.http_get("https://trec.nist.gov/data/deep/2019qrels-pass.txt", qrels_filepath)
with open(qrels_filepath) as fIn:
for line in fIn:
qid, _, pid, score = line.strip().split()
score = int(score)
if score > 0:
relevant_docs[qid][pid] = score
# Only use queries that have at least one relevant passage
relevant_qid = []
for qid in queries:
if len(relevant_docs[qid]) > 0:
relevant_qid.append(qid)
# Read the top 1000 passages that are supposed to be re-ranked
passage_filepath = os.path.join(data_folder, "msmarco-passagetest2019-top1000.tsv.gz")
if not os.path.exists(passage_filepath):
logging.info("Download " + os.path.basename(passage_filepath))
util.http_get(
"https://msmarco.z22.web.core.windows.net/msmarcoranking/msmarco-passagetest2019-top1000.tsv.gz",
passage_filepath,
)
passage_cand = {}
with gzip.open(passage_filepath, "rt", encoding="utf8") as fIn:
for line in fIn:
qid, pid, query, passage = line.strip().split("\t")
if qid not in passage_cand:
passage_cand[qid] = []
passage_cand[qid].append([pid, passage])
logging.info("Queries: {}".format(len(queries)))
queries_result_list = []
run = {}
model = CrossEncoder(sys.argv[1], max_length=512)
for qid in tqdm.tqdm(relevant_qid):
query = queries[qid]
cand = passage_cand[qid]
pids = [c[0] for c in cand]
corpus_sentences = [c[1] for c in cand]
cross_inp = [[query, sent] for sent in corpus_sentences]
    if model.config.num_labels > 1:  # Cross-Encoders that predict more than one score: use the last score and apply softmax
cross_scores = model.predict(cross_inp, apply_softmax=True)[:, 1].tolist()
else:
cross_scores = model.predict(cross_inp).tolist()
cross_scores_sparse = {}
for idx, pid in enumerate(pids):
cross_scores_sparse[pid] = cross_scores[idx]
sparse_scores = cross_scores_sparse
run[qid] = {}
for pid in sparse_scores:
run[qid][pid] = float(sparse_scores[pid])
evaluator = pytrec_eval.RelevanceEvaluator(relevant_docs, {"ndcg_cut.10"})
scores = evaluator.evaluate(run)
print("Queries:", len(relevant_qid))
print("NDCG@10: {:.2f}".format(np.mean([ele["ndcg_cut_10"] for ele in scores.values()]) * 100))
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.documents import Audio
from docarray.typing import AnyEmbedding, AnyTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.video.video_tensor import VideoTensor
from docarray.typing.url.video_url import VideoUrl
try:
import torch
torch_available = True
except ImportError:
torch_available = False
T = TypeVar('T', bound='Video')
class Video(BaseDocument):
"""
Document for handling video.
The Video Document can contain a VideoUrl (`Video.url`), an Audio Document
(`Video.audio`), a VideoTensor (`Video.tensor`), an AnyTensor representing
the indices of the video's key frames (`Video.key_frame_indices`) and an
AnyEmbedding (`Video.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import Video
# use it directly
vid = Video(
url='https://github.com/docarray/docarray/tree/feat-add-video-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
vid.audio.tensor, vid.tensor, vid.key_frame_indices = vid.url.load()
model = MyEmbeddingModel()
vid.embedding = model(vid.tensor)
You can extend this Document:
.. code-block:: python
from typing import Optional
from docarray.documents import Text, Video
# extend it
class MyVideo(Video):
name: Optional[Text]
video = MyVideo(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
video.video_tensor = video.url.load().video
model = MyEmbeddingModel()
video.embedding = model(video.tensor)
video.name = Text(text='my first video')
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import Text, Video
# compose it
class MultiModalDoc(BaseDocument):
video: Video
text: Text
mmdoc = MultiModalDoc(
video=Video(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.video.video_tensor = mmdoc.video.url.load().video
# or
mmdoc.video.bytes = mmdoc.video.url.load_bytes()
"""
url: Optional[VideoUrl]
audio: Optional[Audio] = Audio()
tensor: Optional[VideoTensor]
key_frame_indices: Optional[AnyTensor]
embedding: Optional[AnyEmbedding]
bytes: Optional[bytes] = None
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available and isinstance(value, torch.Tensor)
):
value = cls(tensor=value)
return super().validate(value)
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.documents import Audio
from docarray.typing import AnyEmbedding, AnyTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.video.video_tensor import VideoTensor
from docarray.typing.url.video_url import VideoUrl
try:
import torch
torch_available = True
except ImportError:
torch_available = False
T = TypeVar('T', bound='Video')
class Video(BaseDocument):
"""
Document for handling video.
The Video Document can contain a VideoUrl (`Video.url`), an Audio Document
(`Video.audio`), a VideoTensor (`Video.tensor`), an AnyTensor representing
the indices of the video's key frames (`Video.key_frame_indices`) and an
AnyEmbedding (`Video.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import Video
# use it directly
vid = Video(
url='https://github.com/docarray/docarray/tree/feat-add-video-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
vid.audio.tensor, vid.tensor, vid.key_frame_indices = vid.url.load()
model = MyEmbeddingModel()
vid.embedding = model(vid.tensor)
You can extend this Document:
.. code-block:: python
from typing import Optional
from docarray.documents import Text, Video
# extend it
class MyVideo(Video):
name: Optional[Text]
video = MyVideo(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
video.video_tensor = video.url.load().video
model = MyEmbeddingModel()
video.embedding = model(video.tensor)
video.name = Text(text='my first video')
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import Text, Video
# compose it
class MultiModalDoc(BaseDocument):
video: Video
text: Text
mmdoc = MultiModalDoc(
video=Video(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.video.video_tensor = mmdoc.video.url.load().video
"""
url: Optional[VideoUrl]
audio: Optional[Audio] = Audio()
tensor: Optional[VideoTensor]
key_frame_indices: Optional[AnyTensor]
embedding: Optional[AnyEmbedding]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available and isinstance(value, torch.Tensor)
):
value = cls(tensor=value)
return super().validate(value)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.playwright.toolkit import (
PlayWrightBrowserToolkit,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"PlayWrightBrowserToolkit": "langchain_community.agent_toolkits.playwright.toolkit",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"PlayWrightBrowserToolkit",
]
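`create_importer` builds on module-level `__getattr__` (PEP 562). Below is a rough, self-contained sketch of the underlying pattern, with a hypothetical lookup table standing in for what the langchain helper wires up (the real helper also consolidates optional-import handling):

import importlib
import warnings

_LOOKUP = {
    "PlayWrightBrowserToolkit": "langchain_community.agent_toolkits.playwright.toolkit",
}

def __getattr__(name: str):
    """Resolve deprecated attributes lazily, with a deprecation warning."""
    if name in _LOOKUP:
        warnings.warn(
            f"Importing {name} from here is deprecated; import it from {_LOOKUP[name]} instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        return getattr(importlib.import_module(_LOOKUP[name]), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")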
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.playwright.toolkit import (
PlayWrightBrowserToolkit,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"PlayWrightBrowserToolkit": "langchain_community.agent_toolkits.playwright.toolkit"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"PlayWrightBrowserToolkit",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class VFNet(SingleStageDetector):
"""Implementation of `VarifocalNet
(VFNet).<https://arxiv.org/abs/2008.13367>`_
Args:
backbone (:obj:`ConfigDict` or dict): The backbone module.
neck (:obj:`ConfigDict` or dict): The neck module.
bbox_head (:obj:`ConfigDict` or dict): The bbox head module.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
of VFNet. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of VFNet. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or dict, optional): the config to control
the initialization. Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
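Because VFNet is registered via `@MODELS.register_module()`, it is normally built from a config dict rather than instantiated directly. A hedged, illustrative fragment of what such a config might look like (field values are placeholders, not the reference VFNet settings):

# Illustrative only; MMDetection builds the detector from a dict like this.
model = dict(
    type='VFNet',
    backbone=dict(type='ResNet', depth=50),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
    ),
    bbox_head=dict(type='VFNetHead', num_classes=80),
    train_cfg=None,
    test_cfg=dict(score_thr=0.05),
)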
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class VFNet(SingleStageDetector):
"""Implementation of `VarifocalNet
(VFNet).<https://arxiv.org/abs/2008.13367>`_
Args:
backbone (:obj:`ConfigDict` or dict): The backbone module.
neck (:obj:`ConfigDict` or dict): The neck module.
bbox_head (:obj:`ConfigDict` or dict): The bbox head module.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
of VFNet. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of VFNet. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or dict, optional): the config to control
the initialization. Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
import gzip
import logging
import os
from datetime import datetime
import torch
from sentence_transformers import LoggingHandler, SentenceTransformer, evaluation, losses, models, util
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Some training parameters. We use a batch size of 16; for every positive example we include 8-1=7 negative examples
# Sentences are truncated to 75 word pieces
model_name = "distilbert-base-uncased"
batch_size = 16
pos_neg_ratio = 8  # batch_size must be divisible by pos_neg_ratio
max_seq_length = 75
num_epochs = 1
################# Download AskUbuntu and extract training corpus #################
askubuntu_folder = "askubuntu"
output_path = "output/train_askubuntu_ct-{}-{}-{}".format(
model_name, batch_size, datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
## Download the AskUbuntu dataset from https://github.com/taolei87/askubuntu
for filename in ["text_tokenized.txt.gz", "dev.txt", "test.txt", "train_random.txt"]:
filepath = os.path.join(askubuntu_folder, filename)
if not os.path.exists(filepath):
util.http_get("https://github.com/taolei87/askubuntu/raw/master/" + filename, filepath)
# Read the corpus
corpus = {}
dev_test_ids = set()
with gzip.open(os.path.join(askubuntu_folder, "text_tokenized.txt.gz"), "rt", encoding="utf8") as fIn:
for line in fIn:
splits = line.strip().split("\t")
id = splits[0]
title = splits[1]
corpus[id] = title
# Read dev & test dataset
def read_eval_dataset(filepath):
dataset = []
with open(filepath) as fIn:
for line in fIn:
query_id, relevant_id, candidate_ids, bm25_scores = line.strip().split("\t")
if len(relevant_id) == 0: # Skip examples without relevant entries
continue
relevant_id = relevant_id.split(" ")
candidate_ids = candidate_ids.split(" ")
negative_ids = set(candidate_ids) - set(relevant_id)
dataset.append(
{
"query": corpus[query_id],
"positive": [corpus[pid] for pid in relevant_id],
"negative": [corpus[pid] for pid in negative_ids],
}
)
dev_test_ids.add(query_id)
dev_test_ids.update(candidate_ids)
return dataset
dev_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "dev.txt"))
test_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "test.txt"))
## Now we need a list of train sentences.
## In this example we simply use all sentences that don't appear in the dev/test set
train_sentences = []
for id, sentence in corpus.items():
if id not in dev_test_ids:
train_sentences.append(sentence)
logging.info("{} train sentences".format(len(train_sentences)))
################# Initialize an SBERT model #################
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Train the model #################
# For ContrastiveTension we need a special data loader to construct batches with the desired properties
train_dataloader = losses.ContrastiveTensionDataLoader(
train_sentences, batch_size=batch_size, pos_neg_ratio=pos_neg_ratio
)
# As loss, we use losses.ContrastiveTensionLoss
train_loss = losses.ContrastiveTensionLoss(model)
# Create a dev evaluator
dev_evaluator = evaluation.RerankingEvaluator(dev_dataset, name="AskUbuntu dev")
test_evaluator = evaluation.RerankingEvaluator(test_dataset, name="AskUbuntu test")
logging.info("Start training")
model.fit(
train_objectives=[(train_dataloader, train_loss)],
    epochs=num_epochs,
weight_decay=0,
warmup_steps=0,
optimizer_class=torch.optim.RMSprop,
optimizer_params={"lr": 1e-5},
use_amp=False, # Set to True, if your GPU has optimized FP16 cores
)
latest_output_path = output_path + "-latest"
model.save(latest_output_path)
### Run test evaluation on the latest model. This is equivalent to not having a dev dataset
model = SentenceTransformer(latest_output_path)
test_evaluator(model)
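The batch composition implied by `pos_neg_ratio` is worth spelling out: with `batch_size=16` and `pos_neg_ratio=8`, every batch holds 16/8 = 2 identical (positive) sentence pairs, and the remaining 14 pairs are mismatched negatives. A quick sketch of that arithmetic:

batch_size = 16
pos_neg_ratio = 8
assert batch_size % pos_neg_ratio == 0, "batch_size must be divisible by pos_neg_ratio"

num_positives = batch_size // pos_neg_ratio  # 2 identical sentence pairs per batch
num_negatives = batch_size - num_positives   # 14 mismatched pairs per batch
print(num_positives, num_negatives)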
|
from sentence_transformers import SentenceTransformer, LoggingHandler
from sentence_transformers import models, util, evaluation, losses
import logging
import os
import gzip
from datetime import datetime
import torch
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Some training parameters. We use a batch size of 16; for every positive example we include 8-1=7 negative examples
# Sentences are truncated to 75 word pieces
model_name = "distilbert-base-uncased"
batch_size = 16
pos_neg_ratio = 8  # batch_size must be divisible by pos_neg_ratio
max_seq_length = 75
num_epochs = 1
################# Download AskUbuntu and extract training corpus #################
askubuntu_folder = "askubuntu"
output_path = "output/train_askubuntu_ct-{}-{}-{}".format(
model_name, batch_size, datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
## Download the AskUbuntu dataset from https://github.com/taolei87/askubuntu
for filename in ["text_tokenized.txt.gz", "dev.txt", "test.txt", "train_random.txt"]:
filepath = os.path.join(askubuntu_folder, filename)
if not os.path.exists(filepath):
util.http_get("https://github.com/taolei87/askubuntu/raw/master/" + filename, filepath)
# Read the corpus
corpus = {}
dev_test_ids = set()
with gzip.open(os.path.join(askubuntu_folder, "text_tokenized.txt.gz"), "rt", encoding="utf8") as fIn:
for line in fIn:
splits = line.strip().split("\t")
id = splits[0]
title = splits[1]
corpus[id] = title
# Read dev & test dataset
def read_eval_dataset(filepath):
dataset = []
with open(filepath) as fIn:
for line in fIn:
query_id, relevant_id, candidate_ids, bm25_scores = line.strip().split("\t")
if len(relevant_id) == 0: # Skip examples without relevant entries
continue
relevant_id = relevant_id.split(" ")
candidate_ids = candidate_ids.split(" ")
negative_ids = set(candidate_ids) - set(relevant_id)
dataset.append(
{
"query": corpus[query_id],
"positive": [corpus[pid] for pid in relevant_id],
"negative": [corpus[pid] for pid in negative_ids],
}
)
dev_test_ids.add(query_id)
dev_test_ids.update(candidate_ids)
return dataset
dev_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "dev.txt"))
test_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "test.txt"))
## Now we need a list of train sentences.
## In this example we simply use all sentences that don't appear in the dev/test set
train_sentences = []
for id, sentence in corpus.items():
if id not in dev_test_ids:
train_sentences.append(sentence)
logging.info("{} train sentences".format(len(train_sentences)))
################# Initialize an SBERT model #################
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Train the model #################
# For ContrastiveTension we need a special data loader to construct batches with the desired properties
train_dataloader = losses.ContrastiveTensionDataLoader(
train_sentences, batch_size=batch_size, pos_neg_ratio=pos_neg_ratio
)
# As loss, we use losses.ContrastiveTensionLoss
train_loss = losses.ContrastiveTensionLoss(model)
# Create a dev evaluator
dev_evaluator = evaluation.RerankingEvaluator(dev_dataset, name="AskUbuntu dev")
test_evaluator = evaluation.RerankingEvaluator(test_dataset, name="AskUbuntu test")
logging.info("Start training")
model.fit(
train_objectives=[(train_dataloader, train_loss)],
    epochs=num_epochs,
weight_decay=0,
warmup_steps=0,
optimizer_class=torch.optim.RMSprop,
optimizer_params={"lr": 1e-5},
use_amp=False, # Set to True, if your GPU has optimized FP16 cores
)
latest_output_path = output_path + "-latest"
model.save(latest_output_path)
### Run test evaluation on the latest model. This is equivalent to not having a dev dataset
model = SentenceTransformer(latest_output_path)
test_evaluator(model)
|
"""
This example loads a pre-trained SentenceTransformer model from Hugging Face (by default 'sentence-transformers/all-mpnet-base-v2').
It then fine-tunes this model for some epochs on the STS benchmark dataset.
Note: In this example, you must specify a SentenceTransformer model.
If you want to fine-tune a huggingface/transformers model like bert-base-uncased, see training_nli.py and training_stsbenchmark.py
"""
import traceback
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, losses
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
import logging
from datetime import datetime
import sys
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# You can specify any Sentence Transformer model here, for example all-mpnet-base-v2, all-MiniLM-L6-v2, mixedbread-ai/mxbai-embed-large-v1
model_name = sys.argv[1] if len(sys.argv) > 1 else "sentence-transformers/all-mpnet-base-v2"
train_batch_size = 16
num_epochs = 4
output_dir = (
"output/training_stsbenchmark_" + model_name.replace("/", "-") + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
# 1. Here we define our SentenceTransformer model.
model = SentenceTransformer(model_name)
# 2. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 3. Define our training loss
# CosineSimilarityLoss (https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosinesimilarityloss) needs two text columns and one
# similarity score column (between 0 and 1)
train_loss = losses.CosineSimilarityLoss(model=model)
# train_loss = losses.CoSENTLoss(model=model)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="sts", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-sts")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-sts')`."
)
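Once saved, the fine-tuned model can be loaded back for plain similarity scoring. A minimal sketch, assuming sentence-transformers v3 (which this script already requires) and a hypothetical output path:

from sentence_transformers import SentenceTransformer

# Hypothetical path; use whatever `final_output_dir` the run above produced.
model = SentenceTransformer("output/training_stsbenchmark_all-mpnet-base-v2-2024-01-01_00-00-00/final")

embeddings = model.encode(["A man is playing guitar.", "Someone plays an instrument."])
print(model.similarity(embeddings[0], embeddings[1]))  # cosine similarity by default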
|
"""
This example loads a pre-trained SentenceTransformer model from Hugging Face (by default 'sentence-transformers/all-mpnet-base-v2').
It then fine-tunes this model for some epochs on the STS benchmark dataset.
Note: In this example, you must specify a SentenceTransformer model.
If you want to fine-tune a huggingface/transformers model like bert-base-uncased, see training_nli.py and training_stsbenchmark.py
"""
import traceback
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, losses
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
import logging
from datetime import datetime
import sys
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# You can specify any Sentence Transformer model here, for example all-mpnet-base-v2, all-MiniLM-L6-v2, mixedbread-ai/mxbai-embed-large-v1
model_name = sys.argv[1] if len(sys.argv) > 1 else "sentence-transformers/all-mpnet-base-v2"
train_batch_size = 16
num_epochs = 4
output_dir = (
"output/training_stsbenchmark_" + model_name.replace("/", "-") + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
# 1. Here we define our SentenceTransformer model.
model = SentenceTransformer(model_name)
# 2. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 3. Define our training loss
# CosineSimilarityLoss (https://sbert.net/docs/package_reference/losses.html#cosinesimilarityloss) needs two text columns and one
# similarity score column (between 0 and 1)
train_loss = losses.CosineSimilarityLoss(model=model)
# train_loss = losses.CoSENTLoss(model=model)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="sts", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-sts")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-sts')`."
)
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='DocArray team',
author_email='[email protected]',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
install_requires=['numpy', 'rich>=12.0.0', 'jina-hubble-sdk>=0.24.0'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
],
'full': [
'protobuf>=3.13.0',
'grpcio>=1.46.0,<1.48.1',
'grpcio-reflection>=1.46.0,<1.48.1',
'grpcio-health-checking>=1.46.0,<1.48.1',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
],
'qdrant': [
'qdrant-client~=0.10.3',
],
'annlite': [
'annlite',
],
'weaviate': [
'weaviate-client~=3.9.0',
],
'elasticsearch': [
'elasticsearch>=8.2.0',
],
'redis': [
'redis>=4.3.0',
],
'benchmark': [
'pandas',
'matplotlib',
'seaborn',
'h5py',
],
'test': [
'protobuf>=3.13.0,<=3.20.0', # pip dependency resolution does not respect this restriction from paddle
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov==3.0.0',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.9.0',
'annlite',
'elasticsearch>=8.2.0',
'redis>=4.3.0',
'jina',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
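The `exec(version_line)` trick above runs a line of `__init__.py` to recover `__version__`. A common alternative, sketched here under the same single-line assumption, is a regex parse that avoids executing package code:

import re

def read_version(libinfo_py: str) -> str:
    # Assumes a line like: __version__ = '0.21.0'
    with open(libinfo_py, encoding='utf8') as f:
        match = re.search(r"^__version__\s*=\s*['\"]([^'\"]+)['\"]", f.read(), re.M)
    if match is None:
        raise RuntimeError(f'cannot find __version__ in {libinfo_py}')
    return match.group(1)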
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='DocArray team',
author_email='[email protected]',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
install_requires=['numpy', 'rich>=12.0.0', 'jina-hubble-sdk>=0.24.0'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
],
'full': [
'protobuf>=3.13.0',
'grpcio>=1.46.0,<1.48.1',
'grpcio-reflection>=1.46.0,<1.48.1',
'grpcio-health-checking>=1.46.0,<1.48.1',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
],
'qdrant': [
'qdrant-client~=0.10.3',
],
'annlite': [
'annlite',
],
'weaviate': [
'weaviate-client~=3.9.0',
],
'elasticsearch': [
'elasticsearch>=8.2.0',
],
'redis': [
'redis>=4.3.0',
],
'benchmark': [
'pandas',
'seaborn',
],
'test': [
'protobuf>=3.13.0,<=3.20.0', # pip dependency resolution does not respect this restriction from paddle
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov==3.0.0',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.9.0',
'annlite',
'elasticsearch>=8.2.0',
'redis>=4.3.0',
'jina',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
import argparse
import logging
import torch
import torchaudio
from torchaudio.models.decoder import ctc_decoder, download_pretrained_files
logger = logging.getLogger(__name__)
def run_inference(args):
# get pretrained wav2vec2.0 model
bundle = getattr(torchaudio.pipelines, args.model)
model = bundle.get_model()
# get decoder files
files = download_pretrained_files("librispeech-4-gram")
decoder = ctc_decoder(
lexicon=files.lexicon,
tokens=files.tokens,
lm=files.lm,
nbest=args.nbest,
beam_size=args.beam_size,
beam_size_token=args.beam_size_token,
beam_threshold=args.beam_threshold,
lm_weight=args.lm_weight,
word_score=args.word_score,
unk_score=args.unk_score,
sil_score=args.sil_score,
log_add=False,
)
dataset = torchaudio.datasets.LIBRISPEECH(args.librispeech_path, url=args.split, download=False)
total_edit_distance = 0
total_length = 0
for idx, sample in enumerate(dataset):
waveform, _, transcript, _, _, _ = sample
        transcript = transcript.strip().lower()
with torch.inference_mode():
emission, _ = model(waveform)
results = decoder(emission)
total_edit_distance += torchaudio.functional.edit_distance(transcript.split(), results[0][0].words)
total_length += len(transcript.split())
if idx % 100 == 0:
logger.info(f"Processed elem {idx}; WER: {total_edit_distance / total_length}")
logger.info(f"Final WER: {total_edit_distance / total_length}")
def _parse_args():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument(
"--librispeech_path",
type=str,
help="folder where LibriSpeech is stored",
)
parser.add_argument(
"--split",
type=str,
help="LibriSpeech dataset split",
choices=["dev-clean", "dev-other", "test-clean", "test-other"],
default="test-other",
)
parser.add_argument(
"--model",
type=str,
default="WAV2VEC2_ASR_BASE_960H",
help="pretrained Wav2Vec2 model from torchaudio.pipelines",
)
parser.add_argument("--nbest", type=int, default=1, help="number of best hypotheses to return")
parser.add_argument(
"--beam-size", type=int, default=500, help="beam size for determining number of hypotheses to store"
)
parser.add_argument(
"--beam-size-token",
        type=int,
default=None,
help="number of tokens to consider at each beam search step",
)
parser.add_argument("--beam-threshold", type=int, default=50, help="beam threshold for pruning hypotheses")
parser.add_argument(
"--lm-weight",
type=float,
default=1.74,
help="languge model weight",
)
parser.add_argument(
"--word-score",
type=float,
default=0.52,
help="word insertion score",
)
parser.add_argument("--unk_score", type=float, default=float("-inf"), help="unknown word insertion score")
parser.add_argument("--sil_score", type=float, default=0, help="silence insertion score")
parser.add_argument("--debug", action="store_true", help="whether to use debug level for logging")
return parser.parse_args()
def _init_logger(debug):
fmt = "%(asctime)s %(message)s" if debug else "%(message)s"
level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(format=fmt, level=level, datefmt="%Y-%m-%d %H:%M:%S")
def _main():
args = _parse_args()
_init_logger(args.debug)
run_inference(args)
if __name__ == "__main__":
_main()
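The WER accumulated in `run_inference` is just word-level edit distance divided by total reference length. A toy check with hypothetical strings, using the same `edit_distance` helper as above:

import torchaudio

reference = "the cat sat on the mat".split()
hypothesis = "the cat sat on a mat".split()

errors = torchaudio.functional.edit_distance(reference, hypothesis)
print(f"WER: {errors / len(reference):.3f}")  # one substitution over six words -> 0.167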
|
import argparse
import logging
import torch
import torchaudio
from torchaudio.prototype.ctc_decoder import download_pretrained_files, lexicon_decoder
logger = logging.getLogger(__name__)
def run_inference(args):
# get pretrained wav2vec2.0 model
bundle = getattr(torchaudio.pipelines, args.model)
model = bundle.get_model()
# get decoder files
files = download_pretrained_files("librispeech-4-gram")
decoder = lexicon_decoder(
lexicon=files.lexicon,
tokens=files.tokens,
lm=files.lm,
nbest=args.nbest,
beam_size=args.beam_size,
beam_size_token=args.beam_size_token,
beam_threshold=args.beam_threshold,
lm_weight=args.lm_weight,
word_score=args.word_score,
unk_score=args.unk_score,
sil_score=args.sil_score,
log_add=False,
)
dataset = torchaudio.datasets.LIBRISPEECH(args.librispeech_path, url=args.split, download=False)
total_edit_distance = 0
total_length = 0
for idx, sample in enumerate(dataset):
waveform, _, transcript, _, _, _ = sample
        transcript = transcript.strip().lower()
with torch.inference_mode():
emission, _ = model(waveform)
results = decoder(emission)
total_edit_distance += torchaudio.functional.edit_distance(transcript.split(), results[0][0].words)
total_length += len(transcript.split())
if idx % 100 == 0:
logger.info(f"Processed elem {idx}; WER: {total_edit_distance / total_length}")
logger.info(f"Final WER: {total_edit_distance / total_length}")
def _parse_args():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument(
"--librispeech_path",
type=str,
help="folder where LibriSpeech is stored",
)
parser.add_argument(
"--split",
type=str,
help="LibriSpeech dataset split",
choices=["dev-clean", "dev-other", "test-clean", "test-other"],
default="test-other",
)
parser.add_argument(
"--model",
type=str,
default="WAV2VEC2_ASR_BASE_960H",
help="pretrained Wav2Vec2 model from torchaudio.pipelines",
)
parser.add_argument("--nbest", type=int, default=1, help="number of best hypotheses to return")
parser.add_argument(
"--beam-size", type=int, default=500, help="beam size for determining number of hypotheses to store"
)
parser.add_argument(
"--beam-size-token",
        type=int,
default=None,
help="number of tokens to consider at each beam search step",
)
parser.add_argument("--beam-threshold", type=int, default=50, help="beam threshold for pruning hypotheses")
parser.add_argument(
"--lm-weight",
type=float,
default=1.74,
help="languge model weight",
)
parser.add_argument(
"--word-score",
type=float,
default=0.52,
help="word insertion score",
)
parser.add_argument("--unk_score", type=float, default=float("-inf"), help="unknown word insertion score")
parser.add_argument("--sil_score", type=float, default=0, help="silence insertion score")
parser.add_argument("--debug", action="store_true", help="whether to use debug level for logging")
return parser.parse_args()
def _init_logger(debug):
fmt = "%(asctime)s %(message)s" if debug else "%(message)s"
level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(format=fmt, level=level, datefmt="%Y-%m-%d %H:%M:%S")
def _main():
args = _parse_args()
_init_logger(args.debug)
run_inference(args)
if __name__ == "__main__":
_main()
|