"""DashScope api utils."""
from http import HTTPStatus
from typing import Any, Dict, List, Sequence, cast
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
CompletionResponse,
ImageBlock,
)
from llama_index.core.base.llms.generic_utils import image_node_to_image_block
from llama_index.core.schema import ImageDocument, ImageNode
def dashscope_response_to_completion_response(response: Any) -> CompletionResponse:
if response["status_code"] == HTTPStatus.OK:
content = response["output"]["choices"][0]["message"]["content"]
if content:
content = content[0]["text"]
else:
content = ""
return CompletionResponse(text=content, raw=response)
else:
return CompletionResponse(text="", raw=response)
def dashscope_response_to_chat_response(
response: Any,
) -> ChatResponse:
if response["status_code"] == HTTPStatus.OK:
content = response["output"]["choices"][0]["message"]["content"]
role = response["output"]["choices"][0]["message"]["role"]
return ChatResponse(
message=ChatMessage(role=role, content=content), raw=response
)
else:
return ChatResponse(message=ChatMessage(), raw=response)
def chat_message_to_dashscope_multi_modal_messages(
chat_messages: Sequence[ChatMessage],
) -> List[Dict]:
messages = []
for msg in chat_messages:
messages.append({"role": msg.role.value, "content": msg.content})
return messages
def create_dashscope_multi_modal_chat_message(
prompt: str, role: str, image_documents: Sequence[ImageDocument]
) -> ChatMessage:
if image_documents is None:
message = ChatMessage(role=role, content=[{"text": prompt}])
else:
        if all(isinstance(doc, ImageNode) for doc in image_documents):
            image_docs: List[ImageBlock] = [
                image_node_to_image_block(doc) for doc in image_documents
            ]
else:
image_docs = cast(List[ImageBlock], image_documents)
content = []
for image_document in image_docs:
content.append(
{
"image": (
image_document.image
if image_document.url is not None
else image_document.path
)
}
)
content.append({"text": prompt})
message = ChatMessage(role=role, content=content)
return message
def load_local_images(local_images: List[str]) -> List[ImageDocument]:
# load images into image documents
image_documents = []
for _, img in enumerate(local_images):
new_image_document = ImageDocument(image_path=img)
image_documents.append(new_image_document)
return image_documents
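# --- Hedged usage sketch (illustrative, not part of the module) ---
# Shows how these helpers are typically combined. The file path and the
# MessageRole import are assumptions for the example, not taken from this file.
#
# from llama_index.core.base.llms.types import MessageRole
#
# image_docs = load_local_images(["./image.png"])
# message = create_dashscope_multi_modal_chat_message(
#     prompt="Describe this image.",
#     role=MessageRole.USER.value,
#     image_documents=image_docs,
# )
# # message.content is a list such as
# # [{"image": "./image.png"}, {"text": "Describe this image."}]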
|
"""DashScope api utils."""
from http import HTTPStatus
from typing import Any, Dict, List, Sequence
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
CompletionResponse,
)
from llama_index.core.schema import ImageDocument
def dashscope_response_to_completion_response(response: Any) -> CompletionResponse:
if response["status_code"] == HTTPStatus.OK:
content = response["output"]["choices"][0]["message"]["content"]
if content:
content = content[0]["text"]
else:
content = ""
return CompletionResponse(text=content, raw=response)
else:
return CompletionResponse(text="", raw=response)
def dashscope_response_to_chat_response(
response: Any,
) -> ChatResponse:
if response["status_code"] == HTTPStatus.OK:
content = response["output"]["choices"][0]["message"]["content"]
role = response["output"]["choices"][0]["message"]["role"]
return ChatResponse(
message=ChatMessage(role=role, content=content), raw=response
)
else:
return ChatResponse(message=ChatMessage(), raw=response)
def chat_message_to_dashscope_multi_modal_messages(
chat_messages: Sequence[ChatMessage],
) -> List[Dict]:
messages = []
for msg in chat_messages:
messages.append({"role": msg.role.value, "content": msg.content})
return messages
def create_dashscope_multi_modal_chat_message(
prompt: str, role: str, image_documents: Sequence[ImageDocument]
) -> ChatMessage:
if image_documents is None:
message = ChatMessage(role=role, content=[{"text": prompt}])
else:
content = []
for image_document in image_documents:
content.append(
{
"image": (
image_document.image_url
if image_document.image_url is not None
else image_document.image_path
)
}
)
content.append({"text": prompt})
message = ChatMessage(role=role, content=content)
return message
def load_local_images(local_images: List[str]) -> List[ImageDocument]:
# load images into image documents
image_documents = []
for _, img in enumerate(local_images):
new_image_document = ImageDocument(image_path=img)
image_documents.append(new_image_document)
return image_documents
|
import json
import datasets
from tests.trainer.test_trainer import StoreLossCallback
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
DataCollatorForLanguageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.testing_utils import (
TestCasePlus,
backend_device_count,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_accelerator,
torch_device,
)
class TestTrainerDistributedLoss(TestCasePlus):
@require_torch_multi_accelerator
def test_trainer(self):
device_count = backend_device_count(torch_device)
min_bs = 1
output_dir = self.get_auto_remove_tmp_dir()
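        # Three configurations: "base" runs a single process with the full batch
        # (reference losses), "broken" runs one process per device with token
        # averaging disabled, and "fixed" enables --average_tokens_across_devices
        # so per-device losses should match the single-process reference.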
for gpu_num, enable, bs, name in (
(1, True, min_bs * device_count, "base"),
(device_count, False, min_bs, "broken"),
(device_count, True, min_bs, "fixed"),
):
distributed_args = f"""--nproc_per_node={gpu_num}
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed_loss.py
""".split()
args = f"--output_dir {output_dir}/{name} --per_device_train_batch_size {bs} --average_tokens_across_devices {enable}".split()
cmd = ["torchrun"] + distributed_args + args
execute_subprocess_async(cmd, env=self.get_env())
with open(f"{output_dir}/base_losses.json") as f:
base_loss = json.load(f)
with open(f"{output_dir}/broken_losses.json") as f:
broken_loss = json.load(f)
with open(f"{output_dir}/fixed_losses.json") as f:
fixed_loss = json.load(f)
broken_diff = [abs(base_loss[i] - broken_loss[i]) for i in range(len(base_loss))]
fixed_diff = [abs(base_loss[i] - fixed_loss[i]) for i in range(len(base_loss))]
sum_base = sum(base_loss)
sum_broken = sum(broken_diff)
relative_broken = abs(sum_base - sum_broken) / max(sum_base, sum_broken)
self.assertGreater(max(broken_diff), 0.5)
self.assertLess(max(fixed_diff), 0.005)
self.assertLess(relative_broken, 0.1)
def run_distributed_training(training_args):
set_seed(42)
model_name = "nickypro/tinyllama-15M"
dataset_name = "wikitext"
dataset_config = "wikitext-2-raw-v1"
dataset = datasets.load_dataset(dataset_name, dataset_config, split="train[:17]")
tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer.pad_token = tokenizer.eos_token
def tokenize_function(examples):
return tokenizer(examples["text"], max_length=16, padding="max_length", truncation=True)
tokenized_dataset = dataset.map(tokenize_function, batched=True)
tokenizer.pad_token = tokenizer.eos_token
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
model = AutoModelForCausalLM.from_pretrained(model_name)
loss_callback = StoreLossCallback()
training_args.logging_steps = 1
training_args.max_steps = 10
training_args.learning_rate = 3e-4
training_args.disable_tqdm = True
training_args.dataloader_drop_last = True
training_args.report_to = []
trainer = Trainer(
model,
training_args,
train_dataset=tokenized_dataset,
callbacks=[loss_callback],
data_collator=data_collator,
)
trainer.train()
with open(training_args.output_dir + "_losses.json", "w") as f:
json.dump(loss_callback.losses, f)
if __name__ == "__main__":
parser = HfArgumentParser((TrainingArguments,))
training_args = parser.parse_args_into_dataclasses()[0]
run_distributed_training(training_args)
|
import json
import datasets
import torch
from tests.trainer.test_trainer import StoreLossCallback
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
DataCollatorForLanguageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
)
class TestTrainerDistributedLoss(TestCasePlus):
@require_torch_multi_gpu
def test_trainer(self):
device_count = torch.cuda.device_count()
min_bs = 1
output_dir = self.get_auto_remove_tmp_dir()
for gpu_num, enable, bs, name in (
(1, True, min_bs * device_count, "base"),
(device_count, False, min_bs, "broken"),
(device_count, True, min_bs, "fixed"),
):
distributed_args = f"""--nproc_per_node={gpu_num}
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed_loss.py
""".split()
args = f"--output_dir {output_dir}/{name} --per_device_train_batch_size {bs} --average_tokens_across_devices {enable}".split()
cmd = ["torchrun"] + distributed_args + args
execute_subprocess_async(cmd, env=self.get_env())
with open(f"{output_dir}/base_losses.json") as f:
base_loss = json.load(f)
with open(f"{output_dir}/broken_losses.json") as f:
broken_loss = json.load(f)
with open(f"{output_dir}/fixed_losses.json") as f:
fixed_loss = json.load(f)
broken_diff = [abs(base_loss[i] - broken_loss[i]) for i in range(len(base_loss))]
fixed_diff = [abs(base_loss[i] - fixed_loss[i]) for i in range(len(base_loss))]
sum_base = sum(base_loss)
sum_broken = sum(broken_diff)
relative_broken = abs(sum_base - sum_broken) / max(sum_base, sum_broken)
self.assertGreater(max(broken_diff), 0.5)
self.assertLess(max(fixed_diff), 0.005)
self.assertLess(relative_broken, 0.1)
def run_distributed_training(training_args):
set_seed(42)
model_name = "nickypro/tinyllama-15M"
dataset_name = "wikitext"
dataset_config = "wikitext-2-raw-v1"
dataset = datasets.load_dataset(dataset_name, dataset_config, split="train[:17]")
tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer.pad_token = tokenizer.eos_token
def tokenize_function(examples):
return tokenizer(examples["text"], max_length=16, padding="max_length", truncation=True)
tokenized_dataset = dataset.map(tokenize_function, batched=True)
tokenizer.pad_token = tokenizer.eos_token
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
model = AutoModelForCausalLM.from_pretrained(model_name)
loss_callback = StoreLossCallback()
training_args.logging_steps = 1
training_args.max_steps = 10
training_args.learning_rate = 3e-4
training_args.disable_tqdm = True
training_args.dataloader_drop_last = True
training_args.report_to = []
trainer = Trainer(
model,
training_args,
train_dataset=tokenized_dataset,
callbacks=[loss_callback],
data_collator=data_collator,
)
trainer.train()
with open(training_args.output_dir + "_losses.json", "w") as f:
json.dump(loss_callback.losses, f)
if __name__ == "__main__":
parser = HfArgumentParser((TrainingArguments,))
training_args = parser.parse_args_into_dataclasses()[0]
run_distributed_training(training_args)
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseBinaryClassificationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Initialize the SPLADE model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a dataset with two text columns and a class label column (https://huggingface.co/datasets/sentence-transformers/quora-duplicates)
eval_dataset = load_dataset("sentence-transformers/quora-duplicates", "pair-class", split="train[-1000:]")
# Initialize the evaluator
binary_acc_evaluator = SparseBinaryClassificationEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
labels=eval_dataset["label"],
name="quora_duplicates_dev",
show_progress_bar=True,
similarity_fn_names=["cosine", "dot", "euclidean", "manhattan"],
)
results = binary_acc_evaluator(model)
"""
Accuracy with Cosine-Similarity: 74.90 (Threshold: 0.8668)
F1 with Cosine-Similarity: 67.37 (Threshold: 0.5959)
Precision with Cosine-Similarity: 54.15
Recall with Cosine-Similarity: 89.13
Average Precision with Cosine-Similarity: 67.81
Matthews Correlation with Cosine-Similarity: 49.89
Accuracy with Dot-Product: 76.50 (Threshold: 24.3460)
F1 with Dot-Product: 66.93 (Threshold: 20.0762)
Precision with Dot-Product: 57.62
Recall with Dot-Product: 79.81
Average Precision with Dot-Product: 65.94
Matthews Correlation with Dot-Product: 48.82
Accuracy with Euclidean-Distance: 67.70 (Threshold: -10.0062)
F1 with Euclidean-Distance: 48.60 (Threshold: -0.2346)
Precision with Euclidean-Distance: 32.13
Recall with Euclidean-Distance: 99.69
Average Precision with Euclidean-Distance: 20.52
Matthews Correlation with Euclidean-Distance: -4.59
Accuracy with Manhattan-Distance: 67.70 (Threshold: -103.1993)
F1 with Manhattan-Distance: 48.60 (Threshold: -1.1565)
Precision with Manhattan-Distance: 32.13
Recall with Manhattan-Distance: 99.69
Average Precision with Manhattan-Distance: 21.05
Matthews Correlation with Manhattan-Distance: -4.59
Model Sparsity Stats: Row Non-Zero Mean: 63.13884735107422, Row Sparsity Mean: 0.9979313611984253
"""
# Print the results
print(f"Primary metric: {binary_acc_evaluator.primary_metric}")
# => Primary metric: quora_duplicates_dev_max_ap
print(f"Primary metric value: {results[binary_acc_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6781
|
import logging
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseBinaryClassificationEvaluator,
SparseEncoder,
SpladePooling,
)
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Load a dataset with two text columns and a class label column
# Using the Quora Duplicates dataset as an example
eval_dataset = load_dataset("sentence-transformers/quora-duplicates", "pair-class", split="train[-1000:]")
# Initialize the evaluator
binary_acc_evaluator = SparseBinaryClassificationEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
labels=eval_dataset["label"],
name="quora_duplicates_dev",
show_progress_bar=True,
similarity_fn_names=["cosine", "dot", "euclidean", "manhattan"],
)
results = binary_acc_evaluator(model)
# Print the results
print(f"Primary metric: {binary_acc_evaluator.primary_metric}")
print(f"Primary metric value: {results[binary_acc_evaluator.primary_metric]:.4f}")
|
_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_2x_coco.py'
model = dict(
backbone=dict(
type='Res2Net',
depth=101,
scales=4,
base_width=26,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://res2net101_v1d_26w_4s')))
|
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_2x_coco.py'
model = dict(
backbone=dict(
type='Res2Net',
depth=101,
scales=4,
base_width=26,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://res2net101_v1d_26w_4s')))
|
_base_ = [
'../_base_/models/cascade-mask-rcnn_r50_fpn.py',
'../_base_/datasets/lvis_v1_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
roi_head=dict(
bbox_head=[
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
],
mask_head=dict(num_classes=1203)),
test_cfg=dict(
rcnn=dict(
score_thr=0.0001,
# LVIS allows up to 300
max_per_img=300)))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))
train_cfg = dict(val_interval=24)
|
_base_ = [
'../_base_/models/cascade_mask_rcnn_r50_fpn.py',
'../_base_/datasets/lvis_v1_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
roi_head=dict(
bbox_head=[
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
],
mask_head=dict(num_classes=1203)),
test_cfg=dict(
rcnn=dict(
score_thr=0.0001,
# LVIS allows up to 300
max_per_img=300)))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))
train_cfg = dict(val_interval=24)
|
from typing import cast
import prisma.enums
import prisma.types
from backend.blocks.io import IO_BLOCK_IDs
AGENT_NODE_INCLUDE: prisma.types.AgentNodeInclude = {
"Input": True,
"Output": True,
"Webhook": True,
"AgentBlock": True,
}
AGENT_GRAPH_INCLUDE: prisma.types.AgentGraphInclude = {
"Nodes": {"include": AGENT_NODE_INCLUDE}
}
EXECUTION_RESULT_INCLUDE: prisma.types.AgentNodeExecutionInclude = {
"Input": True,
"Output": True,
"Node": True,
"GraphExecution": True,
}
MAX_NODE_EXECUTIONS_FETCH = 1000
GRAPH_EXECUTION_INCLUDE_WITH_NODES: prisma.types.AgentGraphExecutionInclude = {
"NodeExecutions": {
"include": {
"Input": True,
"Output": True,
"Node": True,
"GraphExecution": True,
},
"order_by": [
{"queuedTime": "desc"},
            # Fallback: incomplete executions have no queuedTime.
{"addedTime": "desc"},
],
"take": MAX_NODE_EXECUTIONS_FETCH, # Avoid loading excessive node executions.
}
}
GRAPH_EXECUTION_INCLUDE: prisma.types.AgentGraphExecutionInclude = {
"NodeExecutions": {
**cast(
prisma.types.FindManyAgentNodeExecutionArgsFromAgentGraphExecution,
GRAPH_EXECUTION_INCLUDE_WITH_NODES["NodeExecutions"],
),
"where": {
"Node": {"is": {"AgentBlock": {"is": {"id": {"in": IO_BLOCK_IDs}}}}},
"NOT": [{"executionStatus": prisma.enums.AgentExecutionStatus.INCOMPLETE}],
},
}
}
INTEGRATION_WEBHOOK_INCLUDE: prisma.types.IntegrationWebhookInclude = {
"AgentNodes": {"include": AGENT_NODE_INCLUDE}
}
def library_agent_include(user_id: str) -> prisma.types.LibraryAgentInclude:
return {
"AgentGraph": {
"include": {
**AGENT_GRAPH_INCLUDE,
"Executions": {"where": {"userId": user_id}},
}
},
"Creator": True,
}
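# --- Hedged usage sketch (illustrative, not part of the module) ---
# How an include mapping like GRAPH_EXECUTION_INCLUDE is typically passed to a
# prisma-client-py query; the `db` client instance and the lowercase model
# accessor name are assumptions of this example.
#
# executions = await db.agentgraphexecution.find_many(
#     where={"userId": user_id},
#     include=GRAPH_EXECUTION_INCLUDE,
# )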
|
from typing import cast
import prisma.enums
import prisma.types
from backend.blocks.io import IO_BLOCK_IDs
from backend.util.type import typed_cast
AGENT_NODE_INCLUDE: prisma.types.AgentNodeInclude = {
"Input": True,
"Output": True,
"Webhook": True,
"AgentBlock": True,
}
AGENT_GRAPH_INCLUDE: prisma.types.AgentGraphInclude = {
"Nodes": {
"include": typed_cast(
prisma.types.AgentNodeIncludeFromAgentNodeRecursive1,
prisma.types.AgentNodeIncludeFromAgentNode,
AGENT_NODE_INCLUDE,
)
}
}
EXECUTION_RESULT_INCLUDE: prisma.types.AgentNodeExecutionInclude = {
"Input": True,
"Output": True,
"Node": True,
"GraphExecution": True,
}
MAX_NODE_EXECUTIONS_FETCH = 1000
GRAPH_EXECUTION_INCLUDE_WITH_NODES: prisma.types.AgentGraphExecutionInclude = {
"NodeExecutions": {
"include": {
"Input": True,
"Output": True,
"Node": True,
"GraphExecution": True,
},
"order_by": [
{"queuedTime": "desc"},
            # Fallback: incomplete executions have no queuedTime.
{"addedTime": "desc"},
],
"take": MAX_NODE_EXECUTIONS_FETCH, # Avoid loading excessive node executions.
}
}
GRAPH_EXECUTION_INCLUDE: prisma.types.AgentGraphExecutionInclude = {
"NodeExecutions": {
**cast(
prisma.types.FindManyAgentNodeExecutionArgsFromAgentGraphExecution,
GRAPH_EXECUTION_INCLUDE_WITH_NODES["NodeExecutions"],
),
"where": {
"Node": typed_cast(
prisma.types.AgentNodeRelationFilter,
prisma.types.AgentNodeWhereInput,
{
"AgentBlock": {"id": {"in": IO_BLOCK_IDs}},
},
),
"NOT": [{"executionStatus": prisma.enums.AgentExecutionStatus.INCOMPLETE}],
},
}
}
INTEGRATION_WEBHOOK_INCLUDE: prisma.types.IntegrationWebhookInclude = {
"AgentNodes": {
"include": typed_cast(
prisma.types.AgentNodeIncludeFromAgentNodeRecursive1,
prisma.types.AgentNodeInclude,
AGENT_NODE_INCLUDE,
)
}
}
def library_agent_include(user_id: str) -> prisma.types.LibraryAgentInclude:
return {
"AgentGraph": {
"include": {
**AGENT_GRAPH_INCLUDE,
"Executions": {"where": {"userId": user_id}},
}
},
"Creator": True,
}
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# please install mmpretrain
# import mmpretrain.models to trigger register_module in mmpretrain
custom_imports = dict(
imports=['mmpretrain.models'], allow_failed_imports=False)
model = dict(
backbone=dict(
_delete_=True,
type='mmpretrain.TIMMBackbone',
model_name='efficientnet_b1',
features_only=True,
pretrained=True,
out_indices=(1, 2, 3, 4)),
neck=dict(in_channels=[24, 40, 112, 320]))
# optimizer
optim_wrapper = dict(optimizer=dict(lr=0.01))
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# TODO: delete custom_imports after mmcls supports auto import
# please install mmcls>=1.0
# import mmcls.models to trigger register_module in mmcls
custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
model = dict(
backbone=dict(
_delete_=True,
type='mmcls.TIMMBackbone',
model_name='efficientnet_b1',
features_only=True,
pretrained=True,
out_indices=(1, 2, 3, 4)),
neck=dict(in_channels=[24, 40, 112, 320]))
# optimizer
optim_wrapper = dict(optimizer=dict(lr=0.01))
|
"""Retriever tool."""
from typing import TYPE_CHECKING, Any, List, Optional
from llama_index.core.base.base_retriever import BaseRetriever
if TYPE_CHECKING:
from llama_index.core.langchain_helpers.agents.tools import LlamaIndexTool
from llama_index.core.schema import (
MetadataMode,
Node,
NodeWithScore,
QueryBundle,
TextNode,
)
from llama_index.core.tools.types import AsyncBaseTool, ToolMetadata, ToolOutput
from llama_index.core.postprocessor.types import BaseNodePostprocessor
DEFAULT_NAME = "retriever_tool"
DEFAULT_DESCRIPTION = """Useful for running a natural language query
against a knowledge base and retrieving a set of relevant documents.
"""
class RetrieverTool(AsyncBaseTool):
"""Retriever tool.
A tool making use of a retriever.
Args:
retriever (BaseRetriever): A retriever.
metadata (ToolMetadata): The associated metadata of the query engine.
node_postprocessors (Optional[List[BaseNodePostprocessor]]): A list of
node postprocessors.
"""
def __init__(
self,
retriever: BaseRetriever,
metadata: ToolMetadata,
node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
) -> None:
self._retriever = retriever
self._metadata = metadata
self._node_postprocessors = node_postprocessors or []
@classmethod
def from_defaults(
cls,
retriever: BaseRetriever,
node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
name: Optional[str] = None,
description: Optional[str] = None,
) -> "RetrieverTool":
name = name or DEFAULT_NAME
description = description or DEFAULT_DESCRIPTION
metadata = ToolMetadata(name=name, description=description)
return cls(
retriever=retriever,
metadata=metadata,
node_postprocessors=node_postprocessors,
)
@property
def retriever(self) -> BaseRetriever:
return self._retriever
@property
def metadata(self) -> ToolMetadata:
return self._metadata
def call(self, *args: Any, **kwargs: Any) -> ToolOutput:
query_str = ""
if args is not None:
query_str += ", ".join([str(arg) for arg in args]) + "\n"
if kwargs is not None:
query_str += (
", ".join([f"{k!s} is {v!s}" for k, v in kwargs.items()]) + "\n"
)
if query_str == "":
raise ValueError("Cannot call query engine without inputs")
docs = self._retriever.retrieve(query_str)
docs = self._apply_node_postprocessors(docs, QueryBundle(query_str))
content = ""
for doc in docs:
assert isinstance(doc.node, (Node, TextNode))
node_copy = doc.node.model_copy()
node_copy.text_template = "{metadata_str}\n{content}"
node_copy.metadata_template = "{key} = {value}"
content += node_copy.get_content(MetadataMode.LLM) + "\n\n"
return ToolOutput(
content=content,
tool_name=self.metadata.name,
raw_input={"input": query_str},
raw_output=docs,
)
async def acall(self, *args: Any, **kwargs: Any) -> ToolOutput:
query_str = ""
if args is not None:
query_str += ", ".join([str(arg) for arg in args]) + "\n"
if kwargs is not None:
query_str += (
", ".join([f"{k!s} is {v!s}" for k, v in kwargs.items()]) + "\n"
)
if query_str == "":
raise ValueError("Cannot call query engine without inputs")
docs = await self._retriever.aretrieve(query_str)
content = ""
docs = self._apply_node_postprocessors(docs, QueryBundle(query_str))
for doc in docs:
assert isinstance(doc.node, (Node, TextNode))
node_copy = doc.node.model_copy()
node_copy.text_template = "{metadata_str}\n{content}"
node_copy.metadata_template = "{key} = {value}"
content += node_copy.get_content(MetadataMode.LLM) + "\n\n"
return ToolOutput(
content=content,
tool_name=self.metadata.name,
raw_input={"input": query_str},
raw_output=docs,
)
def as_langchain_tool(self) -> "LlamaIndexTool":
raise NotImplementedError("`as_langchain_tool` not implemented here.")
def _apply_node_postprocessors(
self, nodes: List[NodeWithScore], query_bundle: QueryBundle
) -> List[NodeWithScore]:
for node_postprocessor in self._node_postprocessors:
nodes = node_postprocessor.postprocess_nodes(
nodes, query_bundle=query_bundle
)
return nodes
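# --- Hedged usage sketch (illustrative, not part of the module) ---
# Building and calling the tool; `index` stands in for any existing
# llama_index index and is an assumption of this example.
#
# retriever = index.as_retriever(similarity_top_k=3)
# tool = RetrieverTool.from_defaults(
#     retriever=retriever,
#     name="docs_retriever",
#     description="Retrieves passages relevant to a natural language query.",
# )
# output = tool.call("What is the refund policy?")
# print(output.content)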
|
"""Retriever tool."""
from typing import TYPE_CHECKING, Any, List, Optional
from llama_index.core.base.base_retriever import BaseRetriever
if TYPE_CHECKING:
from llama_index.core.langchain_helpers.agents.tools import LlamaIndexTool
from llama_index.core.schema import MetadataMode, NodeWithScore, QueryBundle
from llama_index.core.tools.types import AsyncBaseTool, ToolMetadata, ToolOutput
from llama_index.core.postprocessor.types import BaseNodePostprocessor
DEFAULT_NAME = "retriever_tool"
DEFAULT_DESCRIPTION = """Useful for running a natural language query
against a knowledge base and retrieving a set of relevant documents.
"""
class RetrieverTool(AsyncBaseTool):
"""Retriever tool.
A tool making use of a retriever.
Args:
retriever (BaseRetriever): A retriever.
metadata (ToolMetadata): The associated metadata of the query engine.
node_postprocessors (Optional[List[BaseNodePostprocessor]]): A list of
node postprocessors.
"""
def __init__(
self,
retriever: BaseRetriever,
metadata: ToolMetadata,
node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
) -> None:
self._retriever = retriever
self._metadata = metadata
self._node_postprocessors = node_postprocessors or []
@classmethod
def from_defaults(
cls,
retriever: BaseRetriever,
node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
name: Optional[str] = None,
description: Optional[str] = None,
) -> "RetrieverTool":
name = name or DEFAULT_NAME
description = description or DEFAULT_DESCRIPTION
metadata = ToolMetadata(name=name, description=description)
return cls(
retriever=retriever,
metadata=metadata,
node_postprocessors=node_postprocessors,
)
@property
def retriever(self) -> BaseRetriever:
return self._retriever
@property
def metadata(self) -> ToolMetadata:
return self._metadata
def call(self, *args: Any, **kwargs: Any) -> ToolOutput:
query_str = ""
if args is not None:
query_str += ", ".join([str(arg) for arg in args]) + "\n"
if kwargs is not None:
query_str += (
", ".join([f"{k!s} is {v!s}" for k, v in kwargs.items()]) + "\n"
)
if query_str == "":
raise ValueError("Cannot call query engine without inputs")
docs = self._retriever.retrieve(query_str)
docs = self._apply_node_postprocessors(docs, QueryBundle(query_str))
content = ""
for doc in docs:
node_copy = doc.node.model_copy()
node_copy.text_template = "{metadata_str}\n{content}"
node_copy.metadata_template = "{key} = {value}"
content += node_copy.get_content(MetadataMode.LLM) + "\n\n"
return ToolOutput(
content=content,
tool_name=self.metadata.name,
raw_input={"input": query_str},
raw_output=docs,
)
async def acall(self, *args: Any, **kwargs: Any) -> ToolOutput:
query_str = ""
if args is not None:
query_str += ", ".join([str(arg) for arg in args]) + "\n"
if kwargs is not None:
query_str += (
", ".join([f"{k!s} is {v!s}" for k, v in kwargs.items()]) + "\n"
)
if query_str == "":
raise ValueError("Cannot call query engine without inputs")
docs = await self._retriever.aretrieve(query_str)
content = ""
docs = self._apply_node_postprocessors(docs, QueryBundle(query_str))
for doc in docs:
node_copy = doc.node.model_copy()
node_copy.text_template = "{metadata_str}\n{content}"
node_copy.metadata_template = "{key} = {value}"
content += node_copy.get_content(MetadataMode.LLM) + "\n\n"
return ToolOutput(
content=content,
tool_name=self.metadata.name,
raw_input={"input": query_str},
raw_output=docs,
)
def as_langchain_tool(self) -> "LlamaIndexTool":
raise NotImplementedError("`as_langchain_tool` not implemented here.")
def _apply_node_postprocessors(
self, nodes: List[NodeWithScore], query_bundle: QueryBundle
) -> List[NodeWithScore]:
for node_postprocessor in self._node_postprocessors:
nodes = node_postprocessor.postprocess_nodes(
nodes, query_bundle=query_bundle
)
return nodes
|
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from packaging import version
from .. import __version__
from .constants import (
CONFIG_NAME,
DEPRECATED_REVISION_ARGS,
DIFFUSERS_DYNAMIC_MODULE_NAME,
FLAX_WEIGHTS_NAME,
GGUF_FILE_EXTENSION,
HF_MODULES_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MIN_PEFT_VERSION,
ONNX_EXTERNAL_WEIGHTS_NAME,
ONNX_WEIGHTS_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFETENSORS_FILE_EXTENSION,
SAFETENSORS_WEIGHTS_NAME,
USE_PEFT_BACKEND,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .deprecation_utils import deprecate
from .doc_utils import replace_example_docstring
from .dynamic_modules_utils import get_class_from_dynamic_module
from .export_utils import export_to_gif, export_to_obj, export_to_ply, export_to_video
from .hub_utils import (
PushToHubMixin,
_add_variant,
_get_checkpoint_shard_files,
_get_model_file,
extract_commit_hash,
http_user_agent,
)
from .import_utils import (
BACKENDS_MAPPING,
DIFFUSERS_SLOW_IMPORT,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_accelerate_available,
is_accelerate_version,
is_bitsandbytes_available,
is_bitsandbytes_version,
is_bs4_available,
is_flax_available,
is_ftfy_available,
is_gguf_available,
is_gguf_version,
is_google_colab,
is_hf_hub_version,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_matplotlib_available,
is_note_seq_available,
is_onnx_available,
is_peft_available,
is_peft_version,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_tensorboard_available,
is_timm_available,
is_torch_available,
is_torch_npu_available,
is_torch_version,
is_torch_xla_available,
is_torch_xla_version,
is_torchao_available,
is_torchsde_available,
is_torchvision_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
is_wandb_available,
is_xformers_available,
requires_backends,
)
from .loading_utils import get_module_from_name, get_submodule_by_name, load_image, load_video
from .logging import get_logger
from .outputs import BaseOutput
from .peft_utils import (
check_peft_version,
delete_adapter_layers,
get_adapter_name,
get_peft_kwargs,
recurse_remove_peft_layers,
scale_lora_layers,
set_adapter_layers,
set_weights_and_activate_adapters,
unscale_lora_layers,
)
from .pil_utils import PIL_INTERPOLATION, make_image_grid, numpy_to_pil, pt_to_pil
from .state_dict_utils import (
convert_all_state_dict_to_peft,
convert_state_dict_to_diffusers,
convert_state_dict_to_kohya,
convert_state_dict_to_peft,
convert_unet_state_dict_to_peft,
)
logger = get_logger(__name__)
def check_min_version(min_version):
if version.parse(__version__) < version.parse(min_version):
if "dev" in min_version:
error_message = (
"This example requires a source install from HuggingFace diffusers (see "
"`https://huggingface.co/docs/diffusers/installation#install-from-source`),"
)
else:
error_message = f"This example requires a minimum version of {min_version},"
error_message += f" but the version found is {__version__}.\n"
raise ImportError(error_message)
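# --- Hedged usage sketch (illustrative, not part of the module) ---
# Example scripts typically call this guard at import time; the version
# string below is a placeholder, not a real release pin.
#
# check_min_version("0.33.0.dev0")  # raises ImportError when diffusers is older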
|
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from packaging import version
from .. import __version__
from .constants import (
CONFIG_NAME,
DEPRECATED_REVISION_ARGS,
DIFFUSERS_DYNAMIC_MODULE_NAME,
FLAX_WEIGHTS_NAME,
GGUF_FILE_EXTENSION,
HF_MODULES_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MIN_PEFT_VERSION,
ONNX_EXTERNAL_WEIGHTS_NAME,
ONNX_WEIGHTS_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFETENSORS_FILE_EXTENSION,
SAFETENSORS_WEIGHTS_NAME,
USE_PEFT_BACKEND,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .deprecation_utils import deprecate
from .doc_utils import replace_example_docstring
from .dynamic_modules_utils import get_class_from_dynamic_module
from .export_utils import export_to_gif, export_to_obj, export_to_ply, export_to_video
from .hub_utils import (
PushToHubMixin,
_add_variant,
_get_checkpoint_shard_files,
_get_model_file,
extract_commit_hash,
http_user_agent,
)
from .import_utils import (
BACKENDS_MAPPING,
DIFFUSERS_SLOW_IMPORT,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_accelerate_available,
is_accelerate_version,
is_bitsandbytes_available,
is_bitsandbytes_version,
is_bs4_available,
is_flax_available,
is_ftfy_available,
is_gguf_available,
is_gguf_version,
is_google_colab,
is_hf_hub_version,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_matplotlib_available,
is_note_seq_available,
is_onnx_available,
is_peft_available,
is_peft_version,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_tensorboard_available,
is_timm_available,
is_torch_available,
is_torch_npu_available,
is_torch_version,
is_torch_xla_available,
is_torch_xla_version,
is_torchao_available,
is_torchsde_available,
is_torchvision_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
is_wandb_available,
is_xformers_available,
requires_backends,
)
from .loading_utils import get_module_from_name, get_submodule_by_name, load_image, load_video
from .logging import get_logger
from .outputs import BaseOutput
from .peft_utils import (
check_peft_version,
delete_adapter_layers,
get_adapter_name,
get_peft_kwargs,
recurse_remove_peft_layers,
scale_lora_layers,
set_adapter_layers,
set_weights_and_activate_adapters,
unscale_lora_layers,
)
from .pil_utils import PIL_INTERPOLATION, make_image_grid, numpy_to_pil, pt_to_pil
from .state_dict_utils import (
convert_all_state_dict_to_peft,
convert_state_dict_to_diffusers,
convert_state_dict_to_kohya,
convert_state_dict_to_peft,
convert_unet_state_dict_to_peft,
)
logger = get_logger(__name__)
def check_min_version(min_version):
if version.parse(__version__) < version.parse(min_version):
if "dev" in min_version:
error_message = (
"This example requires a source install from HuggingFace diffusers (see "
"`https://huggingface.co/docs/diffusers/installation#install-from-source`),"
)
else:
error_message = f"This example requires a minimum version of {min_version},"
error_message += f" but the version found is {__version__}.\n"
raise ImportError(error_message)
|
# Copyright 2024 The OpenXLA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Public API for codegen testlib."""
from xla.codegen.testlib import _extension
# Classes
# go/keep-sorted start
BufferAssignment = _extension.BufferAssignment
ComparisonDirection = _extension.ComparisonDirection
DotDimensionNumbers = _extension.DotDimensionNumbers
HloComputation = _extension.HloComputation
HloInstruction = _extension.HloInstruction
HloModule = _extension.HloModule
HloModuleConfig = _extension.HloModuleConfig
HloOpcode = _extension.HloOpcode
KernelDefinitionBase = _extension.KernelDefinitionBase
KernelEmitterBase = _extension.KernelEmitterBase
KernelRunner = _extension.KernelRunner
KernelSpec = _extension.KernelSpec
LlvmIrKernelSource = _extension.LlvmIrKernelSource
LlvmKernelDefinition = _extension.LlvmKernelDefinition
LlvmKernelEmitter = _extension.LlvmKernelEmitter
MlirKernelDefinition = _extension.MlirKernelDefinition
MlirKernelEmitter = _extension.MlirKernelEmitter
MlirKernelSource = _extension.MlirKernelSource
ScatterDimensionNumbers = _extension.ScatterDimensionNumbers
# go/keep-sorted end
# Functions
# go/keep-sorted start
build_hlo_computation = _extension.build_hlo_computation
# go/keep-sorted end
|
# Copyright 2024 The OpenXLA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Public API for codegen testlib."""
from xla.codegen.testlib import _extension
# Classes
# go/keep-sorted start
BufferAssignment = _extension.BufferAssignment
ComparisonDirection = _extension.ComparisonDirection
HloInstruction = _extension.HloInstruction
HloModule = _extension.HloModule
HloOpcode = _extension.HloOpcode
KernelEmitter = _extension.KernelEmitter
KernelRunner = _extension.KernelRunner
KernelSpec = _extension.KernelSpec
# go/keep-sorted end
|
from datetime import timedelta
from typing import Optional
from torch._C._distributed_c10d import _DEFAULT_PG_TIMEOUT
__all__ = ["default_pg_timeout", "default_pg_nccl_timeout"]
# Default process group wide timeout, if applicable.
# This only applies to the non-nccl backends
# To make an attempt at backwards compatibility with THD, we use an
# extraordinarily high default timeout, given that THD did not have timeouts.
default_pg_timeout: timedelta = _DEFAULT_PG_TIMEOUT
# Separate timeout for PGNCCL mainly because it's always been that way in the C++ layer, but until recently
# there was one default that applied across all backends in the python layer.
# Later, we could consider merging them back together at the c++ layer if we can align on a same value.
# (only if TORCH_NCCL_BLOCKING_WAIT or TORCH_NCCL_ASYNC_ERROR_HANDLING is set to 1).
try:
from torch._C._distributed_c10d import _DEFAULT_PG_NCCL_TIMEOUT
default_pg_nccl_timeout: Optional[timedelta] = _DEFAULT_PG_NCCL_TIMEOUT
except ImportError:
# if C++ NCCL support is not compiled, we don't have access to the default nccl value.
# if anyone is actually trying to use nccl in this state, it should error.
default_pg_nccl_timeout = None
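# --- Hedged usage sketch (illustrative, not part of the module) ---
# These defaults are what torch.distributed falls back to when no explicit
# timeout is passed; overriding them looks like:
#
# import torch.distributed as dist
# from datetime import timedelta
#
# dist.init_process_group("gloo", timeout=timedelta(minutes=5))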
|
from datetime import timedelta
from typing import Optional
from torch._C._distributed_c10d import _DEFAULT_PG_TIMEOUT
__all__ = ["default_pg_timeout", "default_pg_nccl_timeout"]
# Default process group wide timeout, if applicable.
# This only applies to the non-nccl backends
# To make an attempt at backwards compatibility with THD, we use an
# extraordinarily high default timeout, given that THD did not have timeouts.
default_pg_timeout: timedelta = _DEFAULT_PG_TIMEOUT
# Separate timeout for PGNCCL mainly because it's always been that way in the C++ layer, but until recently
# there was one default that applied across all backends in the python layer.
# Later, we could consider merging them back together at the c++ layer if we can align on a same value.
# (only if TORCH_NCCL_BLOCKING_WAIT or TORCH_NCCL_ASYNC_ERROR_HANDLING is set to 1).
try:
from torch._C._distributed_c10d import _DEFAULT_PG_NCCL_TIMEOUT
default_pg_nccl_timeout: Optional[timedelta] = _DEFAULT_PG_NCCL_TIMEOUT
except ImportError:
# if C++ NCCL support is not compiled, we don't have access to the default nccl value.
# if anyone is actually trying to use nccl in this state, it should error.
default_pg_nccl_timeout = None
|
import PIL.Image
import pytest
import torch
import torchvision.transforms.v2.utils
from prototype_common_utils import make_bounding_box, make_detection_mask, make_image
from torchvision import datapoints
from torchvision.transforms.v2.functional import to_image_pil
from torchvision.transforms.v2.utils import has_all, has_any
IMAGE = make_image(color_space="RGB")
BOUNDING_BOX = make_bounding_box(format=datapoints.BoundingBoxFormat.XYXY, spatial_size=IMAGE.spatial_size)
MASK = make_detection_mask(size=IMAGE.spatial_size)
@pytest.mark.parametrize(
("sample", "types", "expected"),
[
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.BoundingBox,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Mask,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image, datapoints.BoundingBox), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image, datapoints.Mask), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.BoundingBox, datapoints.Mask), True),
((MASK,), (datapoints.Image, datapoints.BoundingBox), False),
((BOUNDING_BOX,), (datapoints.Image, datapoints.Mask), False),
((IMAGE,), (datapoints.BoundingBox, datapoints.Mask), False),
(
(IMAGE, BOUNDING_BOX, MASK),
(datapoints.Image, datapoints.BoundingBox, datapoints.Mask),
True,
),
((), (datapoints.Image, datapoints.BoundingBox, datapoints.Mask), False),
((IMAGE, BOUNDING_BOX, MASK), (lambda obj: isinstance(obj, datapoints.Image),), True),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: False,), False),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: True,), True),
((IMAGE,), (datapoints.Image, PIL.Image.Image, torchvision.transforms.v2.utils.is_simple_tensor), True),
(
(torch.Tensor(IMAGE),),
(datapoints.Image, PIL.Image.Image, torchvision.transforms.v2.utils.is_simple_tensor),
True,
),
(
(to_image_pil(IMAGE),),
(datapoints.Image, PIL.Image.Image, torchvision.transforms.v2.utils.is_simple_tensor),
True,
),
],
)
def test_has_any(sample, types, expected):
assert has_any(sample, *types) is expected
@pytest.mark.parametrize(
("sample", "types", "expected"),
[
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.BoundingBox,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Mask,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image, datapoints.BoundingBox), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image, datapoints.Mask), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.BoundingBox, datapoints.Mask), True),
(
(IMAGE, BOUNDING_BOX, MASK),
(datapoints.Image, datapoints.BoundingBox, datapoints.Mask),
True,
),
((BOUNDING_BOX, MASK), (datapoints.Image, datapoints.BoundingBox), False),
((BOUNDING_BOX, MASK), (datapoints.Image, datapoints.Mask), False),
((IMAGE, MASK), (datapoints.BoundingBox, datapoints.Mask), False),
(
(IMAGE, BOUNDING_BOX, MASK),
(datapoints.Image, datapoints.BoundingBox, datapoints.Mask),
True,
),
((BOUNDING_BOX, MASK), (datapoints.Image, datapoints.BoundingBox, datapoints.Mask), False),
((IMAGE, MASK), (datapoints.Image, datapoints.BoundingBox, datapoints.Mask), False),
((IMAGE, BOUNDING_BOX), (datapoints.Image, datapoints.BoundingBox, datapoints.Mask), False),
(
(IMAGE, BOUNDING_BOX, MASK),
(lambda obj: isinstance(obj, (datapoints.Image, datapoints.BoundingBox, datapoints.Mask)),),
True,
),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: False,), False),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: True,), True),
],
)
def test_has_all(sample, types, expected):
assert has_all(sample, *types) is expected
|
import PIL.Image
import pytest
import torch
import torchvision.prototype.transforms.utils
from prototype_common_utils import make_bounding_box, make_detection_mask, make_image
from torchvision.prototype import datapoints
from torchvision.prototype.transforms.functional import to_image_pil
from torchvision.prototype.transforms.utils import has_all, has_any
IMAGE = make_image(color_space="RGB")
BOUNDING_BOX = make_bounding_box(format=datapoints.BoundingBoxFormat.XYXY, spatial_size=IMAGE.spatial_size)
MASK = make_detection_mask(size=IMAGE.spatial_size)
@pytest.mark.parametrize(
("sample", "types", "expected"),
[
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.BoundingBox,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Mask,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image, datapoints.BoundingBox), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image, datapoints.Mask), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.BoundingBox, datapoints.Mask), True),
((MASK,), (datapoints.Image, datapoints.BoundingBox), False),
((BOUNDING_BOX,), (datapoints.Image, datapoints.Mask), False),
((IMAGE,), (datapoints.BoundingBox, datapoints.Mask), False),
(
(IMAGE, BOUNDING_BOX, MASK),
(datapoints.Image, datapoints.BoundingBox, datapoints.Mask),
True,
),
((), (datapoints.Image, datapoints.BoundingBox, datapoints.Mask), False),
((IMAGE, BOUNDING_BOX, MASK), (lambda obj: isinstance(obj, datapoints.Image),), True),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: False,), False),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: True,), True),
((IMAGE,), (datapoints.Image, PIL.Image.Image, torchvision.prototype.transforms.utils.is_simple_tensor), True),
(
(torch.Tensor(IMAGE),),
(datapoints.Image, PIL.Image.Image, torchvision.prototype.transforms.utils.is_simple_tensor),
True,
),
(
(to_image_pil(IMAGE),),
(datapoints.Image, PIL.Image.Image, torchvision.prototype.transforms.utils.is_simple_tensor),
True,
),
],
)
def test_has_any(sample, types, expected):
assert has_any(sample, *types) is expected
@pytest.mark.parametrize(
("sample", "types", "expected"),
[
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.BoundingBox,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Mask,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image, datapoints.BoundingBox), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image, datapoints.Mask), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.BoundingBox, datapoints.Mask), True),
(
(IMAGE, BOUNDING_BOX, MASK),
(datapoints.Image, datapoints.BoundingBox, datapoints.Mask),
True,
),
((BOUNDING_BOX, MASK), (datapoints.Image, datapoints.BoundingBox), False),
((BOUNDING_BOX, MASK), (datapoints.Image, datapoints.Mask), False),
((IMAGE, MASK), (datapoints.BoundingBox, datapoints.Mask), False),
(
(IMAGE, BOUNDING_BOX, MASK),
(datapoints.Image, datapoints.BoundingBox, datapoints.Mask),
True,
),
((BOUNDING_BOX, MASK), (datapoints.Image, datapoints.BoundingBox, datapoints.Mask), False),
((IMAGE, MASK), (datapoints.Image, datapoints.BoundingBox, datapoints.Mask), False),
((IMAGE, BOUNDING_BOX), (datapoints.Image, datapoints.BoundingBox, datapoints.Mask), False),
(
(IMAGE, BOUNDING_BOX, MASK),
(lambda obj: isinstance(obj, (datapoints.Image, datapoints.BoundingBox, datapoints.Mask)),),
True,
),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: False,), False),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: True,), True),
],
)
def test_has_all(sample, types, expected):
assert has_all(sample, *types) is expected
|
"""**OutputParser** classes parse the output of an LLM call.
**Class hierarchy:**
.. code-block::
BaseLLMOutputParser --> BaseOutputParser --> <name>OutputParser # ListOutputParser, PydanticOutputParser
**Main helpers:**
.. code-block::
Serializable, Generation, PromptValue
""" # noqa: E501
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.output_parsers.base import (
BaseGenerationOutputParser,
BaseLLMOutputParser,
BaseOutputParser,
)
from langchain_core.output_parsers.json import (
JsonOutputParser,
SimpleJsonOutputParser,
)
from langchain_core.output_parsers.list import (
CommaSeparatedListOutputParser,
ListOutputParser,
MarkdownListOutputParser,
NumberedListOutputParser,
)
from langchain_core.output_parsers.openai_tools import (
JsonOutputKeyToolsParser,
JsonOutputToolsParser,
PydanticToolsParser,
)
from langchain_core.output_parsers.pydantic import PydanticOutputParser
from langchain_core.output_parsers.string import StrOutputParser
from langchain_core.output_parsers.transform import (
BaseCumulativeTransformOutputParser,
BaseTransformOutputParser,
)
from langchain_core.output_parsers.xml import XMLOutputParser
__all__ = [
"BaseLLMOutputParser",
"BaseGenerationOutputParser",
"BaseOutputParser",
"ListOutputParser",
"CommaSeparatedListOutputParser",
"NumberedListOutputParser",
"MarkdownListOutputParser",
"StrOutputParser",
"BaseTransformOutputParser",
"BaseCumulativeTransformOutputParser",
"SimpleJsonOutputParser",
"XMLOutputParser",
"JsonOutputParser",
"PydanticOutputParser",
"JsonOutputToolsParser",
"JsonOutputKeyToolsParser",
"PydanticToolsParser",
]
_dynamic_imports = {
"BaseLLMOutputParser": "base",
"BaseGenerationOutputParser": "base",
"BaseOutputParser": "base",
"JsonOutputParser": "json",
"SimpleJsonOutputParser": "json",
"ListOutputParser": "list",
"CommaSeparatedListOutputParser": "list",
"MarkdownListOutputParser": "list",
"NumberedListOutputParser": "list",
"JsonOutputKeyToolsParser": "openai_tools",
"JsonOutputToolsParser": "openai_tools",
"PydanticToolsParser": "openai_tools",
"PydanticOutputParser": "pydantic",
"StrOutputParser": "string",
"BaseTransformOutputParser": "transform",
"BaseCumulativeTransformOutputParser": "transform",
"XMLOutputParser": "xml",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
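# --- Hedged usage note (illustrative, not part of the module) ---
# With the module-level __getattr__ above, a submodule is imported only on
# first attribute access, e.g.:
#
# from langchain_core.output_parsers import StrOutputParser  # resolves via __getattr__
# parser = StrOutputParser()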
|
"""**OutputParser** classes parse the output of an LLM call.
**Class hierarchy:**
.. code-block::
BaseLLMOutputParser --> BaseOutputParser --> <name>OutputParser # ListOutputParser, PydanticOutputParser
**Main helpers:**
.. code-block::
Serializable, Generation, PromptValue
""" # noqa: E501
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.output_parsers.base import (
BaseGenerationOutputParser,
BaseLLMOutputParser,
BaseOutputParser,
)
from langchain_core.output_parsers.json import (
JsonOutputParser,
SimpleJsonOutputParser,
)
from langchain_core.output_parsers.list import (
CommaSeparatedListOutputParser,
ListOutputParser,
MarkdownListOutputParser,
NumberedListOutputParser,
)
from langchain_core.output_parsers.openai_tools import (
JsonOutputKeyToolsParser,
JsonOutputToolsParser,
PydanticToolsParser,
)
from langchain_core.output_parsers.pydantic import PydanticOutputParser
from langchain_core.output_parsers.string import StrOutputParser
from langchain_core.output_parsers.transform import (
BaseCumulativeTransformOutputParser,
BaseTransformOutputParser,
)
from langchain_core.output_parsers.xml import XMLOutputParser
__all__ = [
"BaseLLMOutputParser",
"BaseGenerationOutputParser",
"BaseOutputParser",
"ListOutputParser",
"CommaSeparatedListOutputParser",
"NumberedListOutputParser",
"MarkdownListOutputParser",
"StrOutputParser",
"BaseTransformOutputParser",
"BaseCumulativeTransformOutputParser",
"SimpleJsonOutputParser",
"XMLOutputParser",
"JsonOutputParser",
"PydanticOutputParser",
"JsonOutputToolsParser",
"JsonOutputKeyToolsParser",
"PydanticToolsParser",
]
_dynamic_imports = {
"BaseLLMOutputParser": "base",
"BaseGenerationOutputParser": "base",
"BaseOutputParser": "base",
"JsonOutputParser": "json",
"SimpleJsonOutputParser": "json",
"ListOutputParser": "list",
"CommaSeparatedListOutputParser": "list",
"MarkdownListOutputParser": "list",
"NumberedListOutputParser": "list",
"JsonOutputKeyToolsParser": "openai_tools",
"JsonOutputToolsParser": "openai_tools",
"PydanticToolsParser": "openai_tools",
"PydanticOutputParser": "pydantic",
"StrOutputParser": "string",
"BaseTransformOutputParser": "transform",
"BaseCumulativeTransformOutputParser": "transform",
"XMLOutputParser": "xml",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent # type: ignore[name-defined]
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
from typing import List
import torch
import torchaudio.prototype.transforms as T
from torch.autograd import gradcheck, gradgradcheck
from torchaudio_unittest.common_utils import get_spectrogram, get_whitenoise, nested_params, TestBaseMixin
class Autograd(TestBaseMixin):
def assert_grad(
self,
transform: torch.nn.Module,
inputs: List[torch.Tensor],
*,
nondet_tol: float = 0.0,
):
transform = transform.to(dtype=torch.float64, device=self.device)
# gradcheck and gradgradcheck only pass if the input tensors are of dtype `torch.double` or
# `torch.cdouble`, when the default eps and tolerance values are used.
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(dtype=torch.cdouble if i.is_complex() else torch.double, device=self.device)
i.requires_grad = True
inputs_.append(i)
assert gradcheck(transform, inputs_)
assert gradgradcheck(transform, inputs_, nondet_tol=nondet_tol)
@nested_params(
[T.Convolve, T.FFTConvolve],
["full", "valid", "same"],
)
def test_Convolve(self, cls, mode):
leading_dims = (4, 3, 2)
L_x, L_y = 23, 40
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
convolve = cls(mode=mode).to(dtype=self.dtype, device=self.device)
self.assert_grad(convolve, [x, y])
def test_barkspectrogram(self):
        # replication_pad1d_backward_cuda is not deterministic and
        # gives very small (~1e-16) differences.
sample_rate = 8000
transform = T.BarkSpectrogram(sample_rate=sample_rate)
waveform = get_whitenoise(sample_rate=sample_rate, duration=0.05, n_channels=2)
self.assert_grad(transform, [waveform], nondet_tol=1e-10)
def test_barkscale(self):
sample_rate = 8000
n_fft = 400
n_barks = n_fft // 2 + 1
transform = T.BarkScale(sample_rate=sample_rate, n_barks=n_barks)
spec = get_spectrogram(
get_whitenoise(sample_rate=sample_rate, duration=0.05, n_channels=2), n_fft=n_fft, power=1
)
self.assert_grad(transform, [spec])
def test_Speed(self):
leading_dims = (3, 2)
time = 200
waveform = torch.rand(*leading_dims, time, dtype=torch.float64, device=self.device, requires_grad=True)
lengths = torch.randint(1, time, leading_dims, dtype=torch.float64, device=self.device)
speed = T.Speed(1000, 1.1).to(device=self.device, dtype=torch.float64)
assert gradcheck(speed, (waveform, lengths))
assert gradgradcheck(speed, (waveform, lengths))
def test_SpeedPerturbation(self):
leading_dims = (3, 2)
time = 200
waveform = torch.rand(*leading_dims, time, dtype=torch.float64, device=self.device, requires_grad=True)
lengths = torch.randint(1, time, leading_dims, dtype=torch.float64, device=self.device)
speed = T.SpeedPerturbation(1000, [0.9]).to(device=self.device, dtype=torch.float64)
assert gradcheck(speed, (waveform, lengths))
assert gradgradcheck(speed, (waveform, lengths))
@nested_params([True, False])
def test_AddNoise(self, use_lengths):
leading_dims = (2, 3)
L = 31
waveform = torch.rand(*leading_dims, L, dtype=torch.float64, device=self.device, requires_grad=True)
noise = torch.rand(*leading_dims, L, dtype=torch.float64, device=self.device, requires_grad=True)
if use_lengths:
lengths = torch.rand(*leading_dims, dtype=torch.float64, device=self.device, requires_grad=True)
else:
lengths = None
snr = torch.rand(*leading_dims, dtype=torch.float64, device=self.device, requires_grad=True) * 10
add_noise = T.AddNoise().to(self.device, torch.float64)
assert gradcheck(add_noise, (waveform, noise, snr, lengths))
assert gradgradcheck(add_noise, (waveform, noise, snr, lengths))
def test_Preemphasis(self):
waveform = torch.rand(3, 4, 10, dtype=torch.float64, device=self.device, requires_grad=True)
preemphasis = T.Preemphasis(coeff=0.97).to(dtype=torch.float64, device=self.device)
assert gradcheck(preemphasis, (waveform,))
assert gradgradcheck(preemphasis, (waveform,))
def test_Deemphasis(self):
waveform = torch.rand(3, 4, 10, dtype=torch.float64, device=self.device, requires_grad=True)
deemphasis = T.Deemphasis(coeff=0.97).to(dtype=torch.float64, device=self.device)
assert gradcheck(deemphasis, (waveform,))
assert gradgradcheck(deemphasis, (waveform,))
|
from typing import List
import torch
import torchaudio.prototype.transforms as T
from torch.autograd import gradcheck, gradgradcheck
from torchaudio_unittest.common_utils import get_spectrogram, get_whitenoise, nested_params, TestBaseMixin
class Autograd(TestBaseMixin):
def assert_grad(
self,
transform: torch.nn.Module,
inputs: List[torch.Tensor],
*,
nondet_tol: float = 0.0,
):
transform = transform.to(dtype=torch.float64, device=self.device)
# gradcheck and gradgradcheck only pass if the input tensors are of dtype `torch.double` or
# `torch.cdouble`, when the default eps and tolerance values are used.
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(dtype=torch.cdouble if i.is_complex() else torch.double, device=self.device)
i.requires_grad = True
inputs_.append(i)
assert gradcheck(transform, inputs_)
assert gradgradcheck(transform, inputs_, nondet_tol=nondet_tol)
@nested_params(
[T.Convolve, T.FFTConvolve],
["full", "valid", "same"],
)
def test_Convolve(self, cls, mode):
leading_dims = (4, 3, 2)
L_x, L_y = 23, 40
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
convolve = cls(mode=mode).to(dtype=self.dtype, device=self.device)
self.assert_grad(convolve, [x, y])
def test_barkspectrogram(self):
        # replication_pad1d_backward_cuda is not deterministic and
        # gives very small (~1e-16) differences.
sample_rate = 8000
transform = T.BarkSpectrogram(sample_rate=sample_rate)
waveform = get_whitenoise(sample_rate=sample_rate, duration=0.05, n_channels=2)
self.assert_grad(transform, [waveform], nondet_tol=1e-10)
def test_barkscale(self):
sample_rate = 8000
n_fft = 400
n_barks = n_fft // 2 + 1
transform = T.BarkScale(sample_rate=sample_rate, n_barks=n_barks)
spec = get_spectrogram(
get_whitenoise(sample_rate=sample_rate, duration=0.05, n_channels=2), n_fft=n_fft, power=1
)
self.assert_grad(transform, [spec])
def test_Speed(self):
leading_dims = (3, 2)
time = 200
waveform = torch.rand(*leading_dims, time, dtype=torch.float64, device=self.device, requires_grad=True)
lengths = torch.randint(1, time, leading_dims, dtype=torch.float64, device=self.device)
speed = T.Speed(1000, 1.1).to(device=self.device, dtype=torch.float64)
assert gradcheck(speed, (waveform, lengths))
assert gradgradcheck(speed, (waveform, lengths))
def test_SpeedPerturbation(self):
leading_dims = (3, 2)
time = 200
waveform = torch.rand(*leading_dims, time, dtype=torch.float64, device=self.device, requires_grad=True)
lengths = torch.randint(1, time, leading_dims, dtype=torch.float64, device=self.device)
speed = T.SpeedPerturbation(1000, [0.9]).to(device=self.device, dtype=torch.float64)
assert gradcheck(speed, (waveform, lengths))
assert gradgradcheck(speed, (waveform, lengths))
def test_AddNoise(self):
leading_dims = (2, 3)
L = 31
waveform = torch.rand(*leading_dims, L, dtype=torch.float64, device=self.device, requires_grad=True)
noise = torch.rand(*leading_dims, L, dtype=torch.float64, device=self.device, requires_grad=True)
lengths = torch.rand(*leading_dims, dtype=torch.float64, device=self.device, requires_grad=True)
snr = torch.rand(*leading_dims, dtype=torch.float64, device=self.device, requires_grad=True) * 10
add_noise = T.AddNoise().to(self.device, torch.float64)
assert gradcheck(add_noise, (waveform, noise, lengths, snr))
assert gradgradcheck(add_noise, (waveform, noise, lengths, snr))
def test_Preemphasis(self):
waveform = torch.rand(3, 4, 10, dtype=torch.float64, device=self.device, requires_grad=True)
preemphasis = T.Preemphasis(coeff=0.97).to(dtype=torch.float64, device=self.device)
assert gradcheck(preemphasis, (waveform,))
assert gradgradcheck(preemphasis, (waveform,))
def test_Deemphasis(self):
waveform = torch.rand(3, 4, 10, dtype=torch.float64, device=self.device, requires_grad=True)
deemphasis = T.Deemphasis(coeff=0.97).to(dtype=torch.float64, device=self.device)
assert gradcheck(deemphasis, (waveform,))
assert gradgradcheck(deemphasis, (waveform,))
|
from docarray.array.documentarray import DocumentArray
__all__ = ['DocumentArray']
|
from docarray.array.documentarray import DocumentArray
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import AnchorHead
class TestAnchorHead(TestCase):
def test_anchor_head_loss(self):
"""Tests anchor head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'pad_shape': (s, s, 3),
'scale_factor': 1,
}]
cfg = Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False))
anchor_head = AnchorHead(num_classes=4, in_channels=1, train_cfg=cfg)
        # Anchor head expects multiple levels of features per image
feats = (
torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2)))
for i in range(len(anchor_head.prior_generator.strides)))
cls_scores, bbox_preds = anchor_head.forward(feats)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = anchor_head.loss_by_feat(cls_scores, bbox_preds,
[gt_instances], img_metas)
# When there is no truth, the cls loss should be nonzero but
# there should be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss
# should be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
one_gt_losses = anchor_head.loss_by_feat(cls_scores, bbox_preds,
[gt_instances], img_metas)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.data import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import AnchorHead
class TestAnchorHead(TestCase):
def test_anchor_head_loss(self):
"""Tests anchor head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'pad_shape': (s, s, 3),
'scale_factor': 1,
}]
cfg = Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False))
anchor_head = AnchorHead(num_classes=4, in_channels=1, train_cfg=cfg)
        # Anchor head expects multiple levels of features per image
feats = (
torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2)))
for i in range(len(anchor_head.prior_generator.strides)))
cls_scores, bbox_preds = anchor_head.forward(feats)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = anchor_head.loss_by_feat(cls_scores, bbox_preds,
[gt_instances], img_metas)
# When there is no truth, the cls loss should be nonzero but
# there should be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss
# should be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
one_gt_losses = anchor_head.loss_by_feat(cls_scores, bbox_preds,
[gt_instances], img_metas)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
|
"""Map-reduce chain.
Splits up a document, sends the smaller parts to the LLM with one prompt,
then combines the results with another one.
"""
from __future__ import annotations
from collections.abc import Mapping
from typing import Any, Optional
from langchain_core._api import deprecated
from langchain_core.callbacks import CallbackManagerForChainRun, Callbacks
from langchain_core.documents import Document
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from langchain_text_splitters import TextSplitter
from pydantic import ConfigDict
from langchain.chains import ReduceDocumentsChain
from langchain.chains.base import Chain
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.llm import LLMChain
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"Refer to migration guide here for a recommended implementation using "
"LangGraph: https://python.langchain.com/docs/versions/migrating_chains/map_reduce_chain/" # noqa: E501
". See also LangGraph guides for map-reduce: "
"https://langchain-ai.github.io/langgraph/how-tos/map-reduce/."
),
)
class MapReduceChain(Chain):
"""Map-reduce chain."""
combine_documents_chain: BaseCombineDocumentsChain
"""Chain to use to combine documents."""
text_splitter: TextSplitter
"""Text splitter to use."""
input_key: str = "input_text" #: :meta private:
output_key: str = "output_text" #: :meta private:
@classmethod
def from_params(
cls,
llm: BaseLanguageModel,
prompt: BasePromptTemplate,
text_splitter: TextSplitter,
callbacks: Callbacks = None,
combine_chain_kwargs: Optional[Mapping[str, Any]] = None,
reduce_chain_kwargs: Optional[Mapping[str, Any]] = None,
**kwargs: Any,
) -> MapReduceChain:
"""Construct a map-reduce chain that uses the chain for map and reduce."""
llm_chain = LLMChain(llm=llm, prompt=prompt, callbacks=callbacks)
stuff_chain = StuffDocumentsChain(
llm_chain=llm_chain,
callbacks=callbacks,
**(reduce_chain_kwargs if reduce_chain_kwargs else {}),
)
reduce_documents_chain = ReduceDocumentsChain(
combine_documents_chain=stuff_chain
)
combine_documents_chain = MapReduceDocumentsChain(
llm_chain=llm_chain,
reduce_documents_chain=reduce_documents_chain,
callbacks=callbacks,
**(combine_chain_kwargs if combine_chain_kwargs else {}),
)
return cls(
combine_documents_chain=combine_documents_chain,
text_splitter=text_splitter,
callbacks=callbacks,
**kwargs,
)
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
@property
def input_keys(self) -> list[str]:
"""Expect input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> list[str]:
"""Return output key.
:meta private:
"""
return [self.output_key]
def _call(
self,
inputs: dict[str, str],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> dict[str, str]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
# Split the larger text into smaller chunks.
doc_text = inputs.pop(self.input_key)
texts = self.text_splitter.split_text(doc_text)
docs = [Document(page_content=text) for text in texts]
_inputs: dict[str, Any] = {
**inputs,
self.combine_documents_chain.input_key: docs,
}
outputs = self.combine_documents_chain.run(
_inputs, callbacks=_run_manager.get_child()
)
return {self.output_key: outputs}
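# A minimal usage sketch (deprecated API; `llm` and `prompt` are hypothetical
# objects, and CharacterTextSplitter is assumed to be importable from
# langchain_text_splitters):
#
#   from langchain_text_splitters import CharacterTextSplitter
#
#   chain = MapReduceChain.from_params(llm, prompt, CharacterTextSplitter())
#   summary = chain.run(input_text=long_document)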
|
"""Map-reduce chain.
Splits up a document, sends the smaller parts to the LLM with one prompt,
then combines the results with another one.
"""
from __future__ import annotations
from typing import Any, Dict, List, Mapping, Optional
from langchain_core._api import deprecated
from langchain_core.callbacks import CallbackManagerForChainRun, Callbacks
from langchain_core.documents import Document
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from langchain_text_splitters import TextSplitter
from pydantic import ConfigDict
from langchain.chains import ReduceDocumentsChain
from langchain.chains.base import Chain
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.llm import LLMChain
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"Refer to migration guide here for a recommended implementation using "
"LangGraph: https://python.langchain.com/docs/versions/migrating_chains/map_reduce_chain/" # noqa: E501
". See also LangGraph guides for map-reduce: "
"https://langchain-ai.github.io/langgraph/how-tos/map-reduce/."
),
)
class MapReduceChain(Chain):
"""Map-reduce chain."""
combine_documents_chain: BaseCombineDocumentsChain
"""Chain to use to combine documents."""
text_splitter: TextSplitter
"""Text splitter to use."""
input_key: str = "input_text" #: :meta private:
output_key: str = "output_text" #: :meta private:
@classmethod
def from_params(
cls,
llm: BaseLanguageModel,
prompt: BasePromptTemplate,
text_splitter: TextSplitter,
callbacks: Callbacks = None,
combine_chain_kwargs: Optional[Mapping[str, Any]] = None,
reduce_chain_kwargs: Optional[Mapping[str, Any]] = None,
**kwargs: Any,
) -> MapReduceChain:
"""Construct a map-reduce chain that uses the chain for map and reduce."""
llm_chain = LLMChain(llm=llm, prompt=prompt, callbacks=callbacks)
stuff_chain = StuffDocumentsChain(
llm_chain=llm_chain,
callbacks=callbacks,
**(reduce_chain_kwargs if reduce_chain_kwargs else {}),
)
reduce_documents_chain = ReduceDocumentsChain(
combine_documents_chain=stuff_chain
)
combine_documents_chain = MapReduceDocumentsChain(
llm_chain=llm_chain,
reduce_documents_chain=reduce_documents_chain,
callbacks=callbacks,
**(combine_chain_kwargs if combine_chain_kwargs else {}),
)
return cls(
combine_documents_chain=combine_documents_chain,
text_splitter=text_splitter,
callbacks=callbacks,
**kwargs,
)
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
@property
def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return output key.
:meta private:
"""
return [self.output_key]
def _call(
self,
inputs: Dict[str, str],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
# Split the larger text into smaller chunks.
doc_text = inputs.pop(self.input_key)
texts = self.text_splitter.split_text(doc_text)
docs = [Document(page_content=text) for text in texts]
_inputs: Dict[str, Any] = {
**inputs,
self.combine_documents_chain.input_key: docs,
}
outputs = self.combine_documents_chain.run(
_inputs, callbacks=_run_manager.get_child()
)
return {self.output_key: outputs}
|
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.optimizers import optimizer
@keras_export(["keras.optimizers.Adadelta"])
class Adadelta(optimizer.Optimizer):
"""Optimizer that implements the Adadelta algorithm.
Adadelta optimization is a stochastic gradient descent method that is based
on adaptive learning rate per dimension to address two drawbacks:
- The continual decay of learning rates throughout training.
- The need for a manually selected global learning rate.
Adadelta is a more robust extension of Adagrad that adapts learning rates
based on a moving window of gradient updates, instead of accumulating all
past gradients. This way, Adadelta continues learning even when many updates
have been done. Compared to Adagrad, in the original version of Adadelta you
don't have to set an initial learning rate. In this version, the initial
learning rate can be set, as in most other Keras optimizers.
Args:
learning_rate: A float, a
`keras.optimizers.schedules.LearningRateSchedule` instance, or
a callable that takes no arguments and returns the actual value to
use. The learning rate. Defaults to `0.001`. Note that `Adadelta`
tends to benefit from higher initial learning rate values compared
to other optimizers. To match the exact form in the original paper,
use 1.0.
rho: A floating point value. The decay rate. Defaults to `0.95`.
epsilon: Small floating point value for maintaining numerical stability.
{{base_optimizer_keyword_args}}
Reference:
- [Zeiler, 2012](http://arxiv.org/abs/1212.5701)
"""
def __init__(
self,
learning_rate=0.001,
rho=0.95,
epsilon=1e-7,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
loss_scale_factor=None,
gradient_accumulation_steps=None,
name="adadelta",
**kwargs,
):
super().__init__(
learning_rate=learning_rate,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
loss_scale_factor=loss_scale_factor,
gradient_accumulation_steps=gradient_accumulation_steps,
name=name,
**kwargs,
)
self.rho = rho
self.epsilon = epsilon
def build(self, var_list):
if self.built:
return
super().build(var_list)
self._accumulated_grads, self._accumulated_delta_vars = (
self.add_optimizer_variables(
var_list, ["accumulated_grad", "accumulated_delta_var"]
)
)
def update_step(self, grad, variable, learning_rate):
"""Update step given gradient and the associated model variable."""
lr = ops.cast(learning_rate, variable.dtype)
grad = ops.cast(grad, variable.dtype)
rho = self.rho
accumulated_grad = self._accumulated_grads[
self._get_variable_index(variable)
]
accumulated_delta_var = self._accumulated_delta_vars[
self._get_variable_index(variable)
]
def rms(x):
return ops.sqrt(ops.add(x, self.epsilon))
self.assign(
accumulated_grad,
ops.add(
rho * accumulated_grad, ops.multiply(1 - rho, ops.square(grad))
),
)
delta_var = ops.negative(
ops.divide(
ops.multiply(rms(accumulated_delta_var), grad),
rms(accumulated_grad),
)
)
self.assign(
accumulated_delta_var,
ops.add(
ops.multiply(rho, accumulated_delta_var),
ops.multiply(1 - rho, ops.square(delta_var)),
),
)
self.assign_add(variable, ops.multiply(lr, delta_var))
def get_config(self):
config = super().get_config()
config.update(
{
"rho": self.rho,
"epsilon": self.epsilon,
}
)
return config
Adadelta.__doc__ = Adadelta.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
|
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.optimizers import optimizer
@keras_export(["keras.optimizers.Adadelta"])
class Adadelta(optimizer.Optimizer):
"""Optimizer that implements the Adadelta algorithm.
Adadelta optimization is a stochastic gradient descent method that is based
on adaptive learning rate per dimension to address two drawbacks:
- The continual decay of learning rates throughout training.
- The need for a manually selected global learning rate.
Adadelta is a more robust extension of Adagrad that adapts learning rates
based on a moving window of gradient updates, instead of accumulating all
past gradients. This way, Adadelta continues learning even when many updates
have been done. Compared to Adagrad, in the original version of Adadelta you
don't have to set an initial learning rate. In this version, the initial
learning rate can be set, as in most other Keras optimizers.
Args:
learning_rate: A float, a
`keras.optimizers.schedules.LearningRateSchedule` instance, or
a callable that takes no arguments and returns the actual value to
use. The learning rate. Defaults to `0.001`. Note that `Adadelta`
tends to benefit from higher initial learning rate values compared
to other optimizers. To match the exact form in the original paper,
use 1.0.
rho: A floating point value. The decay rate. Defaults to `0.95`.
epsilon: Small floating point value for maintaining numerical stability.
{{base_optimizer_keyword_args}}
Reference:
- [Zeiler, 2012](http://arxiv.org/abs/1212.5701)
"""
def __init__(
self,
learning_rate=0.001,
rho=0.95,
epsilon=1e-7,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
loss_scale_factor=None,
gradient_accumulation_steps=None,
name="adadelta",
**kwargs,
):
super().__init__(
learning_rate=learning_rate,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
loss_scale_factor=loss_scale_factor,
gradient_accumulation_steps=gradient_accumulation_steps,
name=name,
**kwargs,
)
self.rho = rho
self.epsilon = epsilon
def build(self, var_list):
if self.built:
return
super().build(var_list)
self._accumulated_grads = self.add_optimizer_variables(
var_list, "accumulated_grad"
)
self._accumulated_delta_vars = self.add_optimizer_variables(
var_list, "accumulated_delta_var"
)
def update_step(self, grad, variable, learning_rate):
"""Update step given gradient and the associated model variable."""
lr = ops.cast(learning_rate, variable.dtype)
grad = ops.cast(grad, variable.dtype)
rho = self.rho
accumulated_grad = self._accumulated_grads[
self._get_variable_index(variable)
]
accumulated_delta_var = self._accumulated_delta_vars[
self._get_variable_index(variable)
]
def rms(x):
return ops.sqrt(ops.add(x, self.epsilon))
self.assign(
accumulated_grad,
ops.add(
rho * accumulated_grad, ops.multiply(1 - rho, ops.square(grad))
),
)
delta_var = ops.negative(
ops.divide(
ops.multiply(rms(accumulated_delta_var), grad),
rms(accumulated_grad),
)
)
self.assign(
accumulated_delta_var,
ops.add(
ops.multiply(rho, accumulated_delta_var),
ops.multiply(1 - rho, ops.square(delta_var)),
),
)
self.assign_add(variable, ops.multiply(lr, delta_var))
def get_config(self):
config = super().get_config()
config.update(
{
"rho": self.rho,
"epsilon": self.epsilon,
}
)
return config
Adadelta.__doc__ = Adadelta.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
|
# coding: utf-8
from pathlib import Path
import pandas as pd
from sklearn.metrics import mean_squared_error
import lightgbm as lgb
print('Loading data...')
# load or create your dataset
regression_example_dir = Path(__file__).absolute().parents[1] / 'regression'
df_train = pd.read_csv(str(regression_example_dir / 'regression.train'), header=None, sep='\t')
df_test = pd.read_csv(str(regression_example_dir / 'regression.test'), header=None, sep='\t')
y_train = df_train[0]
y_test = df_test[0]
X_train = df_train.drop(0, axis=1)
X_test = df_test.drop(0, axis=1)
# create dataset for lightgbm
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
# specify your configurations as a dict
params = {
'boosting_type': 'gbdt',
'objective': 'regression',
'metric': {'l2', 'l1'},
'num_leaves': 31,
'learning_rate': 0.05,
'feature_fraction': 0.9,
'bagging_fraction': 0.8,
'bagging_freq': 5,
'verbose': 0
}
print('Starting training...')
# train
gbm = lgb.train(params,
lgb_train,
num_boost_round=20,
valid_sets=lgb_eval,
callbacks=[lgb.early_stopping(stopping_rounds=5)])
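# The early_stopping callback records the best round in gbm.best_iteration,
# which the predict() call below uses via num_iteration=gbm.best_iteration.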
print('Saving model...')
# save model to file
gbm.save_model('model.txt')
print('Starting predicting...')
# predict
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration)
# eval
rmse_test = mean_squared_error(y_test, y_pred) ** 0.5
print(f'The RMSE of prediction is: {rmse_test}')
|
# coding: utf-8
from pathlib import Path
import pandas as pd
from sklearn.metrics import mean_squared_error
import lightgbm as lgb
print('Loading data...')
# load or create your dataset
regression_example_dir = Path(__file__).absolute().parents[1] / 'regression'
df_train = pd.read_csv(str(regression_example_dir / 'regression.train'), header=None, sep='\t')
df_test = pd.read_csv(str(regression_example_dir / 'regression.test'), header=None, sep='\t')
y_train = df_train[0]
y_test = df_test[0]
X_train = df_train.drop(0, axis=1)
X_test = df_test.drop(0, axis=1)
# create dataset for lightgbm
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
# specify your configurations as a dict
params = {
'boosting_type': 'gbdt',
'objective': 'regression',
'metric': {'l2', 'l1'},
'num_leaves': 31,
'learning_rate': 0.05,
'feature_fraction': 0.9,
'bagging_fraction': 0.8,
'bagging_freq': 5,
'verbose': 0
}
print('Starting training...')
# train
gbm = lgb.train(params,
lgb_train,
num_boost_round=20,
valid_sets=lgb_eval,
early_stopping_rounds=5)
print('Saving model...')
# save model to file
gbm.save_model('model.txt')
print('Starting predicting...')
# predict
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration)
# eval
rmse_test = mean_squared_error(y_test, y_pred) ** 0.5
print(f'The RMSE of prediction is: {rmse_test}')
|
# Copyright (c) OpenMMLab. All rights reserved.
from .hook import Hook
from .iter_timer_hook import IterTimerHook
from .optimizer_hook import OptimizerHook
from .param_scheduler_hook import ParamSchedulerHook
from .sampler_seed_hook import DistSamplerSeedHook
__all__ = [
'Hook', 'IterTimerHook', 'DistSamplerSeedHook', 'ParamSchedulerHook',
'OptimizerHook'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .hook import Hook
from .iter_timer_hook import IterTimerHook
from .sampler_seed_hook import DistSamplerSeedHook
from .param_scheduler_hook import ParamSchedulerHook
__all__ = [
'Hook', 'IterTimerHook', 'DistSamplerSeedHook', 'ParamSchedulerHook'
]
|
from typing import TYPE_CHECKING, TypeVar, List, Union, Optional, Dict, Sequence
if TYPE_CHECKING:
import numpy as np
import tensorflow
import torch
# Define the expected input type that your ANN search supports
MilvusArrayType = TypeVar(
'MilvusArrayType',
np.ndarray,
tensorflow.Tensor,
torch.Tensor,
Sequence[float],
)
from docarray import Document, DocumentArray
class FindMixin:
def _find(
self,
query: 'MilvusArrayType',
limit: int = 10,
filter: Optional[Dict] = None,
param=None,
**kwargs
) -> List['DocumentArray']:
"""Returns `limit` approximate nearest neighbors given a batch of input queries.
        For a single query this should return a DocumentArray; for a batch of queries, a list of
        DocumentArrays containing the closest Documents for each query.
"""
if param is None:
param = dict()
kwargs = self._update_kwargs_from_config('consistency_level', **kwargs)
with self.loaded_collection():
results = self._collection.search(
data=query,
anns_field='embedding',
limit=limit,
expr=filter,
param=param,
output_fields=['serialized'],
**kwargs,
)
return self._docs_from_search_response(results, distance=self._config.distance)
def _filter(self, filter, limit=10, **kwargs):
kwargs = self._update_kwargs_from_config('consistency_level', **kwargs)
with self.loaded_collection():
results = self._collection.query(
expr=filter,
limit=limit,
output_fields=['serialized'],
**kwargs,
)
return self._docs_from_query_response(results)[:limit]
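# A minimal usage sketch (hypothetical: assumes `da` is a DocumentArray backed
# by Milvus with 128-dimensional embeddings, and that its public `find` method
# dispatches to _find above):
#
#   import numpy as np
#
#   matches = da.find(np.random.rand(1, 128), limit=5)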
|
from typing import TYPE_CHECKING, TypeVar, List, Union, Optional, Dict, Sequence
if TYPE_CHECKING:
import numpy as np
import tensorflow
import torch
# Define the expected input type that your ANN search supports
MilvusArrayType = TypeVar(
'MilvusArrayType',
np.ndarray,
tensorflow.Tensor,
torch.Tensor,
Sequence[float],
)
from docarray import Document, DocumentArray
class FindMixin:
def _find(
self,
query: 'MilvusArrayType',
limit: int = 10,
filter: Optional[Dict] = None,
param=None,
**kwargs
) -> List['DocumentArray']:
"""Returns `limit` approximate nearest neighbors given a batch of input queries.
        For a single query this should return a DocumentArray; for a batch of queries, a list of
        DocumentArrays containing the closest Documents for each query.
"""
if param is None:
param = dict()
kwargs = self._update_kwargs_from_config('consistency_level', **kwargs)
with self.loaded_collection():
results = self._collection.search(
data=query,
anns_field='embedding',
limit=limit,
expr=filter,
param=param,
output_fields=['serialized'],
**kwargs,
)
return self._docs_from_search_response(results)
def _filter(self, filter, limit=10, **kwargs):
kwargs = self._update_kwargs_from_config('consistency_level', **kwargs)
with self.loaded_collection():
results = self._collection.query(
expr=filter,
limit=limit,
output_fields=['serialized'],
**kwargs,
)
return self._docs_from_query_response(results)[:limit]
|
from .backend_utils import set_audio_backend
from .case_utils import (
HttpServerMixin,
is_ffmpeg_available,
PytorchTestCase,
skipIfNoCtcDecoder,
skipIfNoCuda,
skipIfNoExec,
skipIfNoFFmpeg,
skipIfNoKaldi,
skipIfNoModule,
skipIfNoQengine,
skipIfNoSox,
skipIfPy310,
skipIfRocm,
TempDirMixin,
TestBaseMixin,
TorchaudioTestCase,
)
from .data_utils import get_asset_path, get_sinusoid, get_spectrogram, get_whitenoise
from .func_utils import torch_script
from .image_utils import get_image, save_image
from .parameterized_utils import load_params, nested_params
from .wav_utils import get_wav_data, load_wav, normalize_wav, save_wav
__all__ = [
"get_asset_path",
"get_whitenoise",
"get_sinusoid",
"get_spectrogram",
"set_audio_backend",
"TempDirMixin",
"HttpServerMixin",
"TestBaseMixin",
"PytorchTestCase",
"TorchaudioTestCase",
"is_ffmpeg_available",
"skipIfNoCtcDecoder",
"skipIfNoCuda",
"skipIfNoExec",
"skipIfNoModule",
"skipIfNoKaldi",
"skipIfNoSox",
"skipIfNoSoxBackend",
"skipIfRocm",
"skipIfNoQengine",
"skipIfNoFFmpeg",
"skipIfPy310",
"get_wav_data",
"normalize_wav",
"load_wav",
"save_wav",
"load_params",
"nested_params",
"torch_script",
"save_image",
"get_image",
]
|
from .backend_utils import (
set_audio_backend,
)
from .case_utils import (
TempDirMixin,
HttpServerMixin,
TestBaseMixin,
PytorchTestCase,
TorchaudioTestCase,
is_ffmpeg_available,
skipIfNoCtcDecoder,
skipIfNoCuda,
skipIfNoExec,
skipIfNoModule,
skipIfNoKaldi,
skipIfNoSox,
skipIfRocm,
skipIfNoQengine,
skipIfNoFFmpeg,
skipIfPy310,
)
from .data_utils import (
get_asset_path,
get_whitenoise,
get_sinusoid,
get_spectrogram,
)
from .func_utils import torch_script
from .image_utils import (
save_image,
get_image,
)
from .parameterized_utils import load_params, nested_params
from .wav_utils import (
get_wav_data,
normalize_wav,
load_wav,
save_wav,
)
__all__ = [
"get_asset_path",
"get_whitenoise",
"get_sinusoid",
"get_spectrogram",
"set_audio_backend",
"TempDirMixin",
"HttpServerMixin",
"TestBaseMixin",
"PytorchTestCase",
"TorchaudioTestCase",
"is_ffmpeg_available",
"skipIfNoCtcDecoder",
"skipIfNoCuda",
"skipIfNoExec",
"skipIfNoModule",
"skipIfNoKaldi",
"skipIfNoSox",
"skipIfNoSoxBackend",
"skipIfRocm",
"skipIfNoQengine",
"skipIfNoFFmpeg",
"skipIfPy310",
"get_wav_data",
"normalize_wav",
"load_wav",
"save_wav",
"load_params",
"nested_params",
"torch_script",
"save_image",
"get_image",
]
|
import numpy as np
from absl.testing import parameterized
from keras.src import backend
from keras.src import testing
from keras.src.backend import KerasTensor
from keras.src.layers import InputLayer
class InputLayerTest(testing.TestCase):
# Testing happy path for layer without input tensor
@parameterized.named_parameters(
[
{"testcase_name": "dense", "sparse": False},
{"testcase_name": "sparse", "sparse": True},
]
)
def test_input_basic(self, sparse):
input_shape = (2, 3)
batch_size = 4
dtype = "float32"
ndim = len(tuple((batch_size,) + input_shape))
init_kwargs = {
"shape": input_shape,
"batch_size": batch_size,
"dtype": dtype,
"sparse": sparse,
}
if sparse and not backend.SUPPORTS_SPARSE_TENSORS:
with self.assertRaisesRegex(
ValueError, "`sparse=True` is not supported"
):
InputLayer(**init_kwargs)
return
values = InputLayer(**init_kwargs)
self.assertEqual(values.dtype, dtype)
self.assertEqual(values.batch_shape[0], batch_size)
self.assertEqual(values.batch_shape[1:], input_shape)
self.assertEqual(values.sparse, sparse)
self.assertEqual(values.trainable, True)
self.assertIsInstance(values.output, KerasTensor)
self.assertEqual(values.output.ndim, ndim)
self.assertEqual(values.output.dtype, dtype)
self.assertEqual(values.output.sparse, sparse)
# Testing shape is not None and batch_shape is not None condition
def test_input_error1(self):
input_shape = (2, 3)
with self.assertRaisesRegex(
ValueError, "cannot pass both `shape` and `batch_shape`"
):
InputLayer(shape=input_shape, batch_shape=input_shape)
# Testing batch_size is not None and batch_shape is not None
def test_input_error2(self):
input_shape = (2, 3)
batch_size = 4
with self.assertRaisesRegex(
ValueError, "cannot pass both `batch_size` and `batch_shape`"
):
InputLayer(batch_size=batch_size, batch_shape=input_shape)
# Testing shape is None and batch_shape is None
def test_input_error3(self):
with self.assertRaisesRegex(ValueError, "pass a `shape` argument."):
InputLayer(shape=None, batch_shape=None)
# Testing Input tensor is not Keras tensor
def test_input_tensor_error(self):
input_shape = (2, 3)
batch_size = 4
input_tensor = np.zeros(input_shape)
with self.assertRaisesRegex(
ValueError, "Argument `input_tensor` must be a KerasTensor"
):
InputLayer(
shape=input_shape,
batch_size=batch_size,
input_tensor=input_tensor,
)
# Testing happy path for layer with input tensor
    def test_input_tensor(self):
input_shape = (2, 3)
batch_size = 4
dtype = "float32"
input_tensor = KerasTensor(shape=input_shape, dtype=dtype)
values = InputLayer(
shape=input_shape,
batch_size=batch_size,
input_tensor=input_tensor,
dtype=dtype,
)
self.assertEqual(values.dtype, dtype)
self.assertEqual(values.batch_shape[0], batch_size)
self.assertEqual(values.batch_shape[1:], input_shape)
self.assertEqual(values.trainable, True)
self.assertIsInstance(values.output, KerasTensor)
self.assertEqual(values.output, input_tensor)
self.assertEqual(values.output.ndim, input_tensor.ndim)
self.assertEqual(values.output.dtype, dtype)
def test_input_shape_deprecated(self):
input_shape = (2, 3)
batch_size = 4
dtype = "float32"
with self.assertWarnsRegex(
UserWarning,
"Argument `input_shape` is deprecated. Use `shape` instead.",
):
layer = InputLayer(
input_shape=input_shape, batch_size=batch_size, dtype=dtype
)
self.assertEqual(layer.batch_shape[0], batch_size)
self.assertEqual(layer.batch_shape[1:], input_shape)
self.assertEqual(layer.dtype, dtype)
self.assertIsInstance(layer.output, KerasTensor)
def test_call_method(self):
layer = InputLayer(shape=(32,))
output = layer.call()
self.assertIsNone(output)
def test_numpy_shape(self):
# non-python int type shapes should be ok
InputLayer(shape=(np.int64(32),))
|
import numpy as np
from absl.testing import parameterized
from keras.src import backend
from keras.src import testing
from keras.src.backend import KerasTensor
from keras.src.layers import InputLayer
class InputLayerTest(testing.TestCase, parameterized.TestCase):
# Testing happy path for layer without input tensor
@parameterized.named_parameters(
[
{"testcase_name": "dense", "sparse": False},
{"testcase_name": "sparse", "sparse": True},
]
)
def test_input_basic(self, sparse):
input_shape = (2, 3)
batch_size = 4
dtype = "float32"
ndim = len(tuple((batch_size,) + input_shape))
init_kwargs = {
"shape": input_shape,
"batch_size": batch_size,
"dtype": dtype,
"sparse": sparse,
}
if sparse and not backend.SUPPORTS_SPARSE_TENSORS:
with self.assertRaisesRegex(
ValueError, "`sparse=True` is not supported"
):
InputLayer(**init_kwargs)
return
values = InputLayer(**init_kwargs)
self.assertEqual(values.dtype, dtype)
self.assertEqual(values.batch_shape[0], batch_size)
self.assertEqual(values.batch_shape[1:], input_shape)
self.assertEqual(values.sparse, sparse)
self.assertEqual(values.trainable, True)
self.assertIsInstance(values.output, KerasTensor)
self.assertEqual(values.output.ndim, ndim)
self.assertEqual(values.output.dtype, dtype)
self.assertEqual(values.output.sparse, sparse)
# Testing shape is not None and batch_shape is not None condition
def test_input_error1(self):
input_shape = (2, 3)
with self.assertRaisesRegex(
ValueError, "cannot pass both `shape` and `batch_shape`"
):
InputLayer(shape=input_shape, batch_shape=input_shape)
# Testing batch_size is not None and batch_shape is not None
def test_input_error2(self):
input_shape = (2, 3)
batch_size = 4
with self.assertRaisesRegex(
ValueError, "cannot pass both `batch_size` and `batch_shape`"
):
InputLayer(batch_size=batch_size, batch_shape=input_shape)
# Testing shape is None and batch_shape is None
def test_input_error3(self):
with self.assertRaisesRegex(ValueError, "pass a `shape` argument."):
InputLayer(shape=None, batch_shape=None)
# Testing Input tensor is not Keras tensor
def test_input_tensor_error(self):
input_shape = (2, 3)
batch_size = 4
input_tensor = np.zeros(input_shape)
with self.assertRaisesRegex(
ValueError, "Argument `input_tensor` must be a KerasTensor"
):
InputLayer(
shape=input_shape,
batch_size=batch_size,
input_tensor=input_tensor,
)
# Testing happy path for layer with input tensor
    def test_input_tensor(self):
input_shape = (2, 3)
batch_size = 4
dtype = "float32"
input_tensor = KerasTensor(shape=input_shape, dtype=dtype)
values = InputLayer(
shape=input_shape,
batch_size=batch_size,
input_tensor=input_tensor,
dtype=dtype,
)
self.assertEqual(values.dtype, dtype)
self.assertEqual(values.batch_shape[0], batch_size)
self.assertEqual(values.batch_shape[1:], input_shape)
self.assertEqual(values.trainable, True)
self.assertIsInstance(values.output, KerasTensor)
self.assertEqual(values.output, input_tensor)
self.assertEqual(values.output.ndim, input_tensor.ndim)
self.assertEqual(values.output.dtype, dtype)
def test_input_shape_deprecated(self):
input_shape = (2, 3)
batch_size = 4
dtype = "float32"
with self.assertWarnsRegex(
UserWarning,
"Argument `input_shape` is deprecated. Use `shape` instead.",
):
layer = InputLayer(
input_shape=input_shape, batch_size=batch_size, dtype=dtype
)
self.assertEqual(layer.batch_shape[0], batch_size)
self.assertEqual(layer.batch_shape[1:], input_shape)
self.assertEqual(layer.dtype, dtype)
self.assertIsInstance(layer.output, KerasTensor)
def test_call_method(self):
layer = InputLayer(shape=(32,))
output = layer.call()
self.assertIsNone(output)
def test_numpy_shape(self):
# non-python int type shapes should be ok
InputLayer(shape=(np.int64(32),))
|
from parameterized import parameterized
from torchaudio.io import AudioEffector
from torchaudio_unittest.common_utils import get_sinusoid, skipIfNoFFmpeg, TorchaudioTestCase
from .common import lt42
@skipIfNoFFmpeg
class EffectorTest(TorchaudioTestCase):
def test_null(self):
"""No effect and codec will return the same result"""
sample_rate = 8000
frames_per_chunk = 256
effector = AudioEffector(effect=None, format=None)
original = get_sinusoid(n_channels=3, sample_rate=sample_rate, channels_first=False)
# one-go
output = effector.apply(original, sample_rate)
self.assertEqual(original, output)
# streaming
for i, chunk in enumerate(effector.stream(original, sample_rate, frames_per_chunk)):
start = i * frames_per_chunk
end = (i + 1) * frames_per_chunk
self.assertEqual(original[start:end, :], chunk)
@parameterized.expand(
[
("ogg", "flac"), # flac only supports s16 and s32
("ogg", "opus"), # opus only supports 48k Hz
("ogg", "vorbis"), # vorbis only supports stereo
# ("ogg", "vorbis", 44100),
            # this fails with a small discrepancy: 441024 vs 441000
# TODO: investigate
("wav", None),
("wav", "pcm_u8"),
("mp3", None),
("mulaw", None, 44100), # mulaw is encoded without header
]
)
def test_formats(self, format, encoder, sample_rate=8000):
"""Formats (some with restrictions) just work without an issue in effector"""
effector = AudioEffector(format=format, encoder=encoder)
original = get_sinusoid(n_channels=3, sample_rate=sample_rate, channels_first=False)
output = effector.apply(original, sample_rate)
# On 4.1 OPUS produces 8020 samples (extra 20)
# this has been fixed on 4.2+
if encoder == "opus" and lt42():
return
self.assertEqual(original.shape, output.shape)
        # Note
        # MP3 adds padding which cannot be removed when the encoded data is written to
        # a file-like object without a seek method.
        # The number of padding samples is retrievable as `AVCodecContext::initial_padding`
        # https://ffmpeg.org/doxygen/4.1/structAVCodecContext.html#a8f95550ce04f236e9915516d04d3d1ab
        # but this is not exposed yet.
        # These "priming" samples have negative time stamps, so we could also add logic
        # to discard them at decoding; however, as far as I checked, when data is loaded
        # with StreamReader, the time stamp is reset. I tried options like avoid_negative_ts,
        # https://ffmpeg.org/ffmpeg-formats.html
        # but it made no difference. Perhaps this is because the information about negative
        # timestamps is only available at the encoding side and is presumably written to the
        # header, but somehow that does not happen with a file-like object.
        # Need to investigate more to remove MP3 padding.
if format == "mp3":
return
for chunk in effector.stream(original, sample_rate, frames_per_chunk=original.size(0)):
self.assertEqual(original.shape, chunk.shape)
@parameterized.expand([("loudnorm=I=-16:LRA=11:TP=-1.5",), ("volume=2",)])
def test_effect(self, effect):
sample_rate = 8000
effector = AudioEffector(effect=effect)
original = get_sinusoid(n_channels=3, sample_rate=sample_rate, channels_first=False)
output = effector.apply(original, sample_rate)
self.assertEqual(original.shape, output.shape)
def test_resample(self):
"""Resample option allows to change the sampling rate"""
sample_rate = 8000
output_sample_rate = 16000
num_channels = 3
effector = AudioEffector(effect="lowpass")
original = get_sinusoid(n_channels=num_channels, sample_rate=sample_rate, channels_first=False)
output = effector.apply(original, sample_rate, output_sample_rate)
self.assertEqual(output.shape, [output_sample_rate, num_channels])
for chunk in effector.stream(
original, sample_rate, output_sample_rate=output_sample_rate, frames_per_chunk=output_sample_rate
):
self.assertEqual(chunk.shape, [output_sample_rate, num_channels])
|
from parameterized import parameterized
from torchaudio.io import AudioEffector
from torchaudio_unittest.common_utils import get_sinusoid, skipIfNoFFmpeg, TorchaudioTestCase
from .common import lt42
@skipIfNoFFmpeg
class EffectorTest(TorchaudioTestCase):
def test_null(self):
"""No effect and codec will return the same result"""
sample_rate = 8000
frames_per_chunk = 256
effector = AudioEffector(effect=None, format=None)
original = get_sinusoid(n_channels=3, sample_rate=sample_rate, channels_first=False)
# one-go
output = effector.apply(original, sample_rate)
self.assertEqual(original, output)
# streaming
for i, chunk in enumerate(effector.stream(original, sample_rate, frames_per_chunk)):
start = i * frames_per_chunk
end = (i + 1) * frames_per_chunk
self.assertEqual(original[start:end, :], chunk)
@parameterized.expand(
[
("ogg", "flac"), # flac only supports s16 and s32
("ogg", "opus"), # opus only supports 48k Hz
("ogg", "vorbis"), # vorbis only supports stereo
# ("ogg", "vorbis", 44100),
            # this fails with a small discrepancy: 441024 vs 441000
# TODO: investigate
("wav", None),
("wav", "pcm_u8"),
("mp3", None),
("mulaw", None, 44100), # mulaw is encoded without header
]
)
def test_formats(self, format, encoder, sample_rate=8000):
"""Formats (some with restrictions) just work without an issue in effector"""
effector = AudioEffector(format=format, encoder=encoder)
original = get_sinusoid(n_channels=3, sample_rate=sample_rate, channels_first=False)
output = effector.apply(original, sample_rate)
# On 4.1 OPUS produces 8020 samples (extra 20)
# this has been fixed on 4.2+
if encoder == "opus" and lt42():
return
self.assertEqual(original.shape, output.shape)
        # Note
        # MP3 adds padding which cannot be removed when the encoded data is written to
        # a file-like object without a seek method.
        # The number of padding samples is retrievable as `AVCodecContext::initial_padding`
        # https://ffmpeg.org/doxygen/4.1/structAVCodecContext.html#a8f95550ce04f236e9915516d04d3d1ab
        # but this is not exposed yet.
        # These "priming" samples have negative time stamps, so we could also add logic
        # to discard them at decoding; however, as far as I checked, when data is loaded
        # with StreamReader, the time stamp is reset. I tried options like avoid_negative_ts,
        # https://ffmpeg.org/ffmpeg-formats.html
        # but it made no difference. Perhaps this is because the information about negative
        # timestamps is only available at the encoding side and is presumably written to the
        # header, but somehow that does not happen with a file-like object.
        # Need to investigate more to remove MP3 padding.
if format == "mp3":
return
for chunk in effector.stream(original, sample_rate, frames_per_chunk=original.size(0)):
self.assertEqual(original.shape, chunk.shape)
@parameterized.expand([("loudnorm=I=-16:LRA=11:TP=-1.5",), ("volume=2",)])
def test_effect(self, effect):
sample_rate = 8000
effector = AudioEffector(effect=effect)
original = get_sinusoid(n_channels=3, sample_rate=sample_rate, channels_first=False)
output = effector.apply(original, sample_rate)
self.assertEqual(original.shape, output.shape)
|
import numpy as np
from numpy.typing import ArrayLike
def oscillator_bank(
frequencies: ArrayLike,
amplitudes: ArrayLike,
sample_rate: float,
time_axis: int = -2,
) -> ArrayLike:
"""Reference implementation of oscillator_bank"""
invalid = np.abs(frequencies) >= sample_rate / 2
if np.any(invalid):
amplitudes = np.where(invalid, 0.0, amplitudes)
pi2 = 2.0 * np.pi
freqs = frequencies * pi2 / sample_rate % pi2
phases = np.cumsum(freqs, axis=time_axis, dtype=freqs.dtype)
waveform = amplitudes * np.sin(phases)
return waveform
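# A minimal usage sketch (shapes follow the default time_axis=-2, i.e.
# (time, n_oscillators)):
#
#   sr = 8000
#   freqs = np.full((sr, 1), 440.0)  # one second of a constant 440 Hz tone
#   amps = np.ones_like(freqs)
#   tone = oscillator_bank(freqs, amps, sample_rate=sr)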
def sinc_ir(cutoff: ArrayLike, window_size: int = 513, high_pass: bool = False):
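    # Windowed-sinc FIR design: sample a sinc low-pass prototype at the given
    # normalized cutoff, apply a Hamming window, normalize the DC gain to one,
    # and, when `high_pass` is set, spectrally invert the kernel.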
if window_size % 2 == 0:
raise ValueError(f"`window_size` must be odd. Given: {window_size}")
half = window_size // 2
dtype = cutoff.dtype
idx = np.linspace(-half, half, window_size, dtype=dtype)
filt = np.sinc(cutoff[..., None] * idx[None, ...])
filt *= np.hamming(window_size).astype(dtype)[None, ...]
filt /= np.abs(filt.sum(axis=-1, keepdims=True))
if high_pass:
filt *= -1
filt[..., half] = 1.0 + filt[..., half]
return filt
def freq_ir(magnitudes):
ir = np.fft.fftshift(np.fft.irfft(magnitudes), axes=-1)
window = np.hanning(ir.shape[-1])
return (ir * window).astype(magnitudes.dtype)
|
import numpy as np
from numpy.typing import ArrayLike
def oscillator_bank(
frequencies: ArrayLike,
amplitudes: ArrayLike,
sample_rate: float,
time_axis: int = -2,
) -> ArrayLike:
"""Reference implementation of oscillator_bank"""
invalid = np.abs(frequencies) >= sample_rate / 2
if np.any(invalid):
amplitudes = np.where(invalid, 0.0, amplitudes)
pi2 = 2.0 * np.pi
freqs = frequencies * pi2 / sample_rate % pi2
phases = np.cumsum(freqs, axis=time_axis, dtype=freqs.dtype)
waveform = amplitudes * np.sin(phases)
return waveform
def sinc_ir(cutoff: ArrayLike, window_size: int = 513, high_pass: bool = False):
if window_size % 2 == 0:
raise ValueError(f"`window_size` must be odd. Given: {window_size}")
half = window_size // 2
dtype = cutoff.dtype
idx = np.linspace(-half, half, window_size, dtype=dtype)
filt = np.sinc(cutoff[..., None] * idx[None, ...])
filt *= np.hamming(window_size).astype(dtype)[None, ...]
filt /= np.abs(filt.sum(axis=-1, keepdims=True))
if high_pass:
filt *= -1
filt[..., half] = 1.0 + filt[..., half]
return filt
|
# Copyright (c) OpenMMLab. All rights reserved.
from .coarse_mask_head import CoarseMaskHead
from .dynamic_mask_head import DynamicMaskHead
from .fcn_mask_head import FCNMaskHead
from .feature_relay_head import FeatureRelayHead
from .fused_semantic_head import FusedSemanticHead
from .global_context_head import GlobalContextHead
from .grid_head import GridHead
from .htc_mask_head import HTCMaskHead
from .mask_point_head import MaskPointHead
from .maskiou_head import MaskIoUHead
from .scnet_mask_head import SCNetMaskHead
from .scnet_semantic_head import SCNetSemanticHead
__all__ = [
'FCNMaskHead', 'HTCMaskHead', 'FusedSemanticHead', 'GridHead',
'MaskIoUHead', 'CoarseMaskHead', 'MaskPointHead', 'SCNetMaskHead',
'SCNetSemanticHead', 'GlobalContextHead', 'FeatureRelayHead',
'DynamicMaskHead'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .coarse_mask_head import CoarseMaskHead
from .fcn_mask_head import FCNMaskHead
from .feature_relay_head import FeatureRelayHead
from .fused_semantic_head import FusedSemanticHead
from .global_context_head import GlobalContextHead
from .grid_head import GridHead
from .htc_mask_head import HTCMaskHead
from .mask_point_head import MaskPointHead
from .maskiou_head import MaskIoUHead
from .scnet_mask_head import SCNetMaskHead
from .scnet_semantic_head import SCNetSemanticHead
__all__ = [
'FCNMaskHead', 'HTCMaskHead', 'FusedSemanticHead', 'GridHead',
'MaskIoUHead', 'CoarseMaskHead', 'MaskPointHead', 'SCNetMaskHead',
'SCNetSemanticHead', 'GlobalContextHead', 'FeatureRelayHead'
]
|
from jina.clients.mixin import AsyncHealthCheckMixin, AsyncPostMixin, AsyncProfileMixin
from jina.orchestrate.flow.base import Flow
class AsyncFlow(AsyncPostMixin, AsyncProfileMixin, AsyncHealthCheckMixin, Flow):
"""
    Asynchronous version of :class:`jina.Flow`. They share the same interface, except that in
    :class:`AsyncFlow` the :meth:`train`, :meth:`index` and :meth:`search` methods are coroutines
    (i.e. declared with the async/await syntax); simply calling them will not schedule them to be executed.
    To actually run a coroutine, users need to put it in an event loop, e.g. via ``asyncio.run()`` or
    ``asyncio.create_task()``.
    :class:`AsyncFlow` can be very useful in
    integration settings, where Jina/Jina Flow is NOT the main logic but rather serves as a part of another program.
    In this case, users often do not want to let Jina control the ``asyncio`` event loop. On the contrary, :class:`Flow`
    controls and wraps the event loop internally, making the Flow look synchronous from the outside.
In particular, :class:`AsyncFlow` makes Jina usage in Jupyter Notebook more natural and reliable.
For example, the following code
will use the eventloop that already spawned in Jupyter/ipython to run Jina Flow (instead of creating a new one).
.. highlight:: python
.. code-block:: python
from jina import AsyncFlow
from jina.types.document.generators import from_ndarray
import numpy as np
with AsyncFlow().add() as f:
await f.index(from_ndarray(np.random.random([5, 4])), on_done=print)
Notice that the above code will NOT work in standard Python REPL, as only Jupyter/ipython implements "autoawait".
.. seealso::
Asynchronous in REPL: Autoawait
https://ipython.readthedocs.io/en/stable/interactive/autoawait.html
    Another example is when using Jina as an integration. Say you have another IO-bound job ``heavylifting()``; you
    can use this feature to schedule Jina ``index()`` and ``heavylifting()`` concurrently.
    One can think of :class:`Flow` as a Jina-managed event loop, whereas :class:`AsyncFlow` is a self-managed event loop.
"""
|
from jina.clients.mixin import AsyncPostMixin
from jina.orchestrate.flow.base import Flow
class AsyncFlow(AsyncPostMixin, Flow):
"""
    Asynchronous version of :class:`jina.Flow`. They share the same interface, except that in
    :class:`AsyncFlow` the :meth:`train`, :meth:`index` and :meth:`search` methods are coroutines
    (i.e. declared with the async/await syntax); simply calling them will not schedule them to be executed.
    To actually run a coroutine, users need to put it in an event loop, e.g. via ``asyncio.run()`` or
    ``asyncio.create_task()``.
    :class:`AsyncFlow` can be very useful in
    integration settings, where Jina/Jina Flow is NOT the main logic but rather serves as a part of another program.
    In this case, users often do not want to let Jina control the ``asyncio`` event loop. On the contrary, :class:`Flow`
    controls and wraps the event loop internally, making the Flow look synchronous from the outside.
In particular, :class:`AsyncFlow` makes Jina usage in Jupyter Notebook more natural and reliable.
For example, the following code
will use the eventloop that already spawned in Jupyter/ipython to run Jina Flow (instead of creating a new one).
.. highlight:: python
.. code-block:: python
from jina import AsyncFlow
from jina.types.document.generators import from_ndarray
import numpy as np
with AsyncFlow().add() as f:
await f.index(from_ndarray(np.random.random([5, 4])), on_done=print)
    Notice that the above code will NOT work in the standard Python REPL, as only Jupyter/ipython implements "autoawait".
.. seealso::
Asynchronous in REPL: Autoawait
https://ipython.readthedocs.io/en/stable/interactive/autoawait.html
    Another example is when using Jina as an integration. Say you have another IO-bound job ``heavylifting()``; you
    can use this feature to schedule Jina ``index()`` and ``heavylifting()`` concurrently.
    One can think of :class:`Flow` as a Jina-managed event loop, whereas :class:`AsyncFlow` is a self-managed one.
"""
|
import numpy as np
from docarray.array import DocumentArray
from docarray.document import BaseDocument
from docarray.typing import Tensor
def test_get_bulk_attributes_function():
class Mmdoc(BaseDocument):
text: str
tensor: Tensor
N = 10
da = DocumentArray[Mmdoc](
(Mmdoc(text=f'hello{i}', tensor=np.zeros((3, 224, 224))) for i in range(N))
)
tensors = da._get_documents_attribute('tensor')
assert len(tensors) == N
for tensor in tensors:
assert tensor.shape == (3, 224, 224)
texts = da._get_documents_attribute('text')
assert len(texts) == N
for i, text in enumerate(texts):
assert text == f'hello{i}'
def test_get_bulk_attributes():
class Mmdoc(BaseDocument):
text: str
tensor: Tensor
N = 10
da = DocumentArray[Mmdoc](
(Mmdoc(text=f'hello{i}', tensor=np.zeros((3, 224, 224))) for i in range(N))
)
tensors = da.tensor
assert len(tensors) == N
for tensor in tensors:
assert tensor.shape == (3, 224, 224)
texts = da.text
assert len(texts) == N
for i, text in enumerate(texts):
assert text == f'hello{i}'
|
import numpy as np
from docarray.array import DocumentArray
from docarray.document import BaseDocument
from docarray.typing import Tensor
def test_get_bulk_attributes_function():
class Mmdoc(BaseDocument):
text: str
tensor: Tensor
N = 10
da = DocumentArray[Mmdoc](
(Mmdoc(text=f'hello{i}', tensor=np.zeros((3, 224, 224))) for i in range(N))
)
tensors = da._get_documents_attribute('tensor')
assert len(tensors) == N
for tensor in tensors:
assert tensor.shape == (3, 224, 224)
texts = da._get_documents_attribute('text')
assert len(texts) == N
for i, text in enumerate(texts):
assert text == f'hello{i}'
def test_get_bulk_attributes():
class Mmdoc(BaseDocument):
text: str
tensor: Tensor
N = 10
da = DocumentArray[Mmdoc](
(Mmdoc(text=f'hello{i}', tensor=np.zeros((3, 224, 224))) for i in range(N))
)
tensors = da.tensor
assert len(tensors) == N
for tensor in tensors:
assert tensor.shape == (3, 224, 224)
texts = da.text
assert len(texts) == N
for i, text in enumerate(texts):
assert text == f'hello{i}'
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Callable
from sentence_transformers.evaluation import InformationRetrievalEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseInformationRetrievalEvaluator(InformationRetrievalEvaluator):
def __init__(
self,
queries: dict[str, str], # qid => query
corpus: dict[str, str], # cid => doc
relevant_docs: dict[str, set[str]], # qid => Set[cid]
corpus_chunk_size: int = 50000,
mrr_at_k: list[int] = [10],
ndcg_at_k: list[int] = [10],
accuracy_at_k: list[int] = [1, 3, 5, 10],
precision_recall_at_k: list[int] = [1, 3, 5, 10],
map_at_k: list[int] = [100],
show_progress_bar: bool = False,
batch_size: int = 32,
name: str = "",
write_csv: bool = True,
truncate_dim: int | None = None,
score_functions: dict[str, Callable[[Tensor, Tensor], Tensor]] | None = None,
main_score_function: str | SimilarityFunction | None = None,
query_prompt: str | None = None,
query_prompt_name: str | None = None,
corpus_prompt: str | None = None,
corpus_prompt_name: str | None = None,
) -> None:
        super().__init__(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
corpus_chunk_size=corpus_chunk_size,
mrr_at_k=mrr_at_k,
ndcg_at_k=ndcg_at_k,
accuracy_at_k=accuracy_at_k,
precision_recall_at_k=precision_recall_at_k,
map_at_k=map_at_k,
show_progress_bar=show_progress_bar,
batch_size=batch_size,
name=name,
write_csv=write_csv,
truncate_dim=truncate_dim,
score_functions=score_functions,
main_score_function=main_score_function,
query_prompt=query_prompt,
query_prompt_name=query_prompt_name,
corpus_prompt=corpus_prompt,
corpus_prompt_name=corpus_prompt_name,
)
def __call__(
        self, model: SparseEncoder, output_path: str | None = None, epoch: int = -1, steps: int = -1, *args, **kwargs
) -> dict[str, float]:
return super().__call__(model=model, output_path=output_path, epoch=epoch, steps=steps, *args, **kwargs)
def compute_metrices(
self, model: SparseEncoder, corpus_model=None, corpus_embeddings: Tensor | None = None
) -> dict[str, float]:
return super().compute_metrices(model=model, corpus_model=corpus_model, corpus_embeddings=corpus_embeddings)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
prompt_name: str | None = None,
prompt: str | None = None,
**kwargs,
) -> Tensor:
return model.encode(
sentences,
prompt_name=prompt_name,
prompt=prompt,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_sparse_tensor=True,
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch=epoch, step=step)
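
# A minimal usage sketch: evaluates a SparseEncoder on a toy corpus. The
# checkpoint name below is an assumption, not pinned by this module;
# substitute any sparse encoder checkpoint.
if __name__ == "__main__":
    from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder

    sparse_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")  # assumed checkpoint
    ir_evaluator = SparseInformationRetrievalEvaluator(
        queries={"q1": "what is sparse retrieval?"},
        corpus={
            "d1": "Sparse retrieval scores documents with sparse lexical vectors.",
            "d2": "Dense retrieval uses continuous embeddings instead.",
        },
        relevant_docs={"q1": {"d1"}},
        name="toy-ir",
    )
    print(ir_evaluator(sparse_model))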
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Callable
from sentence_transformers.evaluation import InformationRetrievalEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseInformationRetrievalEvaluator(InformationRetrievalEvaluator):
def __init__(
self,
queries: dict[str, str], # qid => query
corpus: dict[str, str], # cid => doc
relevant_docs: dict[str, set[str]], # qid => Set[cid]
corpus_chunk_size: int = 50000,
mrr_at_k: list[int] = [10],
ndcg_at_k: list[int] = [10],
accuracy_at_k: list[int] = [1, 3, 5, 10],
precision_recall_at_k: list[int] = [1, 3, 5, 10],
map_at_k: list[int] = [100],
show_progress_bar: bool = False,
batch_size: int = 32,
name: str = "",
write_csv: bool = True,
truncate_dim: int | None = None,
score_functions: dict[str, Callable[[Tensor, Tensor], Tensor]] | None = None,
main_score_function: str | SimilarityFunction | None = None,
query_prompt: str | None = None,
query_prompt_name: str | None = None,
corpus_prompt: str | None = None,
corpus_prompt_name: str | None = None,
) -> None:
super().__init__(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
corpus_chunk_size=corpus_chunk_size,
mrr_at_k=mrr_at_k,
ndcg_at_k=ndcg_at_k,
accuracy_at_k=accuracy_at_k,
precision_recall_at_k=precision_recall_at_k,
map_at_k=map_at_k,
show_progress_bar=show_progress_bar,
batch_size=batch_size,
name=name,
write_csv=write_csv,
truncate_dim=truncate_dim,
score_functions=score_functions,
main_score_function=main_score_function,
query_prompt=query_prompt,
query_prompt_name=query_prompt_name,
corpus_prompt=corpus_prompt,
corpus_prompt_name=corpus_prompt_name,
)
def __call__(
        self, model: SparseEncoder, output_path: str | None = None, epoch: int = -1, steps: int = -1, *args, **kwargs
) -> dict[str, float]:
return super().__call__(model, output_path, epoch, steps, *args, **kwargs)
def compute_metrices(
self, model: SparseEncoder, corpus_model=None, corpus_embeddings: Tensor | None = None
) -> dict[str, float]:
return super().compute_metrices(model, corpus_model, corpus_embeddings)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
prompt_name: str | None = None,
prompt: str | None = None,
**kwargs,
) -> Tensor:
return model.encode(
sentences,
prompt_name=prompt_name,
prompt=prompt,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_sparse_tensor=True,
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch, step)
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
# 270k iterations with batch_size 64 is roughly equivalent to 144 epochs
'../common/ssj_270k_coco-instance.py',
]
image_size = (1024, 1024)
batch_augments = [
dict(type='BatchFixedSizePad', size=image_size, pad_mask=True)
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN that handles empty tensor in head. It can be changed to
# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True)
model = dict(
# the model is trained from scratch, so init_cfg is None
data_preprocessor=dict(
# pad_size_divisor=32 is unnecessary in training but necessary
# in testing.
pad_size_divisor=32,
batch_augments=batch_augments),
backbone=dict(
frozen_stages=-1, norm_eval=False, norm_cfg=norm_cfg, init_cfg=None),
neck=dict(norm_cfg=norm_cfg),
rpn_head=dict(num_convs=2), # leads to 0.1+ mAP
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=head_norm_cfg),
mask_head=dict(norm_cfg=head_norm_cfg)))
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
# 270k iterations with batch_size 64 is roughly equivalent to 144 epochs
'../common/ssj_270k_coco-instance.py',
]
image_size = (1024, 1024)
batch_augments = [
dict(type='BatchFixedSizePad', size=image_size, pad_mask=True)
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN that handles empty tensor in head. It can be changed to
# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed
# Requires MMCV-full after https://github.com/open-mmlab/mmcv/pull/1205.
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True)
model = dict(
# the model is trained from scratch, so init_cfg is None
data_preprocessor=dict(
# pad_size_divisor=32 is unnecessary in training but necessary
# in testing.
pad_size_divisor=32,
batch_augments=batch_augments),
backbone=dict(
frozen_stages=-1, norm_eval=False, norm_cfg=norm_cfg, init_cfg=None),
neck=dict(norm_cfg=norm_cfg),
rpn_head=dict(num_convs=2), # leads to 0.1+ mAP
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=head_norm_cfg),
mask_head=dict(norm_cfg=head_norm_cfg)))
|
__version__ = '0.1.0'
from docarray.array.array import DocumentArray
from docarray.document.document import BaseDocument as Document
from docarray.predefined_document import Image, Mesh3D, PointCloud3D, Text
__all__ = ['Document', 'DocumentArray', 'Image', 'Text', 'Mesh3D', 'PointCloud3D']
|
__version__ = '0.1.0'
from docarray.array import DocumentArray
from docarray.document.document import BaseDocument as Document
from docarray.predefined_document import Image, Mesh3D, PointCloud3D, Text
__all__ = ['Document', 'DocumentArray', 'Image', 'Text', 'Mesh3D', 'PointCloud3D']
|
_base_ = './rtmdet_s_8xb32-300e_coco.py'
checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-tiny_imagenet_600e.pth' # noqa
model = dict(
backbone=dict(
deepen_factor=0.167,
widen_factor=0.375,
init_cfg=dict(
type='Pretrained', prefix='backbone.', checkpoint=checkpoint)),
neck=dict(in_channels=[96, 192, 384], out_channels=96, num_csp_blocks=1),
bbox_head=dict(in_channels=96, feat_channels=96, exp_on_reg=False))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='CachedMosaic',
img_scale=(640, 640),
pad_val=114.0,
max_cached_images=20,
random_pop=False),
dict(
type='RandomResize',
scale=(1280, 1280),
ratio_range=(0.5, 2.0),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(640, 640)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(
type='CachedMixUp',
img_scale=(640, 640),
ratio_range=(1.0, 1.0),
max_cached_images=10,
random_pop=False,
pad_val=(114, 114, 114),
prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = './rtmdet_s_8xb32-300e_coco.py'
checkpoint = 'TODO:imagenet_pretrain' # noqa
model = dict(
backbone=dict(
deepen_factor=0.167,
widen_factor=0.375,
init_cfg=dict(
type='Pretrained', prefix='backbone.', checkpoint=checkpoint)),
neck=dict(in_channels=[96, 192, 384], out_channels=96, num_csp_blocks=1),
bbox_head=dict(in_channels=96, feat_channels=96, exp_on_reg=False))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='CachedMosaic',
img_scale=(640, 640),
pad_val=114.0,
max_cached_images=20,
random_pop=False),
dict(
type='RandomResize',
scale=(1280, 1280),
ratio_range=(0.5, 2.0),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(640, 640)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(
type='CachedMixUp',
img_scale=(640, 640),
ratio_range=(1.0, 1.0),
max_cached_images=10,
random_pop=False,
pad_val=(114, 114, 114),
prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
from typing import Union, Iterable, Dict
from ..base.seqlike import BaseSequenceLikeMixin
from .... import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods for DocumentArray with Elastic as storage"""
def __eq__(self, other):
"""Compare this object to the other, returns True if and only if other
as the same type as self and other has the same meta information
:param other: the other object to check for equality
:return: ``True`` if other is equal to self
"""
# two DAW are considered as the same if they have the same client meta data
return (
type(self) is type(other)
and self._client.get_meta() == other._client.get_meta()
and self._config == other._config
)
def __len__(self):
"""Return the length of :class:`DocumentArray` that uses Elastic as storage
:return: the length of this :class:`DocumentArrayElastic` object
"""
try:
return self._client.count(index=self._config.index_name)["count"]
        except Exception:
return 0
def __contains__(self, x: Union[str, 'Document']):
"""Check if ``x`` is contained in this :class:`DocumentArray` with Elastic storage
:param x: the id of the document to check or the document object itself
:return: True if ``x`` is contained in self
"""
if isinstance(x, str):
return self._doc_id_exists(x)
elif isinstance(x, Document):
return self._doc_id_exists(x.id)
else:
return False
def __del__(self):
"""Delete this :class:`DocumentArrayElastic` object"""
self._save_offset2ids()
# if not self._persist:
# self._offset2ids.clear()
def __repr__(self):
"""Return the string representation of :class:`DocumentArrayElastic` object
:return: string representation of this object
"""
return f'<{self.__class__.__name__} (length={len(self)}) at {id(self)}>'
def _upload_batch(self, docs: Iterable['Document']):
batch = []
for doc in docs:
batch.append(self._document_to_elastic(doc))
if len(batch) > self._config.batch_size:
self._send_requests(batch)
self._refresh(self._config.index_name)
batch = []
if len(batch) > 0:
self._send_requests(batch)
self._refresh(self._config.index_name)
def extend(self, docs: Iterable['Document']):
docs = list(docs)
self._upload_batch(docs)
self._offset2ids.extend([doc.id for doc in docs])
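
# A minimal usage sketch, assuming a reachable Elasticsearch instance: this
# mixin is consumed through DocumentArray's Elastic storage backend rather
# than instantiated directly, e.g.:
#
#     import numpy as np
#     from docarray import Document, DocumentArray
#
#     da = DocumentArray(storage='elasticsearch', config={'n_dim': 3})
#     da.extend([Document(embedding=np.ones(3)) for _ in range(2)])
#     assert len(da) == 2  # counted via the Elastic index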
|
from typing import Union, Iterable, Dict
from ..base.seqlike import BaseSequenceLikeMixin
from .... import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods for DocumentArray with Elastic as storage"""
def __eq__(self, other):
"""Compare this object to the other, returns True if and only if other
as the same type as self and other has the same meta information
:param other: the other object to check for equality
:return: ``True`` if other is equal to self
"""
# two DAW are considered as the same if they have the same client meta data
return (
type(self) is type(other)
and self._client.get_meta() == other._client.get_meta()
and self._config == other._config
)
def __len__(self):
"""Return the length of :class:`DocumentArray` that uses Elastic as storage
:return: the length of this :class:`DocumentArrayElastic` object
"""
try:
return self._client.count(index=self._config.index_name)["count"]
        except Exception:
return 0
def __contains__(self, x: Union[str, 'Document']):
"""Check if ``x`` is contained in this :class:`DocumentArray` with Elastic storage
:param x: the id of the document to check or the document object itself
:return: True if ``x`` is contained in self
"""
if isinstance(x, str):
return self._doc_id_exists(x)
elif isinstance(x, Document):
return self._doc_id_exists(x.id)
else:
return False
def __del__(self):
"""Delete this :class:`DocumentArrayElastic` object"""
self._save_offset2ids()
# if not self._persist:
# self._offset2ids.clear()
def __repr__(self):
"""Return the string representation of :class:`DocumentArrayElastic` object
:return: string representation of this object
"""
return f'<{self.__class__.__name__} (length={len(self)}) at {id(self)}>'
def _document_to_elastic(self, doc: 'Document') -> Dict:
return {
"_op_type": "index",
'_id': doc.id,
'_index': self._config.index_name,
'embedding': self._map_embedding(doc.embedding),
'blob': doc.to_base64(),
}
def _upload_batch(self, docs: Iterable['Document']):
batch = []
for doc in docs:
batch.append(self._document_to_elastic(doc))
if len(batch) > self._config.batch_size:
self._send_requests(batch)
self._refresh(self._config.index_name)
batch = []
if len(batch) > 0:
self._send_requests(batch)
self._refresh(self._config.index_name)
def extend(self, docs: Iterable['Document']):
docs = list(docs)
self._upload_batch(docs)
self._offset2ids.extend([doc.id for doc in docs])
|
import functools
import importlib
import os
import re
from pathlib import Path
from typing import TYPE_CHECKING, TypeVar
if TYPE_CHECKING:
from backend.data.block import Block
T = TypeVar("T")
@functools.cache
def load_all_blocks() -> dict[str, type["Block"]]:
from backend.data.block import Block
# Dynamically load all modules under backend.blocks
current_dir = Path(__file__).parent
modules = [
str(f.relative_to(current_dir))[:-3].replace(os.path.sep, ".")
for f in current_dir.rglob("*.py")
if f.is_file() and f.name != "__init__.py"
]
for module in modules:
if not re.match("^[a-z0-9_.]+$", module):
raise ValueError(
f"Block module {module} error: module name must be lowercase, "
"and contain only alphanumeric characters and underscores."
)
importlib.import_module(f".{module}", package=__name__)
# Load all Block instances from the available modules
available_blocks: dict[str, type["Block"]] = {}
for block_cls in all_subclasses(Block):
class_name = block_cls.__name__
if class_name.endswith("Base"):
continue
if not class_name.endswith("Block"):
raise ValueError(
f"Block class {class_name} does not end with 'Block'. "
"If you are creating an abstract class, "
"please name the class with 'Base' at the end"
)
block = block_cls.create()
if not isinstance(block.id, str) or len(block.id) != 36:
raise ValueError(
f"Block ID {block.name} error: {block.id} is not a valid UUID"
)
if block.id in available_blocks:
raise ValueError(
f"Block ID {block.name} error: {block.id} is already in use"
)
input_schema = block.input_schema.model_fields
output_schema = block.output_schema.model_fields
# Make sure `error` field is a string in the output schema
if "error" in output_schema and output_schema["error"].annotation is not str:
raise ValueError(
f"{block.name} `error` field in output_schema must be a string"
)
# Ensure all fields in input_schema and output_schema are annotated SchemaFields
for field_name, field in [*input_schema.items(), *output_schema.items()]:
if field.annotation is None:
raise ValueError(
f"{block.name} has a field {field_name} that is not annotated"
)
if field.json_schema_extra is None:
raise ValueError(
f"{block.name} has a field {field_name} not defined as SchemaField"
)
for field in block.input_schema.model_fields.values():
if field.annotation is bool and field.default not in (True, False):
raise ValueError(
f"{block.name} has a boolean field with no default value"
)
available_blocks[block.id] = block_cls
return available_blocks
__all__ = ["load_all_blocks"]
def all_subclasses(cls: type[T]) -> list[type[T]]:
subclasses = cls.__subclasses__()
for subclass in subclasses:
subclasses += all_subclasses(subclass)
return subclasses
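
# A minimal usage sketch, assuming this module lives at backend.blocks (as the
# surrounding imports suggest): load the registry once (the result is cached
# by functools.cache) and list the discovered blocks, e.g.:
#
#     from backend.blocks import load_all_blocks
#
#     for block_id, block_cls in load_all_blocks().items():
#         print(block_id, block_cls.__name__)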
|
import importlib
import os
import re
from pathlib import Path
from typing import TYPE_CHECKING, TypeVar
if TYPE_CHECKING:
from backend.data.block import Block
T = TypeVar("T")
_AVAILABLE_BLOCKS: dict[str, type["Block"]] = {}
def load_all_blocks() -> dict[str, type["Block"]]:
from backend.data.block import Block
if _AVAILABLE_BLOCKS:
return _AVAILABLE_BLOCKS
# Dynamically load all modules under backend.blocks
AVAILABLE_MODULES = []
current_dir = Path(__file__).parent
modules = [
str(f.relative_to(current_dir))[:-3].replace(os.path.sep, ".")
for f in current_dir.rglob("*.py")
if f.is_file() and f.name != "__init__.py"
]
for module in modules:
if not re.match("^[a-z0-9_.]+$", module):
raise ValueError(
f"Block module {module} error: module name must be lowercase, "
"and contain only alphanumeric characters and underscores."
)
importlib.import_module(f".{module}", package=__name__)
AVAILABLE_MODULES.append(module)
# Load all Block instances from the available modules
for block_cls in all_subclasses(Block):
class_name = block_cls.__name__
if class_name.endswith("Base"):
continue
if not class_name.endswith("Block"):
raise ValueError(
f"Block class {class_name} does not end with 'Block'. "
"If you are creating an abstract class, "
"please name the class with 'Base' at the end"
)
block = block_cls.create()
if not isinstance(block.id, str) or len(block.id) != 36:
raise ValueError(
f"Block ID {block.name} error: {block.id} is not a valid UUID"
)
if block.id in _AVAILABLE_BLOCKS:
raise ValueError(
f"Block ID {block.name} error: {block.id} is already in use"
)
input_schema = block.input_schema.model_fields
output_schema = block.output_schema.model_fields
# Make sure `error` field is a string in the output schema
if "error" in output_schema and output_schema["error"].annotation is not str:
raise ValueError(
f"{block.name} `error` field in output_schema must be a string"
)
# Ensure all fields in input_schema and output_schema are annotated SchemaFields
for field_name, field in [*input_schema.items(), *output_schema.items()]:
if field.annotation is None:
raise ValueError(
f"{block.name} has a field {field_name} that is not annotated"
)
if field.json_schema_extra is None:
raise ValueError(
f"{block.name} has a field {field_name} not defined as SchemaField"
)
for field in block.input_schema.model_fields.values():
if field.annotation is bool and field.default not in (True, False):
raise ValueError(
f"{block.name} has a boolean field with no default value"
)
_AVAILABLE_BLOCKS[block.id] = block_cls
return _AVAILABLE_BLOCKS
__all__ = ["load_all_blocks"]
def all_subclasses(cls: type[T]) -> list[type[T]]:
subclasses = cls.__subclasses__()
for subclass in subclasses:
subclasses += all_subclasses(subclass)
return subclasses
|
"""Argparser module for Deployment runtimes"""
import argparse
from jina import helper
from jina.enums import DeploymentRoleType
from jina.parsers.helper import _SHOW_ALL_ARGS, KVAppendAction, add_arg_group
def mixin_base_deployment_parser(parser):
"""Add mixin arguments required by :class:`BaseDeployment` into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='Deployment')
gp.add_argument(
'--uses-before',
type=str,
help='The executor attached before the Pods described by --uses, typically before sending to all '
'shards, accepted type follows `--uses`. This argument only applies for sharded Deployments (shards > 1).',
)
gp.add_argument(
'--uses-after',
type=str,
help='The executor attached after the Pods described by --uses, typically used for receiving from '
'all shards, accepted type follows `--uses`. This argument only applies for sharded Deployments (shards > 1).',
)
gp.add_argument(
'--when',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
        help='The condition that the documents need to fulfill before reaching the Executor. '
        'The condition can be defined in the form of a `DocArray query condition <https://docarray.jina.ai/fundamentals/documentarray/find/#query-by-conditions>`',
)
gp.add_argument(
'--external',
action='store_true',
default=False,
        help='The Deployment will be considered an external Deployment that has been started independently from the Flow. '
        'This Deployment will not be context managed by the Flow.',
)
# hidden CLI used for internal only
gp.add_argument(
'--deployment-role',
type=DeploymentRoleType.from_string,
choices=list(DeploymentRoleType),
help='The role of this deployment in the flow'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--tls',
action='store_true',
default=False,
help='If set, connect to deployment using tls encryption',
)
gp.add_argument(
'--port-monitoring',
type=str,
default=str(helper.random_port()),
dest='port_monitoring',
        help='The port on which the Prometheus server is exposed, by default a random port between [49152, 65535]',
)
|
"""Argparser module for Deployment runtimes"""
import argparse
from jina.enums import DeploymentRoleType
from jina.parsers.helper import _SHOW_ALL_ARGS, KVAppendAction, add_arg_group
def mixin_base_deployment_parser(parser):
"""Add mixin arguments required by :class:`BaseDeployment` into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='Deployment')
gp.add_argument(
'--uses-before',
type=str,
help='The executor attached before the Pods described by --uses, typically before sending to all '
'shards, accepted type follows `--uses`. This argument only applies for sharded Deployments (shards > 1).',
)
gp.add_argument(
'--uses-after',
type=str,
help='The executor attached after the Pods described by --uses, typically used for receiving from '
'all shards, accepted type follows `--uses`. This argument only applies for sharded Deployments (shards > 1).',
)
gp.add_argument(
'--when',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
        help='The condition that the documents need to fulfill before reaching the Executor. '
        'The condition can be defined in the form of a `DocArray query condition <https://docarray.jina.ai/fundamentals/documentarray/find/#query-by-conditions>`',
)
gp.add_argument(
'--external',
action='store_true',
default=False,
        help='The Deployment will be considered an external Deployment that has been started independently from the Flow. '
        'This Deployment will not be context managed by the Flow.',
)
# hidden CLI used for internal only
gp.add_argument(
'--deployment-role',
type=DeploymentRoleType.from_string,
choices=list(DeploymentRoleType),
help='The role of this deployment in the flow'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--tls',
action='store_true',
default=False,
help='If set, connect to deployment using tls encryption',
)
|
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDocument
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import AudioTorchTensor, AudioUrl
from tests import TOYDATA_DIR
AUDIO_FILES = [
str(TOYDATA_DIR / 'hello.wav'),
str(TOYDATA_DIR / 'olleh.wav'),
]
REMOTE_AUDIO_FILE = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/olleh.wav?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_audio_url(file_url):
uri = parse_obj_as(AudioUrl, file_url)
tensor = uri.load()
assert isinstance(tensor, np.ndarray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load_audio_url_to_audio_torch_tensor_field(file_url):
class MyAudioDoc(BaseDocument):
audio_url: AudioUrl
tensor: Optional[AudioTorchTensor]
doc = MyAudioDoc(audio_url=file_url)
doc.tensor = doc.audio_url.load()
assert isinstance(doc.tensor, torch.Tensor)
assert isinstance(doc.tensor, AudioTorchTensor)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load(file_url):
url = parse_obj_as(AudioUrl, file_url)
tensor = url.load()
assert isinstance(tensor, np.ndarray)
def test_json_schema():
schema_json_of(AudioUrl)
def test_dump_json():
url = parse_obj_as(AudioUrl, REMOTE_AUDIO_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[
*[file for file in AUDIO_FILES],
REMOTE_AUDIO_FILE,
],
)
def test_validation(path_to_file):
url = parse_obj_as(AudioUrl, path_to_file)
assert isinstance(url, AudioUrl)
assert isinstance(url, str)
@pytest.mark.parametrize(
'path_to_file',
[
'illegal',
'https://www.google.com',
'my/local/text/file.txt',
'my/local/text/file.png',
],
)
def test_illegal_validation(path_to_file):
with pytest.raises(ValueError, match='AudioUrl'):
parse_obj_as(AudioUrl, path_to_file)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_proto_audio_url(file_url):
uri = parse_obj_as(AudioUrl, file_url)
proto = uri._to_node_protobuf()
assert 'audio_url' in str(proto)
|
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDocument
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import AudioNdArray, AudioTorchTensor, AudioUrl
from tests import TOYDATA_DIR
AUDIO_FILES = [
str(TOYDATA_DIR / 'hello.wav'),
str(TOYDATA_DIR / 'olleh.wav'),
]
REMOTE_AUDIO_FILE = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/olleh.wav?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_audio_url(file_url):
uri = parse_obj_as(AudioUrl, file_url)
tensor = uri.load()
assert isinstance(tensor, np.ndarray)
assert isinstance(tensor, AudioNdArray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load_audio_url_to_audio_torch_tensor_field(file_url):
class MyAudioDoc(BaseDocument):
audio_url: AudioUrl
tensor: Optional[AudioTorchTensor]
doc = MyAudioDoc(audio_url=file_url)
doc.tensor = doc.audio_url.load()
assert isinstance(doc.tensor, torch.Tensor)
assert isinstance(doc.tensor, AudioTorchTensor)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load(file_url):
url = parse_obj_as(AudioUrl, file_url)
tensor = url.load()
assert isinstance(tensor, np.ndarray)
def test_json_schema():
schema_json_of(AudioUrl)
def test_dump_json():
url = parse_obj_as(AudioUrl, REMOTE_AUDIO_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[
*[file for file in AUDIO_FILES],
REMOTE_AUDIO_FILE,
],
)
def test_validation(path_to_file):
url = parse_obj_as(AudioUrl, path_to_file)
assert isinstance(url, AudioUrl)
assert isinstance(url, str)
@pytest.mark.parametrize(
'path_to_file',
[
'illegal',
'https://www.google.com',
'my/local/text/file.txt',
'my/local/text/file.png',
],
)
def test_illegal_validation(path_to_file):
with pytest.raises(ValueError, match='AudioUrl'):
parse_obj_as(AudioUrl, path_to_file)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_proto_audio_url(file_url):
uri = parse_obj_as(AudioUrl, file_url)
proto = uri._to_node_protobuf()
assert 'audio_url' in str(proto)
|
import argparse
from abc import ABC
from typing import TYPE_CHECKING, Optional, Union
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
if TYPE_CHECKING:
import asyncio
import multiprocessing
import threading
class GatewayRuntime(AsyncNewLoopRuntime, ABC):
"""
The Runtime from which the GatewayRuntimes need to inherit
"""
def __init__(
self,
args: argparse.Namespace,
cancel_event: Optional[
Union['asyncio.Event', 'multiprocessing.Event', 'threading.Event']
] = None,
**kwargs,
):
# this order is intentional: The timeout is needed in _create_topology_graph(), called by super
self.timeout_send = args.timeout_send
if self.timeout_send:
self.timeout_send /= 1e3 # convert ms to seconds
super().__init__(args, cancel_event, **kwargs)
|
import argparse
from abc import ABC
from typing import TYPE_CHECKING, Optional, Union
from jina.serve.networking import GrpcConnectionPool
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
from jina.serve.runtimes.gateway.graph.topology_graph import TopologyGraph
if TYPE_CHECKING:
import asyncio
import multiprocessing
import threading
class GatewayRuntime(AsyncNewLoopRuntime, ABC):
"""
The Runtime from which the GatewayRuntimes need to inherit
"""
def __init__(
self,
args: argparse.Namespace,
cancel_event: Optional[
Union['asyncio.Event', 'multiprocessing.Event', 'threading.Event']
] = None,
**kwargs,
):
# this order is intentional: The timeout is needed in _set_topology_graph(), called by super
self.timeout_send = args.timeout_send
if self.timeout_send:
self.timeout_send /= 1e3 # convert ms to seconds
super().__init__(args, cancel_event, **kwargs)
def _set_topology_graph(self):
# check if it should be in K8s, maybe ConnectionPoolFactory to be created
import json
graph_description = json.loads(self.args.graph_description)
graph_conditions = json.loads(self.args.graph_conditions)
deployments_disable_reduce = json.loads(self.args.deployments_disable_reduce)
self._topology_graph = TopologyGraph(
graph_description,
graph_conditions,
deployments_disable_reduce,
timeout_send=self.timeout_send,
retries=self.args.retries,
)
def _set_connection_pool(self):
import json
deployments_addresses = json.loads(self.args.deployments_addresses)
# add the connections needed
self._connection_pool = GrpcConnectionPool(
logger=self.logger,
compression=self.args.compression,
metrics_registry=self.metrics_registry,
)
for deployment_name, addresses in deployments_addresses.items():
for address in addresses:
self._connection_pool.add_connection(
deployment=deployment_name, address=address, head=True
)
|
"""
===================================================
Faces recognition example using eigenfaces and SVMs
===================================================
The dataset used in this example is a preprocessed excerpt of the
"Labeled Faces in the Wild", aka LFW_:
http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz (233MB)
.. _LFW: http://vis-www.cs.umass.edu/lfw/
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
from time import time
import matplotlib.pyplot as plt
from scipy.stats import loguniform
from sklearn.datasets import fetch_lfw_people
from sklearn.decomposition import PCA
from sklearn.metrics import ConfusionMatrixDisplay, classification_report
from sklearn.model_selection import RandomizedSearchCV, train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
# %%
# Download the data, if not already on disk, and load it as numpy arrays
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# introspect the image arrays to find the shapes (for plotting)
n_samples, h, w = lfw_people.images.shape
# for machine learning we use the 2D data directly (as relative pixel
# position info is ignored by this model)
X = lfw_people.data
n_features = X.shape[1]
# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
print("Total dataset size:")
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
print("n_classes: %d" % n_classes)
# %%
# Split into a training set and a test set, keeping 25% of the data for testing.
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=42
)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# %%
# Compute a PCA (eigenfaces) on the face dataset (treated as an unlabeled
# dataset): unsupervised feature extraction / dimensionality reduction
n_components = 150
print(
"Extracting the top %d eigenfaces from %d faces" % (n_components, X_train.shape[0])
)
t0 = time()
pca = PCA(n_components=n_components, svd_solver="randomized", whiten=True).fit(X_train)
print("done in %0.3fs" % (time() - t0))
eigenfaces = pca.components_.reshape((n_components, h, w))
print("Projecting the input data on the eigenfaces orthonormal basis")
t0 = time()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print("done in %0.3fs" % (time() - t0))
# %%
# Train a SVM classification model
print("Fitting the classifier to the training set")
t0 = time()
param_grid = {
"C": loguniform(1e3, 1e5),
"gamma": loguniform(1e-4, 1e-1),
}
clf = RandomizedSearchCV(
SVC(kernel="rbf", class_weight="balanced"), param_grid, n_iter=10
)
clf = clf.fit(X_train_pca, y_train)
print("done in %0.3fs" % (time() - t0))
print("Best estimator found by grid search:")
print(clf.best_estimator_)
# %%
# Quantitative evaluation of the model quality on the test set
print("Predicting people's names on the test set")
t0 = time()
y_pred = clf.predict(X_test_pca)
print("done in %0.3fs" % (time() - t0))
print(classification_report(y_test, y_pred, target_names=target_names))
ConfusionMatrixDisplay.from_estimator(
clf, X_test_pca, y_test, display_labels=target_names, xticks_rotation="vertical"
)
plt.tight_layout()
plt.show()
# %%
# Qualitative evaluation of the predictions using matplotlib
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
"""Helper function to plot a gallery of portraits"""
plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))
plt.subplots_adjust(bottom=0, left=0.01, right=0.99, top=0.90, hspace=0.35)
for i in range(n_row * n_col):
plt.subplot(n_row, n_col, i + 1)
plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)
plt.title(titles[i], size=12)
plt.xticks(())
plt.yticks(())
# %%
# plot the result of the prediction on a portion of the test set
def title(y_pred, y_test, target_names, i):
pred_name = target_names[y_pred[i]].rsplit(" ", 1)[-1]
true_name = target_names[y_test[i]].rsplit(" ", 1)[-1]
return "predicted: %s\ntrue: %s" % (pred_name, true_name)
prediction_titles = [
title(y_pred, y_test, target_names, i) for i in range(y_pred.shape[0])
]
plot_gallery(X_test, prediction_titles, h, w)
# %%
# plot the gallery of the most significant eigenfaces
eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])]
plot_gallery(eigenfaces, eigenface_titles, h, w)
plt.show()
# %%
# The face recognition problem would be solved much more effectively by training
# convolutional neural networks, but this family of models is outside the scope of
# the scikit-learn library. Interested readers should instead try to use pytorch or
# tensorflow to implement such models.
|
"""
===================================================
Faces recognition example using eigenfaces and SVMs
===================================================
The dataset used in this example is a preprocessed excerpt of the
"Labeled Faces in the Wild", aka LFW_:
http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz (233MB)
.. _LFW: http://vis-www.cs.umass.edu/lfw/
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
from time import time
import matplotlib.pyplot as plt
from scipy.stats import loguniform
from sklearn.datasets import fetch_lfw_people
from sklearn.decomposition import PCA
from sklearn.metrics import ConfusionMatrixDisplay, classification_report
from sklearn.model_selection import RandomizedSearchCV, train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
# %%
# Download the data, if not already on disk, and load it as numpy arrays
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# introspect the image arrays to find the shapes (for plotting)
n_samples, h, w = lfw_people.images.shape
# for machine learning we use the 2D data directly (as relative pixel
# position info is ignored by this model)
X = lfw_people.data
n_features = X.shape[1]
# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
print("Total dataset size:")
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
print("n_classes: %d" % n_classes)
# %%
# Split into a training set and a test set, keeping 25% of the data for testing.
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=42
)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# %%
# Compute a PCA (eigenfaces) on the face dataset (treated as an unlabeled
# dataset): unsupervised feature extraction / dimensionality reduction
n_components = 150
print(
"Extracting the top %d eigenfaces from %d faces" % (n_components, X_train.shape[0])
)
t0 = time()
pca = PCA(n_components=n_components, svd_solver="randomized", whiten=True).fit(X_train)
print("done in %0.3fs" % (time() - t0))
eigenfaces = pca.components_.reshape((n_components, h, w))
print("Projecting the input data on the eigenfaces orthonormal basis")
t0 = time()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print("done in %0.3fs" % (time() - t0))
# %%
# Train a SVM classification model
print("Fitting the classifier to the training set")
t0 = time()
param_grid = {
"C": loguniform(1e3, 1e5),
"gamma": loguniform(1e-4, 1e-1),
}
clf = RandomizedSearchCV(
SVC(kernel="rbf", class_weight="balanced"), param_grid, n_iter=10
)
clf = clf.fit(X_train_pca, y_train)
print("done in %0.3fs" % (time() - t0))
print("Best estimator found by grid search:")
print(clf.best_estimator_)
# %%
# Quantitative evaluation of the model quality on the test set
print("Predicting people's names on the test set")
t0 = time()
y_pred = clf.predict(X_test_pca)
print("done in %0.3fs" % (time() - t0))
print(classification_report(y_test, y_pred, target_names=target_names))
ConfusionMatrixDisplay.from_estimator(
clf, X_test_pca, y_test, display_labels=target_names, xticks_rotation="vertical"
)
plt.tight_layout()
plt.show()
# %%
# Qualitative evaluation of the predictions using matplotlib
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
"""Helper function to plot a gallery of portraits"""
plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))
plt.subplots_adjust(bottom=0, left=0.01, right=0.99, top=0.90, hspace=0.35)
for i in range(n_row * n_col):
plt.subplot(n_row, n_col, i + 1)
plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)
plt.title(titles[i], size=12)
plt.xticks(())
plt.yticks(())
# %%
# plot the result of the prediction on a portion of the test set
def title(y_pred, y_test, target_names, i):
pred_name = target_names[y_pred[i]].rsplit(" ", 1)[-1]
true_name = target_names[y_test[i]].rsplit(" ", 1)[-1]
return "predicted: %s\ntrue: %s" % (pred_name, true_name)
prediction_titles = [
title(y_pred, y_test, target_names, i) for i in range(y_pred.shape[0])
]
plot_gallery(X_test, prediction_titles, h, w)
# %%
# plot the gallery of the most significant eigenfaces
eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])]
plot_gallery(eigenfaces, eigenface_titles, h, w)
plt.show()
# %%
# The face recognition problem would be solved much more effectively by training
# convolutional neural networks, but this family of models is outside the scope of
# the scikit-learn library. Interested readers should instead try to use pytorch or
# tensorflow to implement such models.
|
"""
Script to generate meta.json to store metadata for a nightly build of
XGBoost Python package.
"""
import argparse
import json
import pathlib
def main(args: argparse.Namespace) -> None:
wheel_path = pathlib.Path(args.wheel_path).expanduser().resolve()
if not wheel_path.exists():
raise ValueError(f"Wheel cannot be found at path {wheel_path}")
if not wheel_path.is_file():
raise ValueError(f"Path {wheel_path} is not a valid file")
wheel_name = wheel_path.name
meta_path = pathlib.Path(args.meta_path)
if not meta_path.exists():
raise ValueError(f"Path {meta_path} does not exist")
if not meta_path.is_dir():
raise ValueError(f"Path {meta_path} is not a valid directory")
tokens = wheel_name.split("-")
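    # Per PEP 427, a wheel file name has five dash-separated fields:
    # {distribution}-{version}-{python tag}-{abi tag}-{platform tag}.whl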
assert len(tokens) == 5
version = tokens[1].split("+")[0]
meta_info = {
"wheel_path": f"{args.commit_hash}/{wheel_name}",
"wheel_name": wheel_name,
"platform_tag": args.platform_tag,
"version": version,
"commit_id": args.commit_hash,
}
with open(meta_path / "meta.json", "w") as f:
json.dump(meta_info, f, indent=4)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Format meta.json encoding the latest nightly version of the Python wheel"
)
parser.add_argument(
"--wheel-path", type=str, required=True, help="Path to the wheel"
)
parser.add_argument(
"--commit-hash", type=str, required=True, help="Git commit hash"
)
parser.add_argument(
"--platform-tag",
type=str,
required=True,
help="Platform tag (e.g. manylinux_2_28_x86_64)",
)
parser.add_argument(
"--meta-path", type=str, required=True, help="Directory to place meta.json"
)
parsed_args = parser.parse_args()
main(parsed_args)
|
"""
Script to generate meta.json to store metadata for a nightly build of
XGBoost Python package.
"""
import argparse
import json
import pathlib
def main(args: argparse.Namespace) -> None:
wheel_path = pathlib.Path(args.wheel_path).expanduser().resolve()
if not wheel_path.exists():
raise ValueError(f"Wheel cannot be found at path {wheel_path}")
if not wheel_path.is_file():
raise ValueError(f"Path {wheel_path} is not a valid file")
wheel_name = wheel_path.name
meta_path = pathlib.Path(args.meta_path)
if not meta_path.exists():
raise ValueError(f"Path {meta_path} does not exist")
if not meta_path.is_dir():
raise ValueError(f"Path {meta_path} is not a valid directory")
tokens = wheel_name.split("-")
assert len(tokens) == 5
version = tokens[1].split("+")[0]
meta_info = {
"wheel_name": wheel_name,
"platform_tag": args.platform_tag,
"version": version,
"commit_id": args.commit_hash,
}
with open(meta_path / "meta.json", "w") as f:
json.dump(meta_info, f, indent=4)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Format meta.json encoding the latest nightly version of the Python wheel"
)
parser.add_argument(
"--wheel-path", type=str, required=True, help="Path to the wheel"
)
parser.add_argument(
"--commit-hash", type=str, required=True, help="Git commit hash"
)
parser.add_argument(
"--platform-tag",
type=str,
required=True,
help="Platform tag (e.g. manylinux_2_28_x86_64)",
)
parser.add_argument(
"--meta-path", type=str, required=True, help="Directory to place meta.json"
)
parsed_args = parser.parse_args()
main(parsed_args)
|
import string
from typing import Any
from langchain.evaluation.schema import StringEvaluator
class ExactMatchStringEvaluator(StringEvaluator):
"""Compute an exact match between the prediction and the reference.
Examples
----------
    >>> evaluator = ExactMatchStringEvaluator()
    >>> evaluator.evaluate_strings(
            prediction="Mindy is the CTO",
            reference="Mindy is the CTO",
        )  # This will return {'score': 1}
    >>> evaluator.evaluate_strings(
            prediction="Mindy is the CTO",
            reference="Mindy is the CEO",
        )  # This will return {'score': 0}
"""
def __init__(
self,
*,
ignore_case: bool = False,
ignore_punctuation: bool = False,
ignore_numbers: bool = False,
**kwargs: Any,
):
super().__init__()
self.ignore_case = ignore_case
self.ignore_punctuation = ignore_punctuation
self.ignore_numbers = ignore_numbers
@property
def requires_input(self) -> bool:
"""
This evaluator does not require input.
"""
return False
@property
def requires_reference(self) -> bool:
"""
This evaluator requires a reference.
"""
return True
@property
def input_keys(self) -> list[str]:
"""
Get the input keys.
Returns:
List[str]: The input keys.
"""
return ["reference", "prediction"]
@property
def evaluation_name(self) -> str:
"""
Get the evaluation name.
Returns:
str: The evaluation name.
"""
return "exact_match"
def _evaluate_strings( # type: ignore[override]
self,
*,
prediction: str,
reference: str,
**kwargs: Any,
) -> dict:
"""
Evaluate the exact match between the prediction and the reference.
Args:
prediction (str): The prediction string.
reference (Optional[str], optional): The reference string.
Returns:
dict: The evaluation results containing the score.
"""
if self.ignore_case:
prediction = prediction.lower()
reference = reference.lower()
if self.ignore_punctuation:
prediction = prediction.translate(str.maketrans("", "", string.punctuation))
reference = reference.translate(str.maketrans("", "", string.punctuation))
if self.ignore_numbers:
prediction = prediction.translate(str.maketrans("", "", string.digits))
reference = reference.translate(str.maketrans("", "", string.digits))
return {"score": int(prediction == reference)}
|
import string
from typing import Any
from langchain.evaluation.schema import StringEvaluator
class ExactMatchStringEvaluator(StringEvaluator):
"""Compute an exact match between the prediction and the reference.
Examples
----------
    >>> evaluator = ExactMatchStringEvaluator()
    >>> evaluator.evaluate_strings(
            prediction="Mindy is the CTO",
            reference="Mindy is the CTO",
        )  # This will return {'score': 1}
    >>> evaluator.evaluate_strings(
            prediction="Mindy is the CTO",
            reference="Mindy is the CEO",
        )  # This will return {'score': 0}
"""
def __init__(
self,
*,
ignore_case: bool = False,
ignore_punctuation: bool = False,
ignore_numbers: bool = False,
**kwargs: Any,
):
super().__init__()
self.ignore_case = ignore_case
self.ignore_punctuation = ignore_punctuation
self.ignore_numbers = ignore_numbers
@property
def requires_input(self) -> bool:
"""
This evaluator does not require input.
"""
return False
@property
def requires_reference(self) -> bool:
"""
This evaluator requires a reference.
"""
return True
@property
def input_keys(self) -> list[str]:
"""
Get the input keys.
Returns:
List[str]: The input keys.
"""
return ["reference", "prediction"]
@property
def evaluation_name(self) -> str:
"""
Get the evaluation name.
Returns:
str: The evaluation name.
"""
return "exact_match"
def _evaluate_strings( # type: ignore[arg-type,override]
self,
*,
prediction: str,
reference: str,
**kwargs: Any,
) -> dict:
"""
Evaluate the exact match between the prediction and the reference.
Args:
prediction (str): The prediction string.
reference (Optional[str], optional): The reference string.
Returns:
dict: The evaluation results containing the score.
"""
if self.ignore_case:
prediction = prediction.lower()
reference = reference.lower()
if self.ignore_punctuation:
prediction = prediction.translate(str.maketrans("", "", string.punctuation))
reference = reference.translate(str.maketrans("", "", string.punctuation))
if self.ignore_numbers:
prediction = prediction.translate(str.maketrans("", "", string.digits))
reference = reference.translate(str.maketrans("", "", string.digits))
return {"score": int(prediction == reference)}
|
import sys
import traceback
from importlib.machinery import SourceFileLoader
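# Usage sketch: python <this_script>.py path/to/a.py path/to/b.py
# Imports each given file in turn and exits non-zero if any import fails.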
if __name__ == "__main__":
files = sys.argv[1:]
has_failure = False
for file in files:
try:
SourceFileLoader("x", file).load_module()
except Exception: # noqa: PERF203
has_failure = True
print(file) # noqa: T201
traceback.print_exc()
print() # noqa: T201
sys.exit(1 if has_failure else 0)
|
import sys
import traceback
from importlib.machinery import SourceFileLoader
if __name__ == "__main__":
files = sys.argv[1:]
has_failure = False
for file in files:
try:
SourceFileLoader("x", file).load_module()
except Exception:
has_failure = True
print(file) # noqa: T201
traceback.print_exc()
print() # noqa: T201
sys.exit(1 if has_failure else 0)
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
frozen_stages=0,
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
init_cfg=dict(
type='Pretrained', checkpoint='./swav_800ep_pretrain.pth.tar')))
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
frozen_stages=0,
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
init_cfg=dict(
type='Pretrained', checkpoint='./swav_800ep_pretrain.pth.tar')))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = [
'../_base_/models/cascade-rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
roi_head=dict(bbox_head=[
dict(
type='SABLHead',
num_classes=80,
cls_in_channels=256,
reg_in_channels=256,
roi_feat_size=7,
reg_feat_up_ratio=2,
reg_pre_kernel=3,
reg_post_kernel=3,
reg_pre_num=2,
reg_post_num=1,
cls_out_channels=1024,
reg_offset_out_channels=256,
reg_cls_out_channels=256,
num_cls_fcs=1,
num_reg_fcs=0,
reg_class_agnostic=True,
norm_cfg=None,
bbox_coder=dict(
type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.7),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1,
loss_weight=1.0)),
dict(
type='SABLHead',
num_classes=80,
cls_in_channels=256,
reg_in_channels=256,
roi_feat_size=7,
reg_feat_up_ratio=2,
reg_pre_kernel=3,
reg_post_kernel=3,
reg_pre_num=2,
reg_post_num=1,
cls_out_channels=1024,
reg_offset_out_channels=256,
reg_cls_out_channels=256,
num_cls_fcs=1,
num_reg_fcs=0,
reg_class_agnostic=True,
norm_cfg=None,
bbox_coder=dict(
type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.5),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1,
loss_weight=1.0)),
dict(
type='SABLHead',
num_classes=80,
cls_in_channels=256,
reg_in_channels=256,
roi_feat_size=7,
reg_feat_up_ratio=2,
reg_pre_kernel=3,
reg_post_kernel=3,
reg_pre_num=2,
reg_post_num=1,
cls_out_channels=1024,
reg_offset_out_channels=256,
reg_cls_out_channels=256,
num_cls_fcs=1,
num_reg_fcs=0,
reg_class_agnostic=True,
norm_cfg=None,
bbox_coder=dict(
type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.3),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, loss_weight=1.0))
]))
|
_base_ = [
'../_base_/models/cascade_rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
roi_head=dict(bbox_head=[
dict(
type='SABLHead',
num_classes=80,
cls_in_channels=256,
reg_in_channels=256,
roi_feat_size=7,
reg_feat_up_ratio=2,
reg_pre_kernel=3,
reg_post_kernel=3,
reg_pre_num=2,
reg_post_num=1,
cls_out_channels=1024,
reg_offset_out_channels=256,
reg_cls_out_channels=256,
num_cls_fcs=1,
num_reg_fcs=0,
reg_class_agnostic=True,
norm_cfg=None,
bbox_coder=dict(
type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.7),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1,
loss_weight=1.0)),
dict(
type='SABLHead',
num_classes=80,
cls_in_channels=256,
reg_in_channels=256,
roi_feat_size=7,
reg_feat_up_ratio=2,
reg_pre_kernel=3,
reg_post_kernel=3,
reg_pre_num=2,
reg_post_num=1,
cls_out_channels=1024,
reg_offset_out_channels=256,
reg_cls_out_channels=256,
num_cls_fcs=1,
num_reg_fcs=0,
reg_class_agnostic=True,
norm_cfg=None,
bbox_coder=dict(
type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.5),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1,
loss_weight=1.0)),
dict(
type='SABLHead',
num_classes=80,
cls_in_channels=256,
reg_in_channels=256,
roi_feat_size=7,
reg_feat_up_ratio=2,
reg_pre_kernel=3,
reg_post_kernel=3,
reg_pre_num=2,
reg_post_num=1,
cls_out_channels=1024,
reg_offset_out_channels=256,
reg_cls_out_channels=256,
num_cls_fcs=1,
num_reg_fcs=0,
reg_class_agnostic=True,
norm_cfg=None,
bbox_coder=dict(
type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.3),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, loss_weight=1.0))
]))
|
import numpy as np
import pytest
from keras.src import testing
from keras.src.layers.activations import relu
class ReLUTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_relu(self):
self.run_layer_test(
relu.ReLU,
init_kwargs={
"max_value": 10,
"negative_slope": 1,
"threshold": 0.5,
},
input_shape=(2, 3, 4),
supports_masking=True,
assert_built_after_instantiation=True,
)
def test_normal_relu_correctness(self):
relu_layer = relu.ReLU(max_value=10, negative_slope=0.0, threshold=0)
input = np.array([-10, -5, 0.0, 5, 10])
expected_output = np.array([0.0, 0.0, 0.0, 5.0, 10.0])
result = relu_layer(input)
self.assertAllClose(result, expected_output)
def test_leaky_relu_correctness(self):
relu_layer = relu.ReLU(max_value=10, negative_slope=0.5, threshold=0)
input = np.array([-10, -5, 0.0, 5, 10])
expected_output = np.array([-5.0, -2.5, 0.0, 5.0, 10.0])
result = relu_layer(input)
self.assertAllClose(result, expected_output)
def test_threshold_relu_correctness(self):
relu_layer = relu.ReLU(max_value=8, negative_slope=0.0, threshold=5)
input = np.array([6.0, 7.0, 0.0, 5, 10])
expected_output = np.array([6.0, 7.0, 0.0, 0.0, 8.0])
result = relu_layer(input)
self.assertAllClose(result, expected_output)
def test_invalid_usage(self):
with self.assertRaisesRegex(
ValueError,
"max_value of a ReLU layer cannot be a negative value",
):
self.run_layer_test(
relu.ReLU,
init_kwargs={
"max_value": -10,
"negative_slope": 1,
"threshold": 0.5,
},
input_shape=(2, 3, 4),
supports_masking=True,
)
with self.assertRaisesRegex(
ValueError,
"negative_slope of a ReLU layer cannot be a negative value",
):
self.run_layer_test(
relu.ReLU,
init_kwargs={
"max_value": 10,
"negative_slope": -10,
"threshold": 0.5,
},
input_shape=(2, 3, 4),
supports_masking=True,
)
with self.assertRaisesRegex(
ValueError, "threshold of a ReLU layer cannot be a negative value"
):
self.run_layer_test(
relu.ReLU,
init_kwargs={
"max_value": 10,
"negative_slope": 1,
"threshold": -10,
},
input_shape=(2, 3, 4),
supports_masking=True,
)
|
import numpy as np
import pytest
from keras.src import testing
from keras.src.layers.activations import relu
class ReLUTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_relu(self):
self.run_layer_test(
relu.ReLU,
init_kwargs={
"max_value": 10,
"negative_slope": 1,
"threshold": 0.5,
},
input_shape=(2, 3, 4),
supports_masking=True,
)
def test_normal_relu_correctness(self):
relu_layer = relu.ReLU(max_value=10, negative_slope=0.0, threshold=0)
input = np.array([-10, -5, 0.0, 5, 10])
expected_output = np.array([0.0, 0.0, 0.0, 5.0, 10.0])
result = relu_layer(input)
self.assertAllClose(result, expected_output)
def test_leaky_relu_correctness(self):
relu_layer = relu.ReLU(max_value=10, negative_slope=0.5, threshold=0)
input = np.array([-10, -5, 0.0, 5, 10])
expected_output = np.array([-5.0, -2.5, 0.0, 5.0, 10.0])
result = relu_layer(input)
self.assertAllClose(result, expected_output)
def test_threshold_relu_correctness(self):
relu_layer = relu.ReLU(max_value=8, negative_slope=0.0, threshold=5)
input = np.array([6.0, 7.0, 0.0, 5, 10])
expected_output = np.array([6.0, 7.0, 0.0, 0.0, 8.0])
result = relu_layer(input)
self.assertAllClose(result, expected_output)
def test_invalid_usage(self):
with self.assertRaisesRegex(
ValueError,
"max_value of a ReLU layer cannot be a negative value",
):
self.run_layer_test(
relu.ReLU,
init_kwargs={
"max_value": -10,
"negative_slope": 1,
"threshold": 0.5,
},
input_shape=(2, 3, 4),
supports_masking=True,
)
with self.assertRaisesRegex(
ValueError,
"negative_slope of a ReLU layer cannot be a negative value",
):
self.run_layer_test(
relu.ReLU,
init_kwargs={
"max_value": 10,
"negative_slope": -10,
"threshold": 0.5,
},
input_shape=(2, 3, 4),
supports_masking=True,
)
with self.assertRaisesRegex(
ValueError, "threshold of a ReLU layer cannot be a negative value"
):
self.run_layer_test(
relu.ReLU,
init_kwargs={
"max_value": 10,
"negative_slope": 1,
"threshold": -10,
},
input_shape=(2, 3, 4),
supports_masking=True,
)
|
import os
import boto3
import fsspec
import pytest
from moto import mock_s3
from datasets.filesystems import (
COMPRESSION_FILESYSTEMS,
HfFileSystem,
S3FileSystem,
extract_path_from_uri,
is_remote_filesystem,
)
from datasets.utils._hf_hub_fixes import dataset_info as hf_api_dataset_info
from .utils import require_lz4, require_zstandard
@pytest.fixture(scope="function")
def aws_credentials():
"""Mocked AWS Credentials for moto."""
os.environ["AWS_ACCESS_KEY_ID"] = "fake_access_key"
os.environ["AWS_SECRET_ACCESS_KEY"] = "fake_secret_key"
os.environ["AWS_SECURITY_TOKEN"] = "fake_secrurity_token"
os.environ["AWS_SESSION_TOKEN"] = "fake_session_token"
@pytest.fixture(scope="function")
def s3(aws_credentials):
with mock_s3():
yield boto3.client("s3", region_name="us-east-1")
def test_extract_path_from_uri(s3):
mock_bucket = "moto-mock-s3-bucket"
# We need to create the bucket since this is all in Moto's 'virtual' AWS account
s3.create_bucket(Bucket=mock_bucket)
dataset_path = f"s3://{mock_bucket}"
dataset_path = extract_path_from_uri(dataset_path)
assert dataset_path.startswith("s3://") is False
dataset_path = "./local/path"
new_dataset_path = extract_path_from_uri(dataset_path)
assert dataset_path == new_dataset_path
def test_is_remote_filesystem():
fs = S3FileSystem(key="fake_access_key", secret="fake_secret")
is_remote = is_remote_filesystem(fs)
assert is_remote is True
fs = fsspec.filesystem("file")
is_remote = is_remote_filesystem(fs)
assert is_remote is False
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
input_path = input_paths[compression_fs_class.protocol]
if input_path is None:
reason = f"for '{compression_fs_class.protocol}' compression protocol, "
if compression_fs_class.protocol == "lz4":
reason += require_lz4.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(reason)
fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
assert isinstance(fs, compression_fs_class)
expected_filename = os.path.basename(input_path)
expected_filename = expected_filename[: expected_filename.rindex(".")]
assert fs.ls("/") == [expected_filename]
with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
compressed_file_path = compressed_file_paths[protocol]
member_file_path = "dataset.jsonl"
path = f"{protocol}://{member_file_path}::{compressed_file_path}"
fs, *_ = fsspec.get_fs_token_paths(path)
assert fs.isfile(member_file_path)
assert not fs.isfile("non_existing_" + member_file_path)
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
repo_info = hf_api_dataset_info(hf_api, hf_private_dataset_repo_txt_data, use_auth_token=hf_token)
hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
assert hffs.isdir("data")
assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
with open(text_file) as f:
assert hffs.open("data/text_data.txt", "r").read() == f.read()
|
import os
import boto3
import fsspec
import pytest
from moto import mock_s3
from datasets.filesystems import (
COMPRESSION_FILESYSTEMS,
HfFileSystem,
S3FileSystem,
extract_path_from_uri,
is_remote_filesystem,
)
from .utils import require_lz4, require_zstandard
@pytest.fixture(scope="function")
def aws_credentials():
"""Mocked AWS Credentials for moto."""
os.environ["AWS_ACCESS_KEY_ID"] = "fake_access_key"
os.environ["AWS_SECRET_ACCESS_KEY"] = "fake_secret_key"
os.environ["AWS_SECURITY_TOKEN"] = "fake_secrurity_token"
os.environ["AWS_SESSION_TOKEN"] = "fake_session_token"
@pytest.fixture(scope="function")
def s3(aws_credentials):
with mock_s3():
yield boto3.client("s3", region_name="us-east-1")
def test_extract_path_from_uri(s3):
mock_bucket = "moto-mock-s3-bucket"
# We need to create the bucket since this is all in Moto's 'virtual' AWS account
s3.create_bucket(Bucket=mock_bucket)
dataset_path = f"s3://{mock_bucket}"
dataset_path = extract_path_from_uri(dataset_path)
assert dataset_path.startswith("s3://") is False
dataset_path = "./local/path"
new_dataset_path = extract_path_from_uri(dataset_path)
assert dataset_path == new_dataset_path
def test_is_remote_filesystem():
fs = S3FileSystem(key="fake_access_key", secret="fake_secret")
is_remote = is_remote_filesystem(fs)
assert is_remote is True
fs = fsspec.filesystem("file")
is_remote = is_remote_filesystem(fs)
assert is_remote is False
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
input_path = input_paths[compression_fs_class.protocol]
if input_path is None:
reason = f"for '{compression_fs_class.protocol}' compression protocol, "
if compression_fs_class.protocol == "lz4":
reason += require_lz4.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(reason)
fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
assert isinstance(fs, compression_fs_class)
expected_filename = os.path.basename(input_path)
expected_filename = expected_filename[: expected_filename.rindex(".")]
assert fs.ls("/") == [expected_filename]
with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
compressed_file_path = compressed_file_paths[protocol]
member_file_path = "dataset.jsonl"
path = f"{protocol}://{member_file_path}::{compressed_file_path}"
fs, *_ = fsspec.get_fs_token_paths(path)
assert fs.isfile(member_file_path)
assert not fs.isfile("non_existing_" + member_file_path)
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
assert hffs.isdir("data")
assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
with open(text_file) as f:
assert hffs.open("data/text_data.txt", "r").read() == f.read()
|
import numpy as np
def oscillator_bank(
frequencies,
amplitudes,
sample_rate: float,
time_axis: int = -2,
):
"""Reference implementation of oscillator_bank"""
invalid = np.abs(frequencies) >= sample_rate / 2
if np.any(invalid):
amplitudes = np.where(invalid, 0.0, amplitudes)
pi2 = 2.0 * np.pi
freqs = frequencies * pi2 / sample_rate % pi2
phases = np.cumsum(freqs, axis=time_axis, dtype=freqs.dtype)
waveform = amplitudes * np.sin(phases)
return waveform
def sinc_ir(cutoff, window_size: int = 513, high_pass: bool = False):
if window_size % 2 == 0:
raise ValueError(f"`window_size` must be odd. Given: {window_size}")
half = window_size // 2
dtype = cutoff.dtype
idx = np.linspace(-half, half, window_size, dtype=dtype)
filt = np.sinc(cutoff[..., None] * idx[None, ...])
filt *= np.hamming(window_size).astype(dtype)[None, ...]
filt /= np.abs(filt.sum(axis=-1, keepdims=True))
if high_pass:
filt *= -1
filt[..., half] = 1.0 + filt[..., half]
return filt
def freq_ir(magnitudes):
ir = np.fft.fftshift(np.fft.irfft(magnitudes), axes=-1)
window = np.hanning(ir.shape[-1])
return (ir * window).astype(magnitudes.dtype)
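# A minimal usage sketch layered on the reference implementations above (the
# values below are illustrative assumptions, not part of the original file).
if __name__ == "__main__":
    sample_rate = 8000.0
    num_frames = 16
    # Two oscillators at 440 Hz and 880 Hz, constant across all frames;
    # `time_axis=-2` means rows are time steps and columns are oscillators.
    frequencies = np.tile(np.array([[440.0, 880.0]]), (num_frames, 1))
    amplitudes = np.full_like(frequencies, 0.5)
    waveform = oscillator_bank(frequencies, amplitudes, sample_rate=sample_rate)
    assert waveform.shape == (num_frames, 2)
    # sinc_ir builds one odd-length windowed-sinc low-pass kernel per cutoff.
    kernels = sinc_ir(np.array([0.25, 0.5]), window_size=65)
    assert kernels.shape == (2, 65)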
|
import numpy as np
from numpy.typing import ArrayLike
def oscillator_bank(
frequencies: ArrayLike,
amplitudes: ArrayLike,
sample_rate: float,
time_axis: int = -2,
) -> ArrayLike:
"""Reference implementation of oscillator_bank"""
invalid = np.abs(frequencies) >= sample_rate / 2
if np.any(invalid):
amplitudes = np.where(invalid, 0.0, amplitudes)
pi2 = 2.0 * np.pi
freqs = frequencies * pi2 / sample_rate % pi2
phases = np.cumsum(freqs, axis=time_axis, dtype=freqs.dtype)
waveform = amplitudes * np.sin(phases)
return waveform
def sinc_ir(cutoff: ArrayLike, window_size: int = 513, high_pass: bool = False):
if window_size % 2 == 0:
raise ValueError(f"`window_size` must be odd. Given: {window_size}")
half = window_size // 2
dtype = cutoff.dtype
idx = np.linspace(-half, half, window_size, dtype=dtype)
filt = np.sinc(cutoff[..., None] * idx[None, ...])
filt *= np.hamming(window_size).astype(dtype)[None, ...]
filt /= np.abs(filt.sum(axis=-1, keepdims=True))
if high_pass:
filt *= -1
filt[..., half] = 1.0 + filt[..., half]
return filt
def freq_ir(magnitudes):
ir = np.fft.fftshift(np.fft.irfft(magnitudes), axes=-1)
window = np.hanning(ir.shape[-1])
return (ir * window).astype(magnitudes.dtype)
|
# DO NOT EDIT. Generated by api_gen.sh
from keras.api import DTypePolicy
from keras.api import FloatDTypePolicy
from keras.api import Function
from keras.api import Initializer
from keras.api import Input
from keras.api import InputSpec
from keras.api import KerasTensor
from keras.api import Layer
from keras.api import Loss
from keras.api import Metric
from keras.api import Model
from keras.api import Operation
from keras.api import Optimizer
from keras.api import Quantizer
from keras.api import Regularizer
from keras.api import RematScope
from keras.api import Sequential
from keras.api import StatelessScope
from keras.api import SymbolicScope
from keras.api import Variable
from keras.api import __version__
from keras.api import activations
from keras.api import applications
from keras.api import backend
from keras.api import callbacks
from keras.api import config
from keras.api import constraints
from keras.api import datasets
from keras.api import device
from keras.api import distribution
from keras.api import dtype_policies
from keras.api import export
from keras.api import initializers
from keras.api import layers
from keras.api import legacy
from keras.api import losses
from keras.api import metrics
from keras.api import mixed_precision
from keras.api import models
from keras.api import name_scope
from keras.api import ops
from keras.api import optimizers
from keras.api import preprocessing
from keras.api import quantizers
from keras.api import random
from keras.api import regularizers
from keras.api import remat
from keras.api import saving
from keras.api import tree
from keras.api import utils
from keras.api import version
from keras.api import visualization
from keras.api import wrappers
# END DO NOT EDIT.
import os # isort: skip
# Add everything in /api/ to the module search path.
__path__.append(os.path.join(os.path.dirname(__file__), "api")) # noqa: F405
# Don't pollute namespace.
del os
# Never autocomplete `.src` or `.api` on an imported keras object.
def __dir__():
keys = dict.fromkeys((globals().keys()))
keys.pop("src")
keys.pop("api")
return list(keys)
# Don't import `.src` or `.api` during `from keras import *`.
__all__ = [
name
for name in globals().keys()
if not (name.startswith("_") or name in ("src", "api"))
]
|
# DO NOT EDIT. Generated by api_gen.sh
from keras.api import DTypePolicy
from keras.api import FloatDTypePolicy
from keras.api import Function
from keras.api import Initializer
from keras.api import Input
from keras.api import InputSpec
from keras.api import KerasTensor
from keras.api import Layer
from keras.api import Loss
from keras.api import Metric
from keras.api import Model
from keras.api import Operation
from keras.api import Optimizer
from keras.api import Quantizer
from keras.api import Regularizer
from keras.api import Sequential
from keras.api import StatelessScope
from keras.api import SymbolicScope
from keras.api import Variable
from keras.api import __version__
from keras.api import activations
from keras.api import applications
from keras.api import backend
from keras.api import callbacks
from keras.api import config
from keras.api import constraints
from keras.api import datasets
from keras.api import device
from keras.api import distribution
from keras.api import dtype_policies
from keras.api import export
from keras.api import initializers
from keras.api import layers
from keras.api import legacy
from keras.api import losses
from keras.api import metrics
from keras.api import mixed_precision
from keras.api import models
from keras.api import name_scope
from keras.api import ops
from keras.api import optimizers
from keras.api import preprocessing
from keras.api import quantizers
from keras.api import random
from keras.api import regularizers
from keras.api import saving
from keras.api import tree
from keras.api import utils
from keras.api import version
from keras.api import visualization
from keras.api import wrappers
# END DO NOT EDIT.
import os # isort: skip
# Add everything in /api/ to the module search path.
__path__.append(os.path.join(os.path.dirname(__file__), "api")) # noqa: F405
# Don't pollute namespace.
del os
# Never autocomplete `.src` or `.api` on an imported keras object.
def __dir__():
keys = dict.fromkeys((globals().keys()))
keys.pop("src")
keys.pop("api")
return list(keys)
# Don't import `.src` or `.api` during `from keras import *`.
__all__ = [
name
for name in globals().keys()
if not (name.startswith("_") or name in ("src", "api"))
]
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseRerankingEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a dataset with queries, positives, and negatives
eval_dataset = load_dataset("microsoft/ms_marco", "v1.1", split="validation").select(range(1000))
samples = [
{
"query": sample["query"],
"positive": [
text
for is_selected, text in zip(sample["passages"]["is_selected"], sample["passages"]["passage_text"])
if is_selected
],
"negative": [
text
for is_selected, text in zip(sample["passages"]["is_selected"], sample["passages"]["passage_text"])
if not is_selected
],
}
for sample in eval_dataset
]
# Now evaluate using only the documents from the 1000 samples
reranking_evaluator = SparseRerankingEvaluator(
samples=samples,
name="ms_marco_dev_small",
show_progress_bar=True,
batch_size=32,
)
results = reranking_evaluator(model)
"""
RerankingEvaluator: Evaluating the model on the ms_marco_dev_small dataset:
Queries: 967 Positives: Min 1.0, Mean 1.1, Max 3.0 Negatives: Min 1.0, Mean 7.1, Max 9.0
MAP: 53.41
MRR@10: 54.14
NDCG@10: 65.06
Model Query Sparsity: Active Dimensions: 42.2, Sparsity Ratio: 0.9986
Model Corpus Sparsity: Active Dimensions: 126.5, Sparsity Ratio: 0.9959
"""
# Print the results
print(f"Primary metric: {reranking_evaluator.primary_metric}")
# => Primary metric: ms_marco_dev_small_ndcg@10
print(f"Primary metric value: {results[reranking_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6506
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseRerankingEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a dataset with queries, positives, and negatives
eval_dataset = load_dataset("microsoft/ms_marco", "v1.1", split="validation").select(range(1000))
samples = [
{
"query": sample["query"],
"positive": [
text
for is_selected, text in zip(sample["passages"]["is_selected"], sample["passages"]["passage_text"])
if is_selected
],
"negative": [
text
for is_selected, text in zip(sample["passages"]["is_selected"], sample["passages"]["passage_text"])
if not is_selected
],
}
for sample in eval_dataset
]
# Now evaluate using only the documents from the 1000 samples
reranking_evaluator = SparseRerankingEvaluator(
samples=samples,
name="ms_marco_dev_small",
show_progress_bar=True,
batch_size=32,
)
results = reranking_evaluator(model)
"""
RerankingEvaluator: Evaluating the model on the ms_marco_dev_small dataset:
Queries: 967 Positives: Min 1.0, Mean 1.1, Max 3.0 Negatives: Min 1.0, Mean 7.1, Max 9.0
MAP: 53.61
MRR@10: 54.30
NDCG@10: 65.20
Model Query Sparsity: Active Dimensions: 43.9, Sparsity Ratio: 0.9986
Model Corpus Sparsity: Active Dimensions: 128.4, Sparsity Ratio: 0.9958
"""
# Print the results
print(f"Primary metric: {reranking_evaluator.primary_metric}")
# => Primary metric: ms_marco_dev_small_ndcg@10
print(f"Primary metric value: {results[reranking_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6520
|
from typing import TYPE_CHECKING
from ..utils import DIFFUSERS_SLOW_IMPORT, _LazyModule, deprecate
from ..utils.import_utils import is_peft_available, is_torch_available, is_transformers_available
def text_encoder_lora_state_dict(text_encoder):
deprecate(
"text_encoder_load_state_dict in `models`",
"0.27.0",
"`text_encoder_lora_state_dict` is deprecated and will be removed in 0.27.0. Make sure to retrieve the weights using `get_peft_model`. See https://huggingface.co/docs/peft/v0.6.2/en/quicktour#peftmodel for more information.",
)
state_dict = {}
for name, module in text_encoder_attn_modules(text_encoder):
for k, v in module.q_proj.lora_linear_layer.state_dict().items():
state_dict[f"{name}.q_proj.lora_linear_layer.{k}"] = v
for k, v in module.k_proj.lora_linear_layer.state_dict().items():
state_dict[f"{name}.k_proj.lora_linear_layer.{k}"] = v
for k, v in module.v_proj.lora_linear_layer.state_dict().items():
state_dict[f"{name}.v_proj.lora_linear_layer.{k}"] = v
for k, v in module.out_proj.lora_linear_layer.state_dict().items():
state_dict[f"{name}.out_proj.lora_linear_layer.{k}"] = v
return state_dict
if is_transformers_available():
def text_encoder_attn_modules(text_encoder):
deprecate(
"text_encoder_attn_modules in `models`",
"0.27.0",
"`text_encoder_lora_state_dict` is deprecated and will be removed in 0.27.0. Make sure to retrieve the weights using `get_peft_model`. See https://huggingface.co/docs/peft/v0.6.2/en/quicktour#peftmodel for more information.",
)
from transformers import CLIPTextModel, CLIPTextModelWithProjection
attn_modules = []
if isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection)):
for i, layer in enumerate(text_encoder.text_model.encoder.layers):
name = f"text_model.encoder.layers.{i}.self_attn"
mod = layer.self_attn
attn_modules.append((name, mod))
else:
raise ValueError(f"do not know how to get attention modules for: {text_encoder.__class__.__name__}")
return attn_modules
_import_structure = {}
if is_torch_available():
_import_structure["single_file_model"] = ["FromOriginalModelMixin"]
_import_structure["transformer_sd3"] = ["SD3Transformer2DLoadersMixin"]
_import_structure["unet"] = ["UNet2DConditionLoadersMixin"]
_import_structure["utils"] = ["AttnProcsLayers"]
if is_transformers_available():
_import_structure["single_file"] = ["FromSingleFileMixin"]
_import_structure["lora_pipeline"] = [
"AmusedLoraLoaderMixin",
"StableDiffusionLoraLoaderMixin",
"SD3LoraLoaderMixin",
"StableDiffusionXLLoraLoaderMixin",
"LTXVideoLoraLoaderMixin",
"LoraLoaderMixin",
"FluxLoraLoaderMixin",
"CogVideoXLoraLoaderMixin",
"Mochi1LoraLoaderMixin",
"HunyuanVideoLoraLoaderMixin",
"SanaLoraLoaderMixin",
]
_import_structure["textual_inversion"] = ["TextualInversionLoaderMixin"]
_import_structure["ip_adapter"] = [
"IPAdapterMixin",
"SD3IPAdapterMixin",
]
_import_structure["peft"] = ["PeftAdapterMixin"]
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
if is_torch_available():
from .single_file_model import FromOriginalModelMixin
from .transformer_sd3 import SD3Transformer2DLoadersMixin
from .unet import UNet2DConditionLoadersMixin
from .utils import AttnProcsLayers
if is_transformers_available():
from .ip_adapter import (
IPAdapterMixin,
SD3IPAdapterMixin,
)
from .lora_pipeline import (
AmusedLoraLoaderMixin,
CogVideoXLoraLoaderMixin,
FluxLoraLoaderMixin,
HunyuanVideoLoraLoaderMixin,
LoraLoaderMixin,
LTXVideoLoraLoaderMixin,
Mochi1LoraLoaderMixin,
SanaLoraLoaderMixin,
SD3LoraLoaderMixin,
StableDiffusionLoraLoaderMixin,
StableDiffusionXLLoraLoaderMixin,
)
from .single_file import FromSingleFileMixin
from .textual_inversion import TextualInversionLoaderMixin
from .peft import PeftAdapterMixin
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|
from typing import TYPE_CHECKING
from ..utils import DIFFUSERS_SLOW_IMPORT, _LazyModule, deprecate
from ..utils.import_utils import is_peft_available, is_torch_available, is_transformers_available
def text_encoder_lora_state_dict(text_encoder):
deprecate(
"text_encoder_load_state_dict in `models`",
"0.27.0",
"`text_encoder_lora_state_dict` is deprecated and will be removed in 0.27.0. Make sure to retrieve the weights using `get_peft_model`. See https://huggingface.co/docs/peft/v0.6.2/en/quicktour#peftmodel for more information.",
)
state_dict = {}
for name, module in text_encoder_attn_modules(text_encoder):
for k, v in module.q_proj.lora_linear_layer.state_dict().items():
state_dict[f"{name}.q_proj.lora_linear_layer.{k}"] = v
for k, v in module.k_proj.lora_linear_layer.state_dict().items():
state_dict[f"{name}.k_proj.lora_linear_layer.{k}"] = v
for k, v in module.v_proj.lora_linear_layer.state_dict().items():
state_dict[f"{name}.v_proj.lora_linear_layer.{k}"] = v
for k, v in module.out_proj.lora_linear_layer.state_dict().items():
state_dict[f"{name}.out_proj.lora_linear_layer.{k}"] = v
return state_dict
if is_transformers_available():
def text_encoder_attn_modules(text_encoder):
deprecate(
"text_encoder_attn_modules in `models`",
"0.27.0",
"`text_encoder_lora_state_dict` is deprecated and will be removed in 0.27.0. Make sure to retrieve the weights using `get_peft_model`. See https://huggingface.co/docs/peft/v0.6.2/en/quicktour#peftmodel for more information.",
)
from transformers import CLIPTextModel, CLIPTextModelWithProjection
attn_modules = []
if isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection)):
for i, layer in enumerate(text_encoder.text_model.encoder.layers):
name = f"text_model.encoder.layers.{i}.self_attn"
mod = layer.self_attn
attn_modules.append((name, mod))
else:
raise ValueError(f"do not know how to get attention modules for: {text_encoder.__class__.__name__}")
return attn_modules
_import_structure = {}
if is_torch_available():
_import_structure["single_file_model"] = ["FromOriginalModelMixin"]
_import_structure["unet"] = ["UNet2DConditionLoadersMixin"]
_import_structure["utils"] = ["AttnProcsLayers"]
if is_transformers_available():
_import_structure["single_file"] = ["FromSingleFileMixin"]
_import_structure["lora_pipeline"] = [
"AmusedLoraLoaderMixin",
"StableDiffusionLoraLoaderMixin",
"SD3LoraLoaderMixin",
"StableDiffusionXLLoraLoaderMixin",
"LTXVideoLoraLoaderMixin",
"LoraLoaderMixin",
"FluxLoraLoaderMixin",
"CogVideoXLoraLoaderMixin",
"Mochi1LoraLoaderMixin",
"HunyuanVideoLoraLoaderMixin",
"SanaLoraLoaderMixin",
]
_import_structure["textual_inversion"] = ["TextualInversionLoaderMixin"]
_import_structure["ip_adapter"] = ["IPAdapterMixin"]
_import_structure["peft"] = ["PeftAdapterMixin"]
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
if is_torch_available():
from .single_file_model import FromOriginalModelMixin
from .unet import UNet2DConditionLoadersMixin
from .utils import AttnProcsLayers
if is_transformers_available():
from .ip_adapter import IPAdapterMixin
from .lora_pipeline import (
AmusedLoraLoaderMixin,
CogVideoXLoraLoaderMixin,
FluxLoraLoaderMixin,
HunyuanVideoLoraLoaderMixin,
LoraLoaderMixin,
LTXVideoLoraLoaderMixin,
Mochi1LoraLoaderMixin,
SanaLoraLoaderMixin,
SD3LoraLoaderMixin,
StableDiffusionLoraLoaderMixin,
StableDiffusionXLLoraLoaderMixin,
)
from .single_file import FromSingleFileMixin
from .textual_inversion import TextualInversionLoaderMixin
from .peft import PeftAdapterMixin
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|
_base_ = '../grounding_dino_swin-t_pretrain_obj365.py'
data_root = 'data/coco/'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[
[
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
                    # The ratio of all images in the train dataset is < 7,
                    # following the original implementation
scales=[(400, 4200), (500, 4200), (600, 4200)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
]
]),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'flip', 'flip_direction', 'text',
'custom_entities'))
]
train_dataloader = dict(
dataset=dict(
_delete_=True,
type='CocoDataset',
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
return_classes=True,
filter_cfg=dict(filter_empty_gt=False, min_size=32),
pipeline=train_pipeline))
optim_wrapper = dict(
_delete_=True,
type='OptimWrapper',
optimizer=dict(type='AdamW', lr=0.0002, weight_decay=0.0001),
clip_grad=dict(max_norm=0.1, norm_type=2),
paramwise_cfg=dict(
custom_keys={
'absolute_pos_embed': dict(decay_mult=0.),
'backbone': dict(lr_mult=0.1),
'language_model': dict(lr_mult=0.1),
}))
# learning policy
max_epochs = 12
param_scheduler = [
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs, val_interval=1)
default_hooks = dict(checkpoint=dict(max_keep_ckpts=1, save_best='auto'))
load_from = 'https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth' # noqa
|
_base_ = '../grounding_dino_swin-t_pretrain_obj365.py'
data_root = 'data/coco/'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[
[
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
                    # The ratio of all images in the train dataset is < 7,
                    # following the original implementation
scales=[(400, 4200), (500, 4200), (600, 4200)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
]
]),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'flip', 'flip_direction', 'text',
'custom_entities'))
]
train_dataloader = dict(
dataset=dict(
_delete_=True,
type='CocoDataset',
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
return_classes=True,
filter_cfg=dict(filter_empty_gt=False, min_size=32),
pipeline=train_pipeline))
optim_wrapper = dict(
_delete_=True,
type='OptimWrapper',
optimizer=dict(type='AdamW', lr=0.0002, weight_decay=0.0001),
clip_grad=dict(max_norm=0.1, norm_type=2),
paramwise_cfg=dict(
custom_keys={
'absolute_pos_embed': dict(decay_mult=0.),
'backbone': dict(lr_mult=0.1),
# 'language_model': dict(lr_mult=0),
}))
# learning policy
max_epochs = 12
param_scheduler = [
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[11],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs, val_interval=1)
default_hooks = dict(checkpoint=dict(max_keep_ckpts=1, save_best='auto'))
load_from = ''
|
from sentence_transformers.similarity_functions import SimilarityFunction
__all__ = ["SimilarityFunction"]
|
from enum import Enum
class SimilarityFunction(Enum):
COSINE = 0
EUCLIDEAN = 1
MANHATTAN = 2
DOT_PRODUCT = 3
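# A minimal usage sketch (not part of the original module): enum members can
# be looked up by name, which is how a metric chosen in a config string is
# typically resolved back to a SimilarityFunction.
if __name__ == "__main__":
    metric = SimilarityFunction["COSINE"]
    assert metric is SimilarityFunction.COSINE
    assert metric.value == 0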
|
import csv
import gzip
import logging
import os
from datetime import datetime
import torch
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, losses, models, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
## Training parameters
model_name = "distilbert-base-uncased"
batch_size = 16
pos_neg_ratio = 8  # batch_size must be divisible by pos_neg_ratio
epochs = 1
max_seq_length = 75
# Save path to store our model
model_save_path = "output/train_stsb_ct-{}-{}".format(model_name, datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
################# Train sentences #################
# We use 1 Million sentences from Wikipedia to train our model
wikipedia_dataset_path = "data/wiki1m_for_simcse.txt"
if not os.path.exists(wikipedia_dataset_path):
util.http_get(
"https://huggingface.co/datasets/princeton-nlp/datasets-for-simcse/resolve/main/wiki1m_for_simcse.txt",
wikipedia_dataset_path,
)
# train_sentences are simply your list of sentences
train_sentences = []
with open(wikipedia_dataset_path, "r", encoding="utf8") as fIn:
for line in fIn:
line = line.strip()
if len(line) >= 10:
train_sentences.append(line)
################# Download and load STSb #################
data_folder = "data/stsbenchmark"
sts_dataset_path = f"{data_folder}/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "dev":
dev_samples.append(inp_example)
elif row["split"] == "test":
test_samples.append(inp_example)
dev_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name="sts-dev")
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
################# Initialize an SBERT model #################
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# For ContrastiveTension we need a special data loader to construct batches with the desired properties
train_dataloader = losses.ContrastiveTensionDataLoader(
train_sentences, batch_size=batch_size, pos_neg_ratio=pos_neg_ratio
)
# As loss, we use losses.ContrastiveTensionLoss
train_loss = losses.ContrastiveTensionLoss(model)
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
epochs=1,
evaluation_steps=1000,
weight_decay=0,
warmup_steps=0,
optimizer_class=torch.optim.RMSprop,
optimizer_params={"lr": 1e-5},
output_path=model_save_path,
use_amp=False, # Set to True, if your GPU has optimized FP16 cores
)
########### Load the model and evaluate on test set
model = SentenceTransformer(model_save_path)
test_evaluator(model)
|
import torch
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers import SentenceTransformer, LoggingHandler, models, util, InputExample
from sentence_transformers import losses
import os
import gzip
import csv
from datetime import datetime
import logging
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
## Training parameters
model_name = "distilbert-base-uncased"
batch_size = 16
pos_neg_ratio = 8  # batch_size must be divisible by pos_neg_ratio
epochs = 1
max_seq_length = 75
# Save path to store our model
model_save_path = "output/train_stsb_ct-{}-{}".format(model_name, datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
################# Train sentences #################
# We use 1 Million sentences from Wikipedia to train our model
wikipedia_dataset_path = "data/wiki1m_for_simcse.txt"
if not os.path.exists(wikipedia_dataset_path):
util.http_get(
"https://huggingface.co/datasets/princeton-nlp/datasets-for-simcse/resolve/main/wiki1m_for_simcse.txt",
wikipedia_dataset_path,
)
# train_sentences are simply your list of sentences
train_sentences = []
with open(wikipedia_dataset_path, "r", encoding="utf8") as fIn:
for line in fIn:
line = line.strip()
if len(line) >= 10:
train_sentences.append(line)
################# Download and load STSb #################
data_folder = "data/stsbenchmark"
sts_dataset_path = f"{data_folder}/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "dev":
dev_samples.append(inp_example)
elif row["split"] == "test":
test_samples.append(inp_example)
dev_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name="sts-dev")
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
################# Initialize an SBERT model #################
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# For ContrastiveTension we need a special data loader to construct batches with the desired properties
train_dataloader = losses.ContrastiveTensionDataLoader(
train_sentences, batch_size=batch_size, pos_neg_ratio=pos_neg_ratio
)
# As loss, we use losses.ContrastiveTensionLoss
train_loss = losses.ContrastiveTensionLoss(model)
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
epochs=1,
evaluation_steps=1000,
weight_decay=0,
warmup_steps=0,
optimizer_class=torch.optim.RMSprop,
optimizer_params={"lr": 1e-5},
output_path=model_save_path,
use_amp=False, # Set to True, if your GPU has optimized FP16 cores
)
########### Load the model and evaluate on test set
model = SentenceTransformer(model_save_path)
test_evaluator(model)
|
_base_ = './centernet-update_r50_fpn_8xb8-amp-lsj-200e_coco.py'
model = dict(
backbone=dict(
depth=18,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
neck=dict(in_channels=[64, 128, 256, 512]))
|
_base_ = './centernet_update_r50_fpn_fp16_lsj_200e_coco.py'
model = dict(
backbone=dict(
depth=18,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
neck=dict(in_channels=[64, 128, 256, 512]))
|
import os
import subprocess
import sys
import pytest
from xgboost import testing as tm
DEMO_DIR = tm.demo_dir(__file__)
PYTHON_DEMO_DIR = os.path.join(DEMO_DIR, "guide-python")
@pytest.mark.skipif(**tm.no_cupy())
def test_data_iterator():
script = os.path.join(PYTHON_DEMO_DIR, "quantile_data_iterator.py")
cmd = ["python", script]
subprocess.check_call(cmd)
def test_update_process_demo():
script = os.path.join(PYTHON_DEMO_DIR, "update_process.py")
cmd = ["python", script]
subprocess.check_call(cmd)
def test_categorical_demo():
script = os.path.join(PYTHON_DEMO_DIR, "categorical.py")
cmd = ["python", script]
subprocess.check_call(cmd)
@pytest.mark.skipif(**tm.no_rmm())
@pytest.mark.skipif(**tm.no_cupy())
def test_external_memory_demo():
script = os.path.join(PYTHON_DEMO_DIR, "external_memory.py")
cmd = ["python", script, "--device=cuda"]
subprocess.check_call(cmd)
@pytest.mark.skipif(**tm.no_rmm())
@pytest.mark.skipif(**tm.no_cupy())
@pytest.mark.mgpu
def test_distributed_extmem_basic_demo():
script = os.path.join(PYTHON_DEMO_DIR, "distributed_extmem_basic.py")
cmd = ["python", script, "--device=cuda"]
subprocess.check_call(cmd)
|
import os
import subprocess
import sys
import pytest
from xgboost import testing as tm
DEMO_DIR = tm.demo_dir(__file__)
PYTHON_DEMO_DIR = os.path.join(DEMO_DIR, "guide-python")
@pytest.mark.skipif(**tm.no_cupy())
def test_data_iterator():
script = os.path.join(PYTHON_DEMO_DIR, "quantile_data_iterator.py")
cmd = ["python", script]
subprocess.check_call(cmd)
def test_update_process_demo():
script = os.path.join(PYTHON_DEMO_DIR, "update_process.py")
cmd = ["python", script]
subprocess.check_call(cmd)
def test_categorical_demo():
script = os.path.join(PYTHON_DEMO_DIR, "categorical.py")
cmd = ["python", script]
subprocess.check_call(cmd)
@pytest.mark.skipif(**tm.no_rmm())
@pytest.mark.skipif(**tm.no_cupy())
def test_external_memory_demo():
script = os.path.join(PYTHON_DEMO_DIR, "external_memory.py")
cmd = ["python", script, "--device=cuda"]
subprocess.check_call(cmd)
@pytest.mark.skipif(**tm.no_rmm())
@pytest.mark.skipif(**tm.no_cupy())
def test_distributed_extmem_basic_demo():
script = os.path.join(PYTHON_DEMO_DIR, "distributed_extmem_basic.py")
cmd = ["python", script, "--device=cuda"]
subprocess.check_call(cmd)
|
import os
import signal
from threading import Thread
from time import sleep
from typing import Optional
_IN_TOPLEVEL_PROCESS = True
def in_toplevel_process() -> bool:
global _IN_TOPLEVEL_PROCESS
return _IN_TOPLEVEL_PROCESS
# If this process dies abnormally (e.g. segfault)
# it will not shut down the workers. Instead,
# the workers will have their parent reassigned to the
# init process. This launches a separate thread to
# watch for the worker getting reassigned,
# and cleans it up in this case.
#
# This function cannot be an inner function since otherwise mp_context="spawn" would
# not work for ProcessPoolExecutor since inner functions cannot be pickled.
def _async_compile_initializer(orig_ppid: int) -> None:
import torch._C
def run() -> None:
while True:
sleep(1)
if orig_ppid != os.getppid():
os.kill(os.getpid(), signal.SIGKILL)
global _watchdog_thread, _original_parent
_original_parent = orig_ppid
_watchdog_thread = Thread(target=run, daemon=True)
_watchdog_thread.start()
# Ignore Ctrl-C (i.e. SIGINT) sent to pool workers to avoid meaningless log spam.
signal.signal(signal.SIGINT, signal.SIG_IGN)
# Install a crash handler to print out the stacktrace for SEGV
torch._C._initCrashHandler()
# Set a bit to distinguish async_compile subprocesses from the toplevel process.
global _IN_TOPLEVEL_PROCESS
_IN_TOPLEVEL_PROCESS = False
_watchdog_thread: Optional[Thread] = None
_original_parent: Optional[int] = None
def has_parent_changed() -> bool:
return _original_parent != os.getppid()
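# A hedged usage sketch (the pool setup below is an assumption, not part of
# this module): `_async_compile_initializer` is meant to run once per pool
# worker, installing the parent-watchdog thread and SIGINT handler, so it is
# passed as the `initializer` of a spawn-based ProcessPoolExecutor.
if __name__ == "__main__":
    import multiprocessing
    from concurrent.futures import ProcessPoolExecutor

    pool = ProcessPoolExecutor(
        max_workers=2,
        mp_context=multiprocessing.get_context("spawn"),
        initializer=_async_compile_initializer,
        initargs=(os.getpid(),),  # workers watch for this parent pid changing
    )
    pool.shutdown()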
|
import os
import signal
from threading import Thread
from time import sleep
from typing import Optional
_IN_TOPLEVEL_PROCESS = True
def in_toplevel_process() -> bool:
global _IN_TOPLEVEL_PROCESS
return _IN_TOPLEVEL_PROCESS
# If this process dies abnormally (e.g. segfault)
# it will not shut down the workers. Instead,
# the workers will have their parent reassigned to the
# init process. This launches a separate thread to
# watch for the worker getting reassigned,
# and cleans it up in this case.
#
# This function cannot be an inner function since otherwise mp_context="spawn" would
# not work for ProcessPoolExecutor since inner functions cannot be pickled.
def _async_compile_initializer(orig_ppid: int) -> None:
def run() -> None:
while True:
sleep(1)
if orig_ppid != os.getppid():
os.kill(os.getpid(), signal.SIGKILL)
global _watchdog_thread, _original_parent
_original_parent = orig_ppid
_watchdog_thread = Thread(target=run, daemon=True)
_watchdog_thread.start()
# Ignore Ctrl-C (i.e. SIGINT) sent to pool workers to avoid meaningless log spam.
signal.signal(signal.SIGINT, signal.SIG_IGN)
# Set a bit to distinguish async_compile subprocesses from the toplevel process.
global _IN_TOPLEVEL_PROCESS
_IN_TOPLEVEL_PROCESS = False
_watchdog_thread: Optional[Thread] = None
_original_parent: Optional[int] = None
def has_parent_changed() -> bool:
return _original_parent != os.getppid()
|
"""Dump objects to json."""
import json
from typing import Any
from pydantic import BaseModel
from langchain_core.load.serializable import Serializable, to_json_not_implemented
def default(obj: Any) -> Any:
"""Return a default value for an object.
Args:
obj: The object to serialize to json if it is a Serializable object.
Returns:
A json serializable object or a SerializedNotImplemented object.
"""
if isinstance(obj, Serializable):
return obj.to_json()
else:
return to_json_not_implemented(obj)
def _dump_pydantic_models(obj: Any) -> Any:
from langchain_core.messages import AIMessage
from langchain_core.outputs import ChatGeneration
if (
isinstance(obj, ChatGeneration)
and isinstance(obj.message, AIMessage)
and (parsed := obj.message.additional_kwargs.get("parsed"))
and isinstance(parsed, BaseModel)
):
obj_copy = obj.model_copy(deep=True)
obj_copy.message.additional_kwargs["parsed"] = parsed.model_dump()
return obj_copy
else:
return obj
def dumps(obj: Any, *, pretty: bool = False, **kwargs: Any) -> str:
"""Return a json string representation of an object.
Args:
obj: The object to dump.
pretty: Whether to pretty print the json. If true, the json will be
indented with 2 spaces (if no indent is provided as part of kwargs).
Default is False.
kwargs: Additional arguments to pass to json.dumps
Returns:
A json string representation of the object.
Raises:
ValueError: If `default` is passed as a kwarg.
"""
if "default" in kwargs:
msg = "`default` should not be passed to dumps"
raise ValueError(msg)
try:
obj = _dump_pydantic_models(obj)
if pretty:
indent = kwargs.pop("indent", 2)
return json.dumps(obj, default=default, indent=indent, **kwargs)
else:
return json.dumps(obj, default=default, **kwargs)
except TypeError:
if pretty:
indent = kwargs.pop("indent", 2)
return json.dumps(to_json_not_implemented(obj), indent=indent, **kwargs)
else:
return json.dumps(to_json_not_implemented(obj), **kwargs)
def dumpd(obj: Any) -> Any:
"""Return a dict representation of an object.
Note:
Unfortunately this function is not as efficient as it could be
because it first dumps the object to a json string and then loads it
back into a dictionary.
Args:
obj: The object to dump.
Returns:
dictionary that can be serialized to json using json.dumps
"""
return json.loads(dumps(obj))
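# A minimal usage sketch (an assumption layered on this module, not part of
# it): plain JSON-serializable objects pass straight through `dumps`, and
# `dumpd` is simply a dict round-trip of the same string.
if __name__ == "__main__":
    payload = {"question": "hi", "k": 3}
    assert dumps(payload) == json.dumps(payload)
    assert dumpd(payload) == payload
    print(dumps(payload, pretty=True))  # indented with 2 spaces by default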
|
import json
from typing import Any
from pydantic import BaseModel
from langchain_core.load.serializable import Serializable, to_json_not_implemented
def default(obj: Any) -> Any:
"""Return a default value for a Serializable object or
a SerializedNotImplemented object.
Args:
obj: The object to serialize to json if it is a Serializable object.
Returns:
A json serializable object or a SerializedNotImplemented object.
"""
if isinstance(obj, Serializable):
return obj.to_json()
else:
return to_json_not_implemented(obj)
def _dump_pydantic_models(obj: Any) -> Any:
from langchain_core.messages import AIMessage
from langchain_core.outputs import ChatGeneration
if (
isinstance(obj, ChatGeneration)
and isinstance(obj.message, AIMessage)
and (parsed := obj.message.additional_kwargs.get("parsed"))
and isinstance(parsed, BaseModel)
):
obj_copy = obj.model_copy(deep=True)
obj_copy.message.additional_kwargs["parsed"] = parsed.model_dump()
return obj_copy
else:
return obj
def dumps(obj: Any, *, pretty: bool = False, **kwargs: Any) -> str:
"""Return a json string representation of an object.
Args:
obj: The object to dump.
pretty: Whether to pretty print the json. If true, the json will be
indented with 2 spaces (if no indent is provided as part of kwargs).
Default is False.
kwargs: Additional arguments to pass to json.dumps
Returns:
A json string representation of the object.
Raises:
ValueError: If `default` is passed as a kwarg.
"""
if "default" in kwargs:
msg = "`default` should not be passed to dumps"
raise ValueError(msg)
try:
obj = _dump_pydantic_models(obj)
if pretty:
indent = kwargs.pop("indent", 2)
return json.dumps(obj, default=default, indent=indent, **kwargs)
else:
return json.dumps(obj, default=default, **kwargs)
except TypeError:
if pretty:
indent = kwargs.pop("indent", 2)
return json.dumps(to_json_not_implemented(obj), indent=indent, **kwargs)
else:
return json.dumps(to_json_not_implemented(obj), **kwargs)
def dumpd(obj: Any) -> Any:
"""Return a dict representation of an object.
Note:
Unfortunately this function is not as efficient as it could be
because it first dumps the object to a json string and then loads it
back into a dictionary.
Args:
obj: The object to dump.
Returns:
dictionary that can be serialized to json using json.dumps
"""
return json.loads(dumps(obj))
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api import activations
from keras.api import applications
from keras.api import callbacks
from keras.api import config
from keras.api import constraints
from keras.api import datasets
from keras.api import distribution
from keras.api import dtype_policies
from keras.api import export
from keras.api import initializers
from keras.api import legacy
from keras.api import mixed_precision
from keras.api import models
from keras.api import ops
from keras.api import optimizers
from keras.api import quantizers
from keras.api import random
from keras.api import regularizers
from keras.api import tree
from keras.api import utils
from keras.api._tf_keras.keras import backend
from keras.api._tf_keras.keras import layers
from keras.api._tf_keras.keras import losses
from keras.api._tf_keras.keras import metrics
from keras.api._tf_keras.keras import preprocessing
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.backend.common.stateless_scope import StatelessScope
from keras.src.backend.common.symbolic_scope import SymbolicScope
from keras.src.backend.exports import Variable
from keras.src.backend.exports import device
from keras.src.backend.exports import name_scope
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.initializers.initializer import Initializer
from keras.src.layers.core.input_layer import Input
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.losses.loss import Loss
from keras.src.metrics.metric import Metric
from keras.src.models.model import Model
from keras.src.models.sequential import Sequential
from keras.src.ops.function import Function
from keras.src.ops.operation import Operation
from keras.src.optimizers.optimizer import Optimizer
from keras.src.quantizers.quantizers import Quantizer
from keras.src.regularizers.regularizers import Regularizer
from keras.src.version import __version__
from keras.src.version import version
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api import activations
from keras.api import applications
from keras.api import callbacks
from keras.api import config
from keras.api import constraints
from keras.api import datasets
from keras.api import distribution
from keras.api import dtype_policies
from keras.api import export
from keras.api import initializers
from keras.api import legacy
from keras.api import mixed_precision
from keras.api import models
from keras.api import ops
from keras.api import optimizers
from keras.api import quantizers
from keras.api import random
from keras.api import regularizers
from keras.api import tree
from keras.api import utils
from keras.api._tf_keras.keras import backend
from keras.api._tf_keras.keras import layers
from keras.api._tf_keras.keras import losses
from keras.api._tf_keras.keras import metrics
from keras.api._tf_keras.keras import preprocessing
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.backend.common.stateless_scope import StatelessScope
from keras.src.backend.exports import Variable
from keras.src.backend.exports import device
from keras.src.backend.exports import name_scope
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.initializers.initializer import Initializer
from keras.src.layers.core.input_layer import Input
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.losses.loss import Loss
from keras.src.metrics.metric import Metric
from keras.src.models.model import Model
from keras.src.models.sequential import Sequential
from keras.src.ops.function import Function
from keras.src.ops.operation import Operation
from keras.src.optimizers.optimizer import Optimizer
from keras.src.quantizers.quantizers import Quantizer
from keras.src.regularizers.regularizers import Regularizer
from keras.src.version import __version__
from keras.src.version import version
|
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Dict, List, Optional, Union
from .base import HfQuantizer
if TYPE_CHECKING:
from ..modeling_utils import PreTrainedModel
from ..utils import is_accelerate_available, is_torch_available, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class BitNetHfQuantizer(HfQuantizer):
"""
1.58-bit quantization from BitNet quantization method:
Before loading: it converts the linear layers into BitLinear layers during loading.
Checkout the paper introducing this method : https://arxiv.org/pdf/2402.17764
"""
requires_parameters_quantization = False
requires_calibration = True
required_packages = ["accelerate"]
def __init__(self, quantization_config, **kwargs):
super().__init__(quantization_config, **kwargs)
self.quantization_config = quantization_config
def validate_environment(self, *args, **kwargs):
if not is_accelerate_available():
raise ImportError("Loading a BitNet quantized model requires accelerate (`pip install accelerate`)")
if kwargs.get("from_tf", False) or kwargs.get("from_flax", False):
raise ValueError(
"Loading ternary weights from tf/flax is currently not supported, please make"
" sure the weights are in PyTorch format."
)
if not torch.cuda.is_available():
logger.warning_once(
"You don't have a GPU available to load the model, the inference will be slow because of weight unpacking"
)
return
device_map = kwargs.get("device_map", None)
if device_map is None:
logger.warning_once(
"You have loaded a BitNet model on CPU and have a CUDA device available, make sure to set "
"your model on a GPU device in order to run your model."
)
elif isinstance(device_map, dict) and ("cpu" in device_map.values() or "disk" in device_map.values()):
raise ValueError(
"You are attempting to load a BitNet model with a device_map that contains a CPU or disk device. "
"This is not supported. Please remove the CPU or disk device from the device_map."
)
def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
return model
def _process_model_before_weight_loading(
self,
model: "PreTrainedModel",
keep_in_fp32_modules: Optional[List[str]] = None,
**kwargs,
):
from ..integrations import replace_with_bitnet_linear
self.modules_to_not_convert = self.get_modules_to_not_convert(
model, self.quantization_config.modules_to_not_convert, keep_in_fp32_modules
)
model = replace_with_bitnet_linear(
model,
modules_to_not_convert=self.modules_to_not_convert,
quantization_config=self.quantization_config,
pre_quantized=self.pre_quantized,
)
def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]:
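# Assumed rationale (not stated in the original): keep ~10% of each device's memory free as headroom for buffers created while unpacking packed weights.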
max_memory = {key: val * 0.90 for key, val in max_memory.items()}
return max_memory
def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype":
target_dtype = torch.int8
return target_dtype
def is_serializable(self, safe_serialization=None):
return True
@property
def is_trainable(self) -> bool:
return (
self.quantization_config.linear_class == "autobitlinear"
and self.quantization_config.quantization_mode == "online"
)
@property
def is_qat_trainable(self) -> bool:
"""Flag indicating whether the quantized model can carry out quantization aware training"""
return (
self.quantization_config.linear_class == "autobitlinear"
and self.quantization_config.quantization_mode == "online"
)
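# Hedged usage sketch (not part of the original file; the checkpoint id is illustrative).
# The quantizer is dispatched automatically when a checkpoint carries a BitNet quantization_config:
#
# from transformers import AutoModelForCausalLM
# model = AutoModelForCausalLM.from_pretrained(
#     "some-org/some-bitnet-1.58bit-model",  # assumed pre-quantized checkpoint
#     device_map="cuda",
# )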
|
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Dict, List, Optional, Union
from .base import HfQuantizer
if TYPE_CHECKING:
from ..modeling_utils import PreTrainedModel
from ..utils import is_accelerate_available, is_torch_available, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class BitNetHfQuantizer(HfQuantizer):
"""
1.58-bit quantization via the BitNet method:
before loading, it converts the linear layers into BitLinear layers.
Check out the paper introducing this method: https://arxiv.org/pdf/2402.17764
"""
requires_parameters_quantization = False
requires_calibration = True
required_packages = ["accelerate"]
def __init__(self, quantization_config, **kwargs):
super().__init__(quantization_config, **kwargs)
self.quantization_config = quantization_config
def validate_environment(self, *args, **kwargs):
if not is_accelerate_available():
raise ImportError("Loading a BitNet quantized model requires accelerate (`pip install accelerate`)")
if kwargs.get("from_tf", False) or kwargs.get("from_flax", False):
raise ValueError(
"Loading ternary weights from tf/flax is currently not supported, please make"
" sure the weights are in PyTorch format."
)
if not torch.cuda.is_available():
logger.warning_once(
"You don't have a GPU available to load the model, the inference will be slow because of weight unpacking"
)
return
device_map = kwargs.get("device_map", None)
if device_map is None:
logger.warning_once(
"You have loaded a BitNet model on CPU and have a CUDA device available, make sure to set "
"your model on a GPU device in order to run your model."
)
elif isinstance(device_map, dict) and ("cpu" in device_map.values() or "disk" in device_map.values()):
raise ValueError(
"You are attempting to load a BitNet model with a device_map that contains a CPU or disk device. "
"This is not supported. Please remove the CPU or disk device from the device_map."
)
def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
return model
def _process_model_before_weight_loading(
self,
model: "PreTrainedModel",
keep_in_fp32_modules: Optional[List[str]] = None,
**kwargs,
):
from ..integrations import replace_with_bitnet_linear
self.modules_to_not_convert = self.get_modules_to_not_convert(
model, self.quantization_config.modules_to_not_convert, keep_in_fp32_modules
)
model = replace_with_bitnet_linear(
model,
modules_to_not_convert=self.modules_to_not_convert,
quantization_config=self.quantization_config,
pre_quantized=self.pre_quantized,
)
def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]:
max_memory = {key: val * 0.90 for key, val in max_memory.items()}
return max_memory
def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype":
target_dtype = torch.int8
return target_dtype
def is_serializable(self, safe_serialization=None):
return True
@property
def is_trainable(self) -> bool:
return False
|
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.audio_url import AudioUrl
from docarray.typing.url.image_url import ImageUrl
from docarray.typing.url.text_url import TextUrl
from docarray.typing.url.url_3d.mesh_url import Mesh3DUrl
from docarray.typing.url.url_3d.point_cloud_url import PointCloud3DUrl
from docarray.typing.url.video_url import VideoUrl
__all__ = [
'ImageUrl',
'AudioUrl',
'AnyUrl',
'TextUrl',
'Mesh3DUrl',
'PointCloud3DUrl',
'VideoUrl',
]
|
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.audio_url import AudioUrl
from docarray.typing.url.image_url import ImageUrl
from docarray.typing.url.text_url import TextUrl
from docarray.typing.url.url_3d.mesh_url import Mesh3DUrl
from docarray.typing.url.url_3d.point_cloud_url import PointCloud3DUrl
__all__ = ['ImageUrl', 'AudioUrl', 'AnyUrl', 'TextUrl', 'Mesh3DUrl', 'PointCloud3DUrl']
|
import os
from unittest import TestCase
import cv2
import numpy as np
import torch
from mmengine.structures import InstanceData, PixelData
from mmdet.evaluation import INSTANCE_OFFSET
from mmdet.structures import DetDataSample
from mmdet.visualization import DetLocalVisualizer
def _rand_bboxes(num_boxes, h, w):
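# Sample random centers (cx, cy) and sizes (bw, bh) in [0, 1), scale them to the image, and convert to (x1, y1, x2, y2) corners clamped to the image bounds.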
cx, cy, bw, bh = torch.rand(num_boxes, 4).T
tl_x = ((cx * w) - (w * bw / 2)).clamp(0, w)
tl_y = ((cy * h) - (h * bh / 2)).clamp(0, h)
br_x = ((cx * w) + (w * bw / 2)).clamp(0, w)
br_y = ((cy * h) + (h * bh / 2)).clamp(0, h)
bboxes = torch.stack([tl_x, tl_y, br_x, br_y], dim=0).T
return bboxes
def _create_panoptic_data(num_boxes, h, w):
sem_seg = np.zeros((h, w), dtype=np.int64) + 2
bboxes = _rand_bboxes(num_boxes, h, w).int()
labels = torch.randint(2, (num_boxes, ))
for i in range(num_boxes):
x, y, w, h = bboxes[i]
sem_seg[y:y + h, x:x + w] = (i + 1) * INSTANCE_OFFSET + labels[i]
return sem_seg[None]
class TestDetLocalVisualizer(TestCase):
def test_add_datasample(self):
h = 12
w = 10
num_class = 3
num_bboxes = 5
out_file = 'out_file.jpg'
image = np.random.randint(0, 256, size=(h, w, 3)).astype('uint8')
# test gt_instances
gt_instances = InstanceData()
gt_instances.bboxes = _rand_bboxes(num_bboxes, h, w)
gt_instances.labels = torch.randint(0, num_class, (num_bboxes, ))
det_data_sample = DetDataSample()
det_data_sample.gt_instances = gt_instances
det_local_visualizer = DetLocalVisualizer()
det_local_visualizer.add_datasample(
'image', image, det_data_sample, draw_pred=False)
# test out_file
det_local_visualizer.add_datasample(
'image',
image,
det_data_sample,
draw_pred=False,
out_file=out_file)
assert os.path.exists(out_file)
drawn_img = cv2.imread(out_file)
assert drawn_img.shape == (h, w, 3)
os.remove(out_file)
# test gt_instances and pred_instances
pred_instances = InstanceData()
pred_instances.bboxes = _rand_bboxes(num_bboxes, h, w)
pred_instances.labels = torch.randint(0, num_class, (num_bboxes, ))
pred_instances.scores = torch.rand((num_bboxes, ))
det_data_sample.pred_instances = pred_instances
det_local_visualizer.add_datasample(
'image', image, det_data_sample, out_file=out_file)
self._assert_image_and_shape(out_file, (h, w * 2, 3))
det_local_visualizer.add_datasample(
'image', image, det_data_sample, draw_gt=False, out_file=out_file)
self._assert_image_and_shape(out_file, (h, w, 3))
det_local_visualizer.add_datasample(
'image',
image,
det_data_sample,
draw_pred=False,
out_file=out_file)
self._assert_image_and_shape(out_file, (h, w, 3))
# test gt_panoptic_seg and pred_panoptic_seg
det_local_visualizer.dataset_meta = dict(CLASSES=('1', '2'))
gt_sem_seg = _create_panoptic_data(num_bboxes, h, w)
panoptic_seg = PixelData(sem_seg=gt_sem_seg)
det_data_sample = DetDataSample()
det_data_sample.gt_panoptic_seg = panoptic_seg
pred_sem_seg = _create_panoptic_data(num_bboxes, h, w)
panoptic_seg = PixelData(sem_seg=pred_sem_seg)
det_data_sample.pred_panoptic_seg = panoptic_seg
det_local_visualizer.add_datasample(
'image', image, det_data_sample, out_file=out_file)
self._assert_image_and_shape(out_file, (h, w * 2, 3))
# class information must be provided
det_local_visualizer.dataset_meta = {}
with self.assertRaises(AssertionError):
det_local_visualizer.add_datasample(
'image', image, det_data_sample, out_file=out_file)
def _assert_image_and_shape(self, out_file, out_shape):
assert os.path.exists(out_file)
drawn_img = cv2.imread(out_file)
assert drawn_img.shape == out_shape
os.remove(out_file)
|
import os
from unittest import TestCase
import cv2
import numpy as np
import torch
from mmengine.structures import InstanceData, PixelData
from mmdet.evaluation import INSTANCE_OFFSET
from mmdet.structures import DetDataSample
from mmdet.visualization import DetLocalVisualizer
def _rand_bboxes(num_boxes, h, w):
cx, cy, bw, bh = torch.rand(num_boxes, 4).T
tl_x = ((cx * w) - (w * bw / 2)).clamp(0, w)
tl_y = ((cy * h) - (h * bh / 2)).clamp(0, h)
br_x = ((cx * w) + (w * bw / 2)).clamp(0, w)
br_y = ((cy * h) + (h * bh / 2)).clamp(0, h)
bboxes = torch.vstack([tl_x, tl_y, br_x, br_y]).T
return bboxes
def _create_panoptic_data(num_boxes, h, w):
sem_seg = np.zeros((h, w), dtype=np.int64) + 2
bboxes = _rand_bboxes(num_boxes, h, w).int()
labels = torch.randint(2, (num_boxes, ))
for i in range(num_boxes):
x, y, w, h = bboxes[i]
sem_seg[y:y + h, x:x + w] = (i + 1) * INSTANCE_OFFSET + labels[i]
return sem_seg[None]
class TestDetLocalVisualizer(TestCase):
def test_add_datasample(self):
h = 12
w = 10
num_class = 3
num_bboxes = 5
out_file = 'out_file.jpg'
image = np.random.randint(0, 256, size=(h, w, 3)).astype('uint8')
# test gt_instances
gt_instances = InstanceData()
gt_instances.bboxes = _rand_bboxes(num_bboxes, h, w)
gt_instances.labels = torch.randint(0, num_class, (num_bboxes, ))
det_data_sample = DetDataSample()
det_data_sample.gt_instances = gt_instances
det_local_visualizer = DetLocalVisualizer()
det_local_visualizer.add_datasample(
'image', image, det_data_sample, draw_pred=False)
# test out_file
det_local_visualizer.add_datasample(
'image',
image,
det_data_sample,
draw_pred=False,
out_file=out_file)
assert os.path.exists(out_file)
drawn_img = cv2.imread(out_file)
assert drawn_img.shape == (h, w, 3)
os.remove(out_file)
# test gt_instances and pred_instances
pred_instances = InstanceData()
pred_instances.bboxes = _rand_bboxes(num_bboxes, h, w)
pred_instances.labels = torch.randint(0, num_class, (num_bboxes, ))
pred_instances.scores = torch.rand((num_bboxes, ))
det_data_sample.pred_instances = pred_instances
det_local_visualizer.add_datasample(
'image', image, det_data_sample, out_file=out_file)
self._assert_image_and_shape(out_file, (h, w * 2, 3))
det_local_visualizer.add_datasample(
'image', image, det_data_sample, draw_gt=False, out_file=out_file)
self._assert_image_and_shape(out_file, (h, w, 3))
det_local_visualizer.add_datasample(
'image',
image,
det_data_sample,
draw_pred=False,
out_file=out_file)
self._assert_image_and_shape(out_file, (h, w, 3))
# test gt_panoptic_seg and pred_panoptic_seg
det_local_visualizer.dataset_meta = dict(CLASSES=('1', '2'))
gt_sem_seg = _create_panoptic_data(num_bboxes, h, w)
panoptic_seg = PixelData(sem_seg=gt_sem_seg)
det_data_sample = DetDataSample()
det_data_sample.gt_panoptic_seg = panoptic_seg
pred_sem_seg = _create_panoptic_data(num_bboxes, h, w)
panoptic_seg = PixelData(sem_seg=pred_sem_seg)
det_data_sample.pred_panoptic_seg = panoptic_seg
det_local_visualizer.add_datasample(
'image', image, det_data_sample, out_file=out_file)
self._assert_image_and_shape(out_file, (h, w * 2, 3))
# class information must be provided
det_local_visualizer.dataset_meta = {}
with self.assertRaises(AssertionError):
det_local_visualizer.add_datasample(
'image', image, det_data_sample, out_file=out_file)
def _assert_image_and_shape(self, out_file, out_shape):
assert os.path.exists(out_file)
drawn_img = cv2.imread(out_file)
assert drawn_img.shape == out_shape
os.remove(out_file)
|
from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor, metaTensorFlow
T = TypeVar('T', bound='AudioTensorFlowTensor')
@_register_proto(proto_type_name='audio_tensorflow_tensor')
class AudioTensorFlowTensor(
AbstractAudioTensor, TensorFlowTensor, metaclass=metaTensorFlow
):
"""
Subclass of TensorFlowTensor, to represent an audio tensor.
Adds audio-specific features to the tensor.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
import tensorflow as tf
from pydantic import parse_obj_as
from docarray import BaseDocument
from docarray.typing import AudioTensorFlowTensor, AudioUrl
class MyAudioDoc(BaseDocument):
title: str
audio_tensor: Optional[AudioTensorFlowTensor]
url: Optional[AudioUrl]
bytes_: Optional[bytes]
doc_1 = MyAudioDoc(
title='my_first_audio_doc',
audio_tensor=tf.random.normal((1000, 2)),
)
doc_1.audio_tensor.save(file_path='path/to/file_1.wav')
doc_1.bytes_ = doc_1.audio_tensor.to_bytes()
doc_2 = MyAudioDoc(
title='my_second_audio_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.audio_tensor = doc_2.url.load()
doc_2.audio_tensor.save(file_path='path/to/file_2.wav')
doc_2.bytes_ = doc_1.audio_tensor.to_bytes()
"""
...
|
from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor, metaTensorFlow
T = TypeVar('T', bound='AudioTensorFlowTensor')
@_register_proto(proto_type_name='audio_tensorflow_tensor')
class AudioTensorFlowTensor(
AbstractAudioTensor, TensorFlowTensor, metaclass=metaTensorFlow
):
"""
Subclass of TensorFlowTensor, to represent an audio tensor.
Adds audio-specific features to the tensor.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
import tensorflow as tf
from pydantic import parse_obj_as
from docarray import BaseDocument
from docarray.typing import AudioTensorFlowTensor, AudioUrl
class MyAudioDoc(BaseDocument):
title: str
audio_tensor: Optional[AudioTensorFlowTensor]
url: Optional[AudioUrl]
bytes_: Optional[bytes]
doc_1 = MyAudioDoc(
title='my_first_audio_doc',
audio_tensor=tf.random.normal((1000, 2)),
)
doc_1.audio_tensor.save_to_wav_file(file_path='path/to/file_1.wav')
doc_1.bytes_ = doc_1.audio_tensor.to_bytes()
doc_2 = MyAudioDoc(
title='my_second_audio_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.audio_tensor = doc_2.url.load()
doc_2.audio_tensor.save_to_wav_file(file_path='path/to/file_2.wav')
doc_2.bytes_ = doc_1.audio_tensor.to_bytes()
"""
...
|
from __future__ import annotations
import torch.nn as nn
from sentence_transformers.losses.CosineSimilarityLoss import CosineSimilarityLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseCosineSimilarityLoss(CosineSimilarityLoss):
def __init__(
self,
model: SparseEncoder,
loss_fct: nn.Module = nn.MSELoss(),
cos_score_transformation: nn.Module = nn.Identity(),
) -> None:
"""
SparseCosineSimilarityLoss expects that the InputExamples consist of two texts and a float label. It computes the
vectors ``u = model(sentence_A)`` and ``v = model(sentence_B)`` and measures the cosine-similarity between the two.
By default, it minimizes the following loss: ``||input_label - cos_score_transformation(cosine_sim(u,v))||_2``.
Args:
model: SparseEncoder model
loss_fct: Which pytorch loss function should be used to
compare the ``cosine_similarity(u, v)`` with the
input_label? By default, MSE is used: ``||input_label -
cosine_sim(u, v)||_2``
cos_score_transformation: The cos_score_transformation
function is applied on top of cosine_similarity. By
default, the identity function is used (i.e. no change).
References:
# TODO: no sparse-specific training examples yet; we may want to add them.
- `Training Examples > Semantic Textual Similarity <../../../examples/sentence_transformer/training/sts/README.html>`_
Requirements:
1. Sentence pairs with corresponding similarity scores in range `[0, 1]`
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseCoSENTLoss` seems to produce a stronger training signal than CosineSimilarityLoss. In our experiments, SparseCoSENTLoss is recommended.
- :class:`SparseAnglELoss` is :class:`SparseCoSENTLoss` with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``. It also produces a stronger training signal than SparseCosineSimilarityLoss.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SparseCosineSimilarityLoss(model)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
super().__init__(model, loss_fct=loss_fct, cos_score_transformation=cos_score_transformation)
|
from __future__ import annotations
import torch.nn as nn
from sentence_transformers.losses.CosineSimilarityLoss import CosineSimilarityLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseCosineSimilarityLoss(CosineSimilarityLoss):
def __init__(
self,
model: SparseEncoder,
loss_fct: nn.Module = nn.MSELoss(),
cos_score_transformation: nn.Module = nn.Identity(),
) -> None:
"""
SparseCosineSimilarityLoss expects that the InputExamples consist of two texts and a float label. It computes the
vectors ``u = model(sentence_A)`` and ``v = model(sentence_B)`` and measures the cosine-similarity between the two.
By default, it minimizes the following loss: ``||input_label - cos_score_transformation(cosine_sim(u,v))||_2``.
Args:
model: SparseEncoder model
loss_fct: Which pytorch loss function should be used to
compare the ``cosine_similarity(u, v)`` with the
input_label? By default, MSE is used: ``||input_label -
cosine_sim(u, v)||_2``
cos_score_transformation: The cos_score_transformation
function is applied on top of cosine_similarity. By
default, the identity function is used (i.e. no change).
References:
# TODO: no sparse-specific training examples yet; we may want to add them.
- `Training Examples > Semantic Textual Similarity <../../../examples/sentence_transformer/training/sts/README.html>`_
Requirements:
1. Sentence pairs with corresponding similarity scores in range `[0, 1]`
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseCoSENTLoss` seems to produce a stronger training signal than CosineSimilarityLoss. In our experiments, SparseCoSENTLoss is recommended.
- :class:`SparseAnglELoss` is :class:`SparseCoSENTLoss` with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``. It also produces a stronger training signal than SparseCosineSimilarityLoss.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SparseCosineSimilarityLoss(model)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
super().__init__(model, loss_fct=loss_fct, cos_score_transformation=cos_score_transformation)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .config import Config, ConfigDict, DictAction
__all__ = ['Config', 'ConfigDict', 'DictAction']
|
# Copyright (c) OpenMMLab. All rights reserved.
from .config import Config, ConfigDict, DictAction
from .get_config_model import get_config, get_model
__all__ = ['Config', 'ConfigDict', 'DictAction', 'get_config', 'get_model']
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import mmcv
from PIL import Image
def parse_args():
parser = argparse.ArgumentParser(
description='Convert images to coco format without annotations')
parser.add_argument('img_path', help='The root path of images')
parser.add_argument(
'classes', type=str, help='The text file name of storage class list')
parser.add_argument(
'out',
type=str,
help='The output annotation json file name. The save dir is in the '
'same directory as img_path')
parser.add_argument(
'-e',
'--exclude-extensions',
type=str,
nargs='+',
help='The suffix of images to be excluded, such as "png" and "bmp"')
args = parser.parse_args()
return args
def collect_image_infos(path, exclude_extensions=None):
img_infos = []
images_generator = mmcv.scandir(path, recursive=True)
for image_path in mmcv.track_iter_progress(list(images_generator)):
if exclude_extensions is None or not image_path.lower().endswith(
tuple(exclude_extensions)):
image_path = os.path.join(path, image_path)
img_pillow = Image.open(image_path)
img_info = {
'filename': image_path,
'width': img_pillow.width,
'height': img_pillow.height,
}
img_infos.append(img_info)
return img_infos
def cvt_to_coco_json(img_infos, classes):
image_id = 0
coco = dict()
coco['images'] = []
coco['type'] = 'instance'
coco['categories'] = []
coco['annotations'] = []
image_set = set()
for category_id, name in enumerate(classes):
category_item = dict()
category_item['supercategory'] = str('none')
category_item['id'] = int(category_id)
category_item['name'] = str(name)
coco['categories'].append(category_item)
for img_dict in img_infos:
file_name = img_dict['filename']
assert file_name not in image_set
image_item = dict()
image_item['id'] = int(image_id)
image_item['file_name'] = str(file_name)
image_item['height'] = int(img_dict['height'])
image_item['width'] = int(img_dict['width'])
coco['images'].append(image_item)
image_set.add(file_name)
image_id += 1
return coco
def main():
args = parse_args()
assert args.out.endswith(
'json'), 'The output file name must have a json suffix'
# 1 load image list info
img_infos = collect_image_infos(args.img_path, args.exclude_extensions)
# 2 convert to coco format data
classes = mmcv.list_from_file(args.classes)
coco_info = cvt_to_coco_json(img_infos, classes)
# 3 dump
save_dir = os.path.join(args.img_path, '..', 'annotations')
mmcv.mkdir_or_exist(save_dir)
save_path = os.path.join(save_dir, args.out)
mmcv.dump(coco_info, save_path)
print(f'save json file: {save_path}')
if __name__ == '__main__':
main()
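# Hedged example invocation (the script name and paths are illustrative):
#   python images2coco.py data/my_images classes.txt annotations.json -e png bmp
# This writes <parent of data/my_images>/annotations/annotations.json with one
# entry per image in 'images' and an empty 'annotations' list.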
|
import argparse
import os
import mmcv
from PIL import Image
def parse_args():
parser = argparse.ArgumentParser(
description='Convert images to coco format without annotations')
parser.add_argument('img_path', help='The root path of images')
parser.add_argument(
'classes', type=str, help='The text file name of storage class list')
parser.add_argument(
'out',
type=str,
help='The output annotation json file name. The save dir is in the '
'same directory as img_path')
parser.add_argument(
'-e',
'--exclude-extensions',
type=str,
nargs='+',
help='The suffix of images to be excluded, such as "png" and "bmp"')
args = parser.parse_args()
return args
def collect_image_infos(path, exclude_extensions=None):
img_infos = []
images_generator = mmcv.scandir(path, recursive=True)
for image_path in mmcv.track_iter_progress(list(images_generator)):
if exclude_extensions is None or not image_path.lower().endswith(
tuple(exclude_extensions)):
image_path = os.path.join(path, image_path)
img_pillow = Image.open(image_path)
img_info = {
'filename': image_path,
'width': img_pillow.width,
'height': img_pillow.height,
}
img_infos.append(img_info)
return img_infos
def cvt_to_coco_json(img_infos, classes):
image_id = 0
coco = dict()
coco['images'] = []
coco['type'] = 'instance'
coco['categories'] = []
coco['annotations'] = []
image_set = set()
for category_id, name in enumerate(classes):
category_item = dict()
category_item['supercategory'] = str('none')
category_item['id'] = int(category_id)
category_item['name'] = str(name)
coco['categories'].append(category_item)
for img_dict in img_infos:
file_name = img_dict['filename']
assert file_name not in image_set
image_item = dict()
image_item['id'] = int(image_id)
image_item['file_name'] = str(file_name)
image_item['height'] = int(img_dict['height'])
image_item['width'] = int(img_dict['width'])
coco['images'].append(image_item)
image_set.add(file_name)
image_id += 1
return coco
def main():
args = parse_args()
assert args.out.endswith(
'json'), 'The output file name must have a json suffix'
# 1 load image list info
img_infos = collect_image_infos(args.img_path, args.exclude_extensions)
# 2 convert to coco format data
classes = mmcv.list_from_file(args.classes)
coco_info = cvt_to_coco_json(img_infos, classes)
# 3 dump
save_dir = os.path.join(args.img_path, '..', 'annotations')
mmcv.mkdir_or_exist(save_dir)
save_path = os.path.join(save_dir, args.out)
mmcv.dump(coco_info, save_path)
print(f'save json file: {save_path}')
if __name__ == '__main__':
main()
|
from typing import Any, Dict, List, Optional, Sequence, Type, Union
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.prototype.datapoints import Label, OneHotLabel
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2._utils import _FillType, _get_fill, _setup_fill_arg, _setup_size
from torchvision.transforms.v2.utils import get_bounding_boxes, has_any, is_simple_tensor, query_size
class FixedSizeCrop(Transform):
def __init__(
self,
size: Union[int, Sequence[int]],
fill: Union[_FillType, Dict[Union[Type, str], _FillType]] = 0,
padding_mode: str = "constant",
) -> None:
super().__init__()
size = tuple(_setup_size(size, error_msg="Please provide only two dimensions (h, w) for size."))
self.crop_height = size[0]
self.crop_width = size[1]
self.fill = fill
self._fill = _setup_fill_arg(fill)
self.padding_mode = padding_mode
def _check_inputs(self, flat_inputs: List[Any]) -> None:
if not has_any(
flat_inputs,
PIL.Image.Image,
datapoints.Image,
is_simple_tensor,
datapoints.Video,
):
raise TypeError(
f"{type(self).__name__}() requires input sample to contain an tensor or PIL image or a Video."
)
if has_any(flat_inputs, datapoints.BoundingBoxes) and not has_any(flat_inputs, Label, OneHotLabel):
raise TypeError(
f"If a BoundingBoxes is contained in the input sample, "
f"{type(self).__name__}() also requires it to contain a Label or OneHotLabel."
)
def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
height, width = query_size(flat_inputs)
new_height = min(height, self.crop_height)
new_width = min(width, self.crop_width)
needs_crop = new_height != height or new_width != width
offset_height = max(height - self.crop_height, 0)
offset_width = max(width - self.crop_width, 0)
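# A single random draw positions the crop window: the same factor r scales both offsets, so top and left slide together.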
r = torch.rand(1)
top = int(offset_height * r)
left = int(offset_width * r)
bounding_boxes: Optional[torch.Tensor]
try:
bounding_boxes = get_bounding_boxes(flat_inputs)
except ValueError:
bounding_boxes = None
if needs_crop and bounding_boxes is not None:
format = bounding_boxes.format
bounding_boxes, canvas_size = F.crop_bounding_boxes(
bounding_boxes.as_subclass(torch.Tensor),
format=format,
top=top,
left=left,
height=new_height,
width=new_width,
)
bounding_boxes = F.clamp_bounding_boxes(bounding_boxes, format=format, canvas_size=canvas_size)
height_and_width = F.convert_format_bounding_boxes(
bounding_boxes, old_format=format, new_format=datapoints.BoundingBoxFormat.XYWH
)[..., 2:]
is_valid = torch.all(height_and_width > 0, dim=-1)
else:
is_valid = None
pad_bottom = max(self.crop_height - new_height, 0)
pad_right = max(self.crop_width - new_width, 0)
needs_pad = pad_bottom != 0 or pad_right != 0
return dict(
needs_crop=needs_crop,
top=top,
left=left,
height=new_height,
width=new_width,
is_valid=is_valid,
padding=[0, 0, pad_right, pad_bottom],
needs_pad=needs_pad,
)
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
if params["needs_crop"]:
inpt = self._call_kernel(
F.crop,
inpt,
top=params["top"],
left=params["left"],
height=params["height"],
width=params["width"],
)
if params["is_valid"] is not None:
if isinstance(inpt, (Label, OneHotLabel, datapoints.Mask)):
inpt = datapoints.wrap(inpt[params["is_valid"]], like=inpt)
elif isinstance(inpt, datapoints.BoundingBoxes):
inpt = datapoints.wrap(
F.clamp_bounding_boxes(inpt[params["is_valid"]], format=inpt.format, canvas_size=inpt.canvas_size),
like=inpt,
)
if params["needs_pad"]:
fill = _get_fill(self._fill, type(inpt))
inpt = self._call_kernel(F.pad, inpt, params["padding"], fill=fill, padding_mode=self.padding_mode)
return inpt
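# Hedged usage sketch (illustrative, not part of the original file):
#
# crop = FixedSizeCrop(size=(224, 224), fill=0, padding_mode="constant")
# image = datapoints.Image(torch.randint(0, 256, (3, 200, 300), dtype=torch.uint8))
# out = crop(image)  # cropped to at most 224x224, then padded back to 224x224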
|
from typing import Any, Dict, List, Optional, Sequence, Type, Union
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.prototype.datapoints import Label, OneHotLabel
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2._utils import _FillType, _get_fill, _setup_fill_arg, _setup_size
from torchvision.transforms.v2.utils import get_bounding_boxes, has_any, is_simple_tensor, query_size
class FixedSizeCrop(Transform):
def __init__(
self,
size: Union[int, Sequence[int]],
fill: Union[_FillType, Dict[Union[Type, str], _FillType]] = 0,
padding_mode: str = "constant",
) -> None:
super().__init__()
size = tuple(_setup_size(size, error_msg="Please provide only two dimensions (h, w) for size."))
self.crop_height = size[0]
self.crop_width = size[1]
self.fill = fill
self._fill = _setup_fill_arg(fill)
self.padding_mode = padding_mode
def _check_inputs(self, flat_inputs: List[Any]) -> None:
if not has_any(
flat_inputs,
PIL.Image.Image,
datapoints.Image,
is_simple_tensor,
datapoints.Video,
):
raise TypeError(
f"{type(self).__name__}() requires input sample to contain an tensor or PIL image or a Video."
)
if has_any(flat_inputs, datapoints.BoundingBoxes) and not has_any(flat_inputs, Label, OneHotLabel):
raise TypeError(
f"If a BoundingBoxes is contained in the input sample, "
f"{type(self).__name__}() also requires it to contain a Label or OneHotLabel."
)
def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
height, width = query_size(flat_inputs)
new_height = min(height, self.crop_height)
new_width = min(width, self.crop_width)
needs_crop = new_height != height or new_width != width
offset_height = max(height - self.crop_height, 0)
offset_width = max(width - self.crop_width, 0)
r = torch.rand(1)
top = int(offset_height * r)
left = int(offset_width * r)
bounding_boxes: Optional[torch.Tensor]
try:
bounding_boxes = get_bounding_boxes(flat_inputs)
except ValueError:
bounding_boxes = None
if needs_crop and bounding_boxes is not None:
format = bounding_boxes.format
bounding_boxes, canvas_size = F.crop_bounding_boxes(
bounding_boxes.as_subclass(torch.Tensor),
format=format,
top=top,
left=left,
height=new_height,
width=new_width,
)
bounding_boxes = F.clamp_bounding_boxes(bounding_boxes, format=format, canvas_size=canvas_size)
height_and_width = F.convert_format_bounding_boxes(
bounding_boxes, old_format=format, new_format=datapoints.BoundingBoxFormat.XYWH
)[..., 2:]
is_valid = torch.all(height_and_width > 0, dim=-1)
else:
is_valid = None
pad_bottom = max(self.crop_height - new_height, 0)
pad_right = max(self.crop_width - new_width, 0)
needs_pad = pad_bottom != 0 or pad_right != 0
return dict(
needs_crop=needs_crop,
top=top,
left=left,
height=new_height,
width=new_width,
is_valid=is_valid,
padding=[0, 0, pad_right, pad_bottom],
needs_pad=needs_pad,
)
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
if params["needs_crop"]:
inpt = self._call_kernel(
F.crop,
inpt,
top=params["top"],
left=params["left"],
height=params["height"],
width=params["width"],
)
if params["is_valid"] is not None:
if isinstance(inpt, (Label, OneHotLabel, datapoints.Mask)):
inpt = inpt.wrap_like(inpt, inpt[params["is_valid"]]) # type: ignore[arg-type]
elif isinstance(inpt, datapoints.BoundingBoxes):
inpt = datapoints.BoundingBoxes.wrap_like(
inpt,
F.clamp_bounding_boxes(inpt[params["is_valid"]], format=inpt.format, canvas_size=inpt.canvas_size),
)
if params["needs_pad"]:
fill = _get_fill(self._fill, type(inpt))
inpt = self._call_kernel(F.pad, inpt, params["padding"], fill=fill, padding_mode=self.padding_mode)
return inpt
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
input_schema: ClassVar[Features] = Features({"text": Value("string")})
label_schema: ClassVar[Features] = Features({"summary": Value("string")})
text_column: str = "text"
summary_column: str = "summary"
@property
def column_mapping(self) -> Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"}
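# Hedged example (column names are illustrative):
# Summarization(text_column="article", summary_column="highlights").column_mapping
# returns {"article": "text", "highlights": "summary"}.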
|
from dataclasses import dataclass
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
task: str = "summarization"
input_schema: ClassVar[Features] = Features({"text": Value("string")})
label_schema: ClassVar[Features] = Features({"summary": Value("string")})
text_column: str = "text"
summary_column: str = "summary"
@property
def column_mapping(self) -> Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"}
|
import os
from pathlib import Path
from torchaudio.datasets import vctk
from torchaudio_unittest.common_utils import get_whitenoise, normalize_wav, save_wav, TempDirMixin, TorchaudioTestCase
# Used to generate a unique transcript for each dummy audio file
_TRANSCRIPT = [
"Please call Stella",
"Ask her to bring these things",
"with her from the store",
"Six spoons of fresh snow peas, five thick slabs of blue cheese, and maybe a snack for her brother Bob",
"We also need a small plastic snake and a big toy frog for the kids",
"She can scoop these things into three red bags, and we will go meet her Wednesday at the train station",
"When the sunlight strikes raindrops in the air, they act as a prism and form a rainbow",
"The rainbow is a division of white light into many beautiful colors",
"These take the shape of a long round arch, with its path high above, and its two ends \
apparently beyond the horizon",
"There is, according to legend, a boiling pot of gold at one end",
]
def get_mock_dataset(root_dir):
"""
root_dir: root directory of the mocked data
"""
mocked_samples = []
dataset_dir = os.path.join(root_dir, "VCTK-Corpus-0.92")
os.makedirs(dataset_dir, exist_ok=True)
sample_rate = 48000
seed = 0
for speaker in range(225, 230):
speaker_id = "p" + str(speaker)
audio_dir = os.path.join(dataset_dir, "wav48_silence_trimmed", speaker_id)
os.makedirs(audio_dir, exist_ok=True)
file_dir = os.path.join(dataset_dir, "txt", speaker_id)
os.makedirs(file_dir, exist_ok=True)
for utterance_id in range(1, 11):
filename = f"{speaker_id}_{utterance_id:03d}_mic2"
audio_file_path = os.path.join(audio_dir, filename + ".wav")
data = get_whitenoise(sample_rate=sample_rate, duration=0.01, n_channels=1, dtype="float32", seed=seed)
save_wav(audio_file_path, data, sample_rate)
txt_file_path = os.path.join(file_dir, filename[:-5] + ".txt")
transcript = _TRANSCRIPT[utterance_id - 1]
with open(txt_file_path, "w") as f:
f.write(transcript)
sample = (normalize_wav(data), sample_rate, transcript, speaker_id, utterance_id)
mocked_samples.append(sample)
seed += 1
return mocked_samples
class TestVCTK(TempDirMixin, TorchaudioTestCase):
root_dir = None
samples = []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.samples = get_mock_dataset(cls.root_dir)
def _test_vctk(self, dataset):
num_samples = 0
for i, (data, sample_rate, transcript, speaker_id, utterance_id) in enumerate(dataset):
self.assertEqual(data, self.samples[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == self.samples[i][1]
assert transcript == self.samples[i][2]
assert speaker_id == self.samples[i][3]
assert int(utterance_id) == self.samples[i][4]
num_samples += 1
assert num_samples == len(self.samples)
def test_vctk_str(self):
dataset = vctk.VCTK_092(self.root_dir, audio_ext=".wav")
self._test_vctk(dataset)
def test_vctk_path(self):
dataset = vctk.VCTK_092(Path(self.root_dir), audio_ext=".wav")
self._test_vctk(dataset)
|
import os
from pathlib import Path
from torchaudio.datasets import vctk
from torchaudio_unittest.common_utils import get_whitenoise, normalize_wav, save_wav, TempDirMixin, TorchaudioTestCase
# Used to generate a unique transcript for each dummy audio file
_TRANSCRIPT = [
"Please call Stella",
"Ask her to bring these things",
"with her from the store",
"Six spoons of fresh snow peas, five thick slabs of blue cheese, and maybe a snack for her brother Bob",
"We also need a small plastic snake and a big toy frog for the kids",
"She can scoop these things into three red bags, and we will go meet her Wednesday at the train station",
"When the sunlight strikes raindrops in the air, they act as a prism and form a rainbow",
"The rainbow is a division of white light into many beautiful colors",
"These take the shape of a long round arch, with its path high above, and its two ends \
apparently beyond the horizon",
"There is, according to legend, a boiling pot of gold at one end",
]
def get_mock_dataset(root_dir):
"""
root_dir: root directory of the mocked data
"""
mocked_samples = []
dataset_dir = os.path.join(root_dir, "VCTK-Corpus-0.92")
os.makedirs(dataset_dir, exist_ok=True)
sample_rate = 48000
seed = 0
for speaker in range(225, 230):
speaker_id = "p" + str(speaker)
audio_dir = os.path.join(dataset_dir, "wav48_silence_trimmed", speaker_id)
os.makedirs(audio_dir, exist_ok=True)
file_dir = os.path.join(dataset_dir, "txt", speaker_id)
os.makedirs(file_dir, exist_ok=True)
for utterance_id in range(1, 11):
filename = f"{speaker_id}_{utterance_id:03d}_mic2"
audio_file_path = os.path.join(audio_dir, filename + ".wav")
data = get_whitenoise(sample_rate=sample_rate, duration=0.01, n_channels=1, dtype="float32", seed=seed)
save_wav(audio_file_path, data, sample_rate)
txt_file_path = os.path.join(file_dir, filename[:-5] + ".txt")
transcript = _TRANSCRIPT[utterance_id - 1]
with open(txt_file_path, "w") as f:
f.write(transcript)
sample = (normalize_wav(data), sample_rate, transcript, speaker_id, utterance_id)
mocked_samples.append(sample)
seed += 1
return mocked_samples
class TestVCTK(TempDirMixin, TorchaudioTestCase):
backend = "default"
root_dir = None
samples = []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.samples = get_mock_dataset(cls.root_dir)
def _test_vctk(self, dataset):
num_samples = 0
for i, (data, sample_rate, transcript, speaker_id, utterance_id) in enumerate(dataset):
self.assertEqual(data, self.samples[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == self.samples[i][1]
assert transcript == self.samples[i][2]
assert speaker_id == self.samples[i][3]
assert int(utterance_id) == self.samples[i][4]
num_samples += 1
assert num_samples == len(self.samples)
def test_vctk_str(self):
dataset = vctk.VCTK_092(self.root_dir, audio_ext=".wav")
self._test_vctk(dataset)
def test_vctk_path(self):
dataset = vctk.VCTK_092(Path(self.root_dir), audio_ext=".wav")
self._test_vctk(dataset)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import DATASETS
from .xml_style import XMLDataset
@DATASETS.register_module()
class VOCDataset(XMLDataset):
"""Dataset for PASCAL VOC."""
METAINFO = {
'classes':
('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',
'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person',
'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'),
# palette is a list of color tuples, which is used for visualization.
'palette': [(106, 0, 228), (119, 11, 32), (165, 42, 42), (0, 0, 192),
(197, 226, 255), (0, 60, 100), (0, 0, 142), (255, 77, 255),
(153, 69, 1), (120, 166, 157), (0, 182, 199),
(0, 226, 252), (182, 182, 255), (0, 0, 230), (220, 20, 60),
(163, 255, 0), (0, 82, 0), (3, 95, 161), (0, 80, 100),
(183, 130, 88)]
}
def __init__(self, **kwargs):
super().__init__(**kwargs)
if 'VOC2007' in self.sub_data_root:
self._metainfo['dataset_type'] = 'VOC2007'
elif 'VOC2012' in self.sub_data_root:
self._metainfo['dataset_type'] = 'VOC2012'
else:
self._metainfo['dataset_type'] = None
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import DATASETS
from .xml_style import XMLDataset
@DATASETS.register_module()
class VOCDataset(XMLDataset):
"""Dataset for PASCAL VOC."""
METAINFO = {
'CLASSES':
('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',
'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person',
'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'),
# PALETTE is a list of color tuples, which is used for visualization.
'PALETTE': [(106, 0, 228), (119, 11, 32), (165, 42, 42), (0, 0, 192),
(197, 226, 255), (0, 60, 100), (0, 0, 142), (255, 77, 255),
(153, 69, 1), (120, 166, 157), (0, 182, 199),
(0, 226, 252), (182, 182, 255), (0, 0, 230), (220, 20, 60),
(163, 255, 0), (0, 82, 0), (3, 95, 161), (0, 80, 100),
(183, 130, 88)]
}
def __init__(self, **kwargs):
super().__init__(**kwargs)
if 'VOC2007' in self.sub_data_root:
self._metainfo['DATASET_TYPE'] = 'VOC2007'
elif 'VOC2012' in self.sub_data_root:
self._metainfo['DATASET_TYPE'] = 'VOC2012'
else:
self._metainfo['DATASET_TYPE'] = None
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import mmcv
import numpy as np
import pytest
from mmdet.core.mask import BitmapMasks
from mmdet.datasets.pipelines import (FilterAnnotations, LoadImageFromFile,
LoadImageFromWebcam,
LoadMultiChannelImageFromFiles)
class TestLoading:
@classmethod
def setup_class(cls):
cls.data_prefix = osp.join(osp.dirname(__file__), '../../data')
def test_load_img(self):
results = dict(
img_prefix=self.data_prefix, img_info=dict(filename='color.jpg'))
transform = LoadImageFromFile()
results = transform(copy.deepcopy(results))
assert results['filename'] == osp.join(self.data_prefix, 'color.jpg')
assert results['ori_filename'] == 'color.jpg'
assert results['img'].shape == (288, 512, 3)
assert results['img'].dtype == np.uint8
assert results['img_shape'] == (288, 512, 3)
assert results['ori_shape'] == (288, 512, 3)
assert repr(transform) == transform.__class__.__name__ + \
"(to_float32=False, color_type='color', channel_order='bgr', " + \
"file_client_args={'backend': 'disk'})"
# no img_prefix
results = dict(
img_prefix=None, img_info=dict(filename='tests/data/color.jpg'))
transform = LoadImageFromFile()
results = transform(copy.deepcopy(results))
assert results['filename'] == 'tests/data/color.jpg'
assert results['ori_filename'] == 'tests/data/color.jpg'
assert results['img'].shape == (288, 512, 3)
# to_float32
transform = LoadImageFromFile(to_float32=True)
results = transform(copy.deepcopy(results))
assert results['img'].dtype == np.float32
# gray image
results = dict(
img_prefix=self.data_prefix, img_info=dict(filename='gray.jpg'))
transform = LoadImageFromFile()
results = transform(copy.deepcopy(results))
assert results['img'].shape == (288, 512, 3)
assert results['img'].dtype == np.uint8
transform = LoadImageFromFile(color_type='unchanged')
results = transform(copy.deepcopy(results))
assert results['img'].shape == (288, 512)
assert results['img'].dtype == np.uint8
def test_load_multi_channel_img(self):
results = dict(
img_prefix=self.data_prefix,
img_info=dict(filename=['color.jpg', 'color.jpg']))
transform = LoadMultiChannelImageFromFiles()
results = transform(copy.deepcopy(results))
assert results['filename'] == [
osp.join(self.data_prefix, 'color.jpg'),
osp.join(self.data_prefix, 'color.jpg')
]
assert results['ori_filename'] == ['color.jpg', 'color.jpg']
assert results['img'].shape == (288, 512, 3, 2)
assert results['img'].dtype == np.uint8
assert results['img_shape'] == (288, 512, 3, 2)
assert results['ori_shape'] == (288, 512, 3, 2)
assert results['pad_shape'] == (288, 512, 3, 2)
assert results['scale_factor'] == 1.0
assert repr(transform) == transform.__class__.__name__ + \
"(to_float32=False, color_type='unchanged', " + \
"file_client_args={'backend': 'disk'})"
def test_load_webcam_img(self):
img = mmcv.imread(osp.join(self.data_prefix, 'color.jpg'))
results = dict(img=img)
transform = LoadImageFromWebcam()
results = transform(copy.deepcopy(results))
assert results['filename'] is None
assert results['ori_filename'] is None
assert results['img'].shape == (288, 512, 3)
assert results['img'].dtype == np.uint8
assert results['img_shape'] == (288, 512, 3)
assert results['ori_shape'] == (288, 512, 3)
def _build_filter_annotations_args():
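# Pair each FilterAnnotations kwargs dict with the expected number of surviving boxes; None means the whole sample is dropped, since keep_empty defaults to True.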
kwargs = (dict(min_gt_bbox_wh=(100, 100)),
dict(min_gt_bbox_wh=(100, 100), keep_empty=False),
dict(min_gt_bbox_wh=(1, 1)), dict(min_gt_bbox_wh=(.01, .01)),
dict(min_gt_bbox_wh=(.01, .01),
by_mask=True), dict(by_mask=True),
dict(by_box=False, by_mask=True))
targets = (None, 0, 1, 2, 1, 1, 1)
return list(zip(targets, kwargs))
@pytest.mark.parametrize('target, kwargs', _build_filter_annotations_args())
def test_filter_annotations(target, kwargs):
filter_ann = FilterAnnotations(**kwargs)
bboxes = np.array([[2., 10., 4., 14.], [2., 10., 2.1, 10.1]])
raw_masks = np.zeros((2, 24, 24))
raw_masks[0, 10:14, 2:4] = 1
bitmap_masks = BitmapMasks(raw_masks, 24, 24)
results = dict(gt_bboxes=bboxes, gt_masks=bitmap_masks)
results = filter_ann(results)
if results is not None:
results = results['gt_bboxes'].shape[0]
assert results == target
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import mmcv
import numpy as np
from mmdet.datasets.pipelines import (LoadImageFromFile, LoadImageFromWebcam,
LoadMultiChannelImageFromFiles)
class TestLoading:
@classmethod
def setup_class(cls):
cls.data_prefix = osp.join(osp.dirname(__file__), '../../data')
def test_load_img(self):
results = dict(
img_prefix=self.data_prefix, img_info=dict(filename='color.jpg'))
transform = LoadImageFromFile()
results = transform(copy.deepcopy(results))
assert results['filename'] == osp.join(self.data_prefix, 'color.jpg')
assert results['ori_filename'] == 'color.jpg'
assert results['img'].shape == (288, 512, 3)
assert results['img'].dtype == np.uint8
assert results['img_shape'] == (288, 512, 3)
assert results['ori_shape'] == (288, 512, 3)
assert repr(transform) == transform.__class__.__name__ + \
"(to_float32=False, color_type='color', channel_order='bgr', " + \
"file_client_args={'backend': 'disk'})"
# no img_prefix
results = dict(
img_prefix=None, img_info=dict(filename='tests/data/color.jpg'))
transform = LoadImageFromFile()
results = transform(copy.deepcopy(results))
assert results['filename'] == 'tests/data/color.jpg'
assert results['ori_filename'] == 'tests/data/color.jpg'
assert results['img'].shape == (288, 512, 3)
# to_float32
transform = LoadImageFromFile(to_float32=True)
results = transform(copy.deepcopy(results))
assert results['img'].dtype == np.float32
# gray image
results = dict(
img_prefix=self.data_prefix, img_info=dict(filename='gray.jpg'))
transform = LoadImageFromFile()
results = transform(copy.deepcopy(results))
assert results['img'].shape == (288, 512, 3)
assert results['img'].dtype == np.uint8
transform = LoadImageFromFile(color_type='unchanged')
results = transform(copy.deepcopy(results))
assert results['img'].shape == (288, 512)
assert results['img'].dtype == np.uint8
def test_load_multi_channel_img(self):
results = dict(
img_prefix=self.data_prefix,
img_info=dict(filename=['color.jpg', 'color.jpg']))
transform = LoadMultiChannelImageFromFiles()
results = transform(copy.deepcopy(results))
assert results['filename'] == [
osp.join(self.data_prefix, 'color.jpg'),
osp.join(self.data_prefix, 'color.jpg')
]
assert results['ori_filename'] == ['color.jpg', 'color.jpg']
assert results['img'].shape == (288, 512, 3, 2)
assert results['img'].dtype == np.uint8
assert results['img_shape'] == (288, 512, 3, 2)
assert results['ori_shape'] == (288, 512, 3, 2)
assert results['pad_shape'] == (288, 512, 3, 2)
assert results['scale_factor'] == 1.0
assert repr(transform) == transform.__class__.__name__ + \
"(to_float32=False, color_type='unchanged', " + \
"file_client_args={'backend': 'disk'})"
def test_load_webcam_img(self):
img = mmcv.imread(osp.join(self.data_prefix, 'color.jpg'))
results = dict(img=img)
transform = LoadImageFromWebcam()
results = transform(copy.deepcopy(results))
assert results['filename'] is None
assert results['ori_filename'] is None
assert results['img'].shape == (288, 512, 3)
assert results['img'].dtype == np.uint8
assert results['img_shape'] == (288, 512, 3)
assert results['ori_shape'] == (288, 512, 3)
|
"""Chain-of-Abstraction Output Parser."""
import asyncio
import json
import networkx as nx
import re
from collections import defaultdict
from typing import Dict, List, Tuple
from llama_index.core.tools import AsyncBaseTool, ToolOutput
from llama_index.core.types import BaseOutputParser
class ChainOfAbstractionParser(BaseOutputParser):
"""
Chain of abstraction output parser.
This parser is used to parse the output using the default prompt
defined in prompts.py.
If the prompt formatting changes the function format, this parser
will not work and should be updated.
"""
def __init__(self, verbose: bool = False):
"""Init params."""
self._verbose = verbose
def parse(
self, solution: str, tools_by_name: Dict[str, AsyncBaseTool]
) -> Tuple[str, List[ToolOutput]]:
return asyncio.run(self.aparse(solution, tools_by_name))
async def aparse(
self, solution: str, tools_by_name: Dict[str, AsyncBaseTool]
) -> Tuple[str, List[ToolOutput]]:
# Extract function calls and placeholders
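# A call looks like "[FUNC add(1, 2) = y1]": group 1 is the tool name, group 2 the comma-separated JSON arguments, group 3 the output placeholder.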
func_calls = re.findall(r"\[FUNC (\w+)\((.*?)\) = (\w+)\]", solution)
placeholders = set()
for match in re.finditer(r"\[FUNC (\w+)\((.*?)\) = (\w+)\]", solution):
placeholders.add(match.group(3))
# Create a dependency graph
graph = nx.DiGraph()
for func_name, inputs, output in func_calls:
inputs = json.loads("[" + inputs + "]")
graph.add_node(output, func_name=func_name, inputs=inputs)
for inp in inputs:
graph.add_edge(inp, output)
# Find the execution levels
execution_levels = defaultdict(list)
for node in nx.topological_sort(graph):
level = (
max(
[execution_levels[pred] for pred in graph.predecessors(node)],
default=-1,
)
+ 1
)
execution_levels[node] = level
# Group nodes by execution level
level_groups = defaultdict(list)
for node, level in execution_levels.items():
level_groups[level].append(node)
# Execute functions and replace placeholders
results = {}
tool_outputs = []
graph_nodes = {node[0]: node[1] for node in graph.nodes(data=True)}
for level in sorted(level_groups.keys()):
level_nodes = level_groups[level]
parallel_results = {}
for placeholder in level_nodes:
if len(graph_nodes[placeholder]) == 0:
continue
# get function name and inputs
func_name, inputs = (
graph_nodes[placeholder]["func_name"],
graph_nodes[placeholder]["inputs"],
)
# look up any inputs that depend on other functions
input_values = [results.get(inp, inp) for inp in inputs]
if self._verbose:
print(
f"==== Executing {func_name} with inputs {input_values} ====",
flush=True,
)
# execute function and store result
try:
tool_output = await tools_by_name[func_name].acall(*input_values)
tool_outputs.append(tool_output)
except Exception as e:
tool_outputs.append(
ToolOutput(
content=str(e),
tool_name=func_name,
raw_output=None,
raw_input={"args": input_values},
is_error=True,
)
)
# If an error occurs, stop execution
break
parallel_results[placeholder] = tool_output.raw_output
results.update(parallel_results)
# Replace placeholders in the solution text
for placeholder, value in results.items():
solution = solution.replace(placeholder, '"' + str(value) + '"')
return solution, tool_outputs
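# Hedged usage sketch (tool names and the solution string are illustrative):
#
# parser = ChainOfAbstractionParser(verbose=True)
# solution = "The total is [FUNC add(1, 2) = y1]; doubled it is [FUNC multiply(y1, 2) = y2]."
# text, tool_outputs = parser.parse(solution, {"add": add_tool, "multiply": multiply_tool})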
|
"""Chain-of-Abstraction Output Parser."""
import asyncio
import json
import networkx as nx
import re
from collections import defaultdict
from typing import Dict, List, Tuple
from llama_index.core.tools import AsyncBaseTool, ToolOutput
from llama_index.core.types import BaseOutputParser
class ChainOfAbstractionParser(BaseOutputParser):
"""
Chain of abstraction output parser.
This parser is used to parse the output using the default prompt
defined in prompts.py.
If the prompt formatting changes the function format, this parser
will not work and should be updated.
"""
def __init__(self, verbose: bool = False):
"""Init params."""
self._verbose = verbose
def parse(
self, solution: str, tools_by_name: Dict[str, AsyncBaseTool]
) -> Tuple[str, List[ToolOutput]]:
return asyncio.run(self.aparse(solution, tools_by_name))
async def aparse(
self, solution: str, tools_by_name: Dict[str, AsyncBaseTool]
) -> Tuple[str, List[ToolOutput]]:
# Extract function calls and placeholders
func_calls = re.findall(r"\[FUNC (\w+)\((.*?)\) = (\w+)\]", solution)
placeholders = set()
for match in re.finditer(r"\[FUNC (\w+)\((.*?)\) = (\w+)\]", solution):
placeholders.add(match.group(3))
# Create a dependency graph
graph = nx.DiGraph()
for func_name, inputs, output in func_calls:
inputs = json.loads("[" + inputs + "]")
graph.add_node(output, func_name=func_name, inputs=inputs)
for inp in inputs:
graph.add_edge(inp, output)
# Find the execution levels
execution_levels = defaultdict(list)
for node in nx.topological_sort(graph):
level = (
max(
[execution_levels[pred] for pred in graph.predecessors(node)],
default=-1,
)
+ 1
)
execution_levels[node] = level
# Group nodes by execution level
level_groups = defaultdict(list)
for node, level in execution_levels.items():
level_groups[level].append(node)
# Execute functions and replace placeholders
results = {}
tool_outputs = []
graph_nodes = {node[0]: node[1] for node in graph.nodes(data=True)}
for level in sorted(level_groups.keys()):
level_nodes = level_groups[level]
parallel_results = {}
for placeholder in level_nodes:
if len(graph_nodes[placeholder]) == 0:
continue
# get function name and inputs
func_name, inputs = (
graph_nodes[placeholder]["func_name"],
graph_nodes[placeholder]["inputs"],
)
# look up any inputs that depend on other functions
input_values = [results.get(inp, inp) for inp in inputs]
if self._verbose:
print(
f"==== Executing {func_name} with inputs {input_values} ====",
flush=True,
)
# execute function and store result
try:
raw_tool_output = await tools_by_name[func_name].acall(
*input_values
)
tool_outputs.append(
ToolOutput(
content=str(raw_tool_output),
tool_name=func_name,
raw_output=raw_tool_output,
raw_input={"args": input_values},
is_error=False,
)
)
except Exception as e:
tool_outputs.append(
ToolOutput(
content=str(e),
tool_name=func_name,
raw_output=None,
raw_input={"args": input_values},
is_error=True,
)
)
# If an error occurs, stop execution
break
parallel_results[placeholder] = str(raw_tool_output)
results.update(parallel_results)
# Replace placeholders in the solution text
for placeholder, value in results.items():
            solution = solution.replace(placeholder, '"' + str(value) + '"')
return solution, tool_outputs
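# Minimal usage sketch (the `add` tool and the solution string below are
# invented for illustration). Each `[FUNC name(args) = placeholder]` call is
# executed via the matching tool, and the placeholder is then replaced in the
# text with the quoted result.
if __name__ == "__main__":
    from llama_index.core.tools import FunctionTool
    def add(a: int, b: int) -> int:
        """Add two numbers."""
        return a + b
    parser = ChainOfAbstractionParser()
    solution = "Alice and Bob have [FUNC add(3, 4) = y1] apples in total."
    text, tool_outputs = parser.parse(solution, {"add": FunctionTool.from_defaults(fn=add)})
    # text == 'Alice and Bob have [FUNC add(3, 4) = "7"] apples in total.'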
|
import warnings
from langchain_core.globals import get_debug as core_get_debug
from langchain_core.globals import get_verbose as core_get_verbose
from langchain_core.globals import set_debug as core_set_debug
from langchain_core.globals import set_verbose as core_set_verbose
from langchain.globals import get_debug, get_verbose, set_debug, set_verbose
def test_no_warning() -> None:
with warnings.catch_warnings():
warnings.simplefilter("error")
get_debug()
set_debug(False)
get_verbose()
set_verbose(False)
core_get_debug()
core_set_debug(False)
core_get_verbose()
core_set_verbose(False)
def test_debug_is_settable_directly() -> None:
from langchain_core.callbacks.manager import _get_debug
import langchain
previous_value = langchain.debug
previous_fn_reading = _get_debug()
assert previous_value == previous_fn_reading
# Flip the value of the flag.
langchain.debug = not previous_value
new_value = langchain.debug
new_fn_reading = _get_debug()
try:
# We successfully changed the value of `debug`.
assert new_value != previous_value
# If we access `debug` via a function used elsewhere in langchain,
# it also sees the same new value.
assert new_value == new_fn_reading
# If we access `debug` via `get_debug()` we also get the same value.
assert new_value == get_debug()
finally:
# Make sure we don't alter global state, even if the test fails.
# Always reset `debug` to the value it had before.
set_debug(previous_value)
def test_debug_is_settable_via_setter() -> None:
from langchain_core.callbacks.manager import _get_debug
from langchain import globals as langchain_globals
previous_value = langchain_globals._debug
previous_fn_reading = _get_debug()
assert previous_value == previous_fn_reading
# Flip the value of the flag.
set_debug(not previous_value)
new_value = langchain_globals._debug
new_fn_reading = _get_debug()
try:
# We successfully changed the value of `debug`.
assert new_value != previous_value
# If we access `debug` via a function used elsewhere in langchain,
# it also sees the same new value.
assert new_value == new_fn_reading
# If we access `debug` via `get_debug()` we also get the same value.
assert new_value == get_debug()
finally:
# Make sure we don't alter global state, even if the test fails.
# Always reset `debug` to the value it had before.
set_debug(previous_value)
def test_verbose_is_settable_directly() -> None:
import langchain
from langchain.chains.base import _get_verbosity
previous_value = langchain.verbose
previous_fn_reading = _get_verbosity()
assert previous_value == previous_fn_reading
# Flip the value of the flag.
langchain.verbose = not previous_value
new_value = langchain.verbose
new_fn_reading = _get_verbosity()
try:
# We successfully changed the value of `verbose`.
assert new_value != previous_value
# If we access `verbose` via a function used elsewhere in langchain,
# it also sees the same new value.
assert new_value == new_fn_reading
# If we access `verbose` via `get_verbose()` we also get the same value.
assert new_value == get_verbose()
finally:
# Make sure we don't alter global state, even if the test fails.
# Always reset `verbose` to the value it had before.
set_verbose(previous_value)
def test_verbose_is_settable_via_setter() -> None:
from langchain import globals as langchain_globals
from langchain.chains.base import _get_verbosity
previous_value = langchain_globals._verbose
previous_fn_reading = _get_verbosity()
assert previous_value == previous_fn_reading
# Flip the value of the flag.
set_verbose(not previous_value)
new_value = langchain_globals._verbose
new_fn_reading = _get_verbosity()
try:
# We successfully changed the value of `verbose`.
assert new_value != previous_value
# If we access `verbose` via a function used elsewhere in langchain,
# it also sees the same new value.
assert new_value == new_fn_reading
# If we access `verbose` via `get_verbose()` we also get the same value.
assert new_value == get_verbose()
finally:
# Make sure we don't alter global state, even if the test fails.
# Always reset `verbose` to the value it had before.
set_verbose(previous_value)
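# A minimal sketch of the getter/setter round-trip these tests exercise: the
# module-level flag and its accessors stay in sync, and state is restored
# afterwards just as the `finally` blocks above do.
def demo_debug_round_trip() -> None:
    previous = get_debug()
    set_debug(not previous)
    assert get_debug() == (not previous)
    set_debug(previous)  # always restore global state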
|
import warnings
from langchain_core.globals import get_debug as core_get_debug
from langchain_core.globals import get_verbose as core_get_verbose
from langchain_core.globals import set_debug as core_set_debug
from langchain_core.globals import set_verbose as core_set_verbose
from langchain.globals import get_debug, get_verbose, set_debug, set_verbose
def test_no_warning() -> None:
with warnings.catch_warnings():
warnings.simplefilter("error")
get_debug()
set_debug(False)
get_verbose()
set_verbose(False)
core_get_debug()
core_set_debug(False)
core_get_verbose()
core_set_verbose(False)
def test_debug_is_settable_directly() -> None:
from langchain_core.callbacks.manager import _get_debug
import langchain
previous_value = langchain.debug
previous_fn_reading = _get_debug()
assert previous_value == previous_fn_reading
# Flip the value of the flag.
langchain.debug = not previous_value
new_value = langchain.debug
new_fn_reading = _get_debug()
try:
# We successfully changed the value of `debug`.
assert new_value != previous_value
# If we access `debug` via a function used elsewhere in langchain,
# it also sees the same new value.
assert new_value == new_fn_reading
# If we access `debug` via `get_debug()` we also get the same value.
assert new_value == get_debug()
finally:
# Make sure we don't alter global state, even if the test fails.
# Always reset `debug` to the value it had before.
set_debug(previous_value)
def test_debug_is_settable_via_setter() -> None:
from langchain_core.callbacks.manager import _get_debug
from langchain import globals
previous_value = globals._debug
previous_fn_reading = _get_debug()
assert previous_value == previous_fn_reading
# Flip the value of the flag.
set_debug(not previous_value)
new_value = globals._debug
new_fn_reading = _get_debug()
try:
# We successfully changed the value of `debug`.
assert new_value != previous_value
# If we access `debug` via a function used elsewhere in langchain,
# it also sees the same new value.
assert new_value == new_fn_reading
# If we access `debug` via `get_debug()` we also get the same value.
assert new_value == get_debug()
finally:
# Make sure we don't alter global state, even if the test fails.
# Always reset `debug` to the value it had before.
set_debug(previous_value)
def test_verbose_is_settable_directly() -> None:
import langchain
from langchain.chains.base import _get_verbosity
previous_value = langchain.verbose
previous_fn_reading = _get_verbosity()
assert previous_value == previous_fn_reading
# Flip the value of the flag.
langchain.verbose = not previous_value
new_value = langchain.verbose
new_fn_reading = _get_verbosity()
try:
# We successfully changed the value of `verbose`.
assert new_value != previous_value
# If we access `verbose` via a function used elsewhere in langchain,
# it also sees the same new value.
assert new_value == new_fn_reading
# If we access `verbose` via `get_verbose()` we also get the same value.
assert new_value == get_verbose()
finally:
# Make sure we don't alter global state, even if the test fails.
# Always reset `verbose` to the value it had before.
set_verbose(previous_value)
def test_verbose_is_settable_via_setter() -> None:
from langchain import globals
from langchain.chains.base import _get_verbosity
previous_value = globals._verbose
previous_fn_reading = _get_verbosity()
assert previous_value == previous_fn_reading
# Flip the value of the flag.
set_verbose(not previous_value)
new_value = globals._verbose
new_fn_reading = _get_verbosity()
try:
# We successfully changed the value of `verbose`.
assert new_value != previous_value
# If we access `verbose` via a function used elsewhere in langchain,
# it also sees the same new value.
assert new_value == new_fn_reading
# If we access `verbose` via `get_verbose()` we also get the same value.
assert new_value == get_verbose()
finally:
# Make sure we don't alter global state, even if the test fails.
# Always reset `verbose` to the value it had before.
set_verbose(previous_value)
|
import unittest
import torch
import torchaudio.prototype.functional as F
from parameterized import parameterized
from torchaudio_unittest.common_utils import skipIfNoRIR, TestBaseMixin, torch_script
class TorchScriptConsistencyTestImpl(TestBaseMixin):
def _assert_consistency(self, func, inputs, shape_only=False):
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(device=self.device, dtype=self.dtype)
inputs_.append(i)
ts_func = torch_script(func)
torch.random.manual_seed(40)
output = func(*inputs_)
torch.random.manual_seed(40)
ts_output = ts_func(*inputs_)
if shape_only:
ts_output = ts_output.shape
output = output.shape
self.assertEqual(ts_output, output)
def test_barkscale_fbanks(self):
if self.device != torch.device("cpu"):
            raise unittest.SkipTest("No need to perform the test on devices other than CPU")
n_stft = 100
f_min = 0.0
f_max = 20.0
n_barks = 10
sample_rate = 16000
self._assert_consistency(F.barkscale_fbanks, (n_stft, f_min, f_max, n_barks, sample_rate, "traunmuller"))
def test_oscillator_bank(self):
num_frames, num_pitches, sample_rate = 8000, 8, 8000
freq = torch.rand((num_frames, num_pitches), dtype=self.dtype, device=self.device)
amps = torch.ones_like(freq)
self._assert_consistency(F.oscillator_bank, (freq, amps, sample_rate, "sum"))
def test_extend_pitch(self):
num_frames = 5
input = torch.ones((num_frames, 1), device=self.device, dtype=self.dtype)
num_pitches = 7
pattern = [i + 1.0 for i in range(num_pitches)]
self._assert_consistency(F.extend_pitch, (input, num_pitches))
self._assert_consistency(F.extend_pitch, (input, pattern))
self._assert_consistency(F.extend_pitch, (input, torch.tensor(pattern)))
def test_sinc_ir(self):
cutoff = torch.tensor([0, 0.5, 1.0], device=self.device, dtype=self.dtype)
self._assert_consistency(F.sinc_impulse_response, (cutoff, 513, False))
self._assert_consistency(F.sinc_impulse_response, (cutoff, 513, True))
def test_freq_ir(self):
mags = torch.tensor([0, 0.5, 1.0], device=self.device, dtype=self.dtype)
self._assert_consistency(F.frequency_impulse_response, (mags,))
class TorchScriptConsistencyCPUOnlyTestImpl(TestBaseMixin):
def _assert_consistency(self, func, inputs, shape_only=False):
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(device=self.device, dtype=self.dtype)
inputs_.append(i)
ts_func = torch_script(func)
torch.random.manual_seed(40)
output = func(*inputs_)
torch.random.manual_seed(40)
ts_output = ts_func(*inputs_)
if shape_only:
ts_output = ts_output.shape
output = output.shape
self.assertEqual(ts_output, output)
@skipIfNoRIR
@parameterized.expand([(1,), (4,)])
def test_simulate_rir_ism_single_band(self, channel):
room_dim = torch.rand(3, dtype=self.dtype, device=self.device) + 5
mic_array = torch.rand(channel, 3, dtype=self.dtype, device=self.device) + 1
source = torch.rand(3, dtype=self.dtype, device=self.device) + 4
max_order = 3
absorption = 0.5
center_frequency = torch.tensor([125, 250, 500, 1000, 2000, 4000, 8000], dtype=self.dtype, device=self.device)
self._assert_consistency(
F.simulate_rir_ism,
(room_dim, source, mic_array, max_order, absorption, None, 81, center_frequency, 343.0, 16000.0),
)
@skipIfNoRIR
@parameterized.expand([(1,), (4,)])
def test_simulate_rir_ism_multi_band(self, channel):
room_dim = torch.rand(3, dtype=self.dtype, device=self.device) + 5
mic_array = torch.rand(channel, 3, dtype=self.dtype, device=self.device) + 1
source = torch.rand(3, dtype=self.dtype, device=self.device) + 4
max_order = 3
absorption = torch.rand(7, 6, dtype=self.dtype, device=self.device)
center_frequency = torch.tensor([125, 250, 500, 1000, 2000, 4000, 8000], dtype=self.dtype, device=self.device)
self._assert_consistency(
F.simulate_rir_ism,
(room_dim, source, mic_array, max_order, absorption, None, 81, center_frequency, 343.0, 16000.0),
)
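# The consistency pattern above in miniature (a sketch, not part of the suite):
# script a fully annotated function, run the eager and scripted versions, and
# compare. The `torch_script` helper used above plays a similar role.
def _scripting_consistency_sketch() -> None:
    def scale_and_shift(x: torch.Tensor, alpha: float) -> torch.Tensor:
        # full type annotations let TorchScript compile the function
        return x * alpha + 1.0
    ts_func = torch.jit.script(scale_and_shift)
    x = torch.randn(4)
    assert torch.allclose(scale_and_shift(x, 2.0), ts_func(x, 2.0))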
|
import unittest
import torch
import torchaudio.prototype.functional as F
from torchaudio_unittest.common_utils import TestBaseMixin, torch_script
class TorchScriptConsistencyTestImpl(TestBaseMixin):
def _assert_consistency(self, func, inputs, shape_only=False):
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(device=self.device, dtype=self.dtype)
inputs_.append(i)
ts_func = torch_script(func)
torch.random.manual_seed(40)
output = func(*inputs_)
torch.random.manual_seed(40)
ts_output = ts_func(*inputs_)
if shape_only:
ts_output = ts_output.shape
output = output.shape
self.assertEqual(ts_output, output)
def test_barkscale_fbanks(self):
if self.device != torch.device("cpu"):
            raise unittest.SkipTest("No need to perform the test on devices other than CPU")
n_stft = 100
f_min = 0.0
f_max = 20.0
n_barks = 10
sample_rate = 16000
self._assert_consistency(F.barkscale_fbanks, (n_stft, f_min, f_max, n_barks, sample_rate, "traunmuller"))
def test_oscillator_bank(self):
num_frames, num_pitches, sample_rate = 8000, 8, 8000
freq = torch.rand((num_frames, num_pitches), dtype=self.dtype, device=self.device)
amps = torch.ones_like(freq)
self._assert_consistency(F.oscillator_bank, (freq, amps, sample_rate, "sum"))
def test_extend_pitch(self):
num_frames = 5
input = torch.ones((num_frames, 1), device=self.device, dtype=self.dtype)
num_pitches = 7
pattern = [i + 1.0 for i in range(num_pitches)]
self._assert_consistency(F.extend_pitch, (input, num_pitches))
self._assert_consistency(F.extend_pitch, (input, pattern))
self._assert_consistency(F.extend_pitch, (input, torch.tensor(pattern)))
def test_sinc_ir(self):
cutoff = torch.tensor([0, 0.5, 1.0], device=self.device, dtype=self.dtype)
self._assert_consistency(F.sinc_impulse_response, (cutoff, 513, False))
self._assert_consistency(F.sinc_impulse_response, (cutoff, 513, True))
def test_freq_ir(self):
mags = torch.tensor([0, 0.5, 1.0], device=self.device, dtype=self.dtype)
self._assert_consistency(F.frequency_impulse_response, (mags,))
|
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.UnitNormalization")
class UnitNormalization(Layer):
"""Unit normalization layer.
    Normalize a batch of inputs so that each input in the batch has an L2 norm
equal to 1 (across the axes specified in `axis`).
Example:
>>> data = np.arange(6).reshape(2, 3)
>>> normalized_data = keras.layers.UnitNormalization()(data)
>>> np.sum(normalized_data[0, :] ** 2)
1.0
Args:
axis: Integer or list/tuple. The axis or axes to normalize across.
Typically, this is the features axis or axes. The left-out axes are
typically the batch axis or axes. `-1` is the last dimension
in the input. Defaults to `-1`.
"""
def __init__(self, axis=-1, **kwargs):
super().__init__(**kwargs)
if isinstance(axis, (list, tuple)):
self.axis = list(axis)
elif isinstance(axis, int):
self.axis = axis
else:
raise TypeError(
"Invalid value for `axis` argument: "
"expected an int or a list/tuple of ints. "
f"Received: axis={axis}"
)
self.supports_masking = True
self.built = True
def call(self, inputs):
return ops.normalize(inputs, axis=self.axis, order=2, epsilon=1e-12)
def compute_output_shape(self, input_shape):
# Ensure axis is always treated as a list
if isinstance(self.axis, int):
axes = [self.axis]
else:
axes = self.axis
for axis in axes:
if axis >= len(input_shape) or axis < -len(input_shape):
raise ValueError(
f"Axis {self.axis} is out of bounds for "
f"input shape {input_shape}."
)
return input_shape
def get_config(self):
config = super().get_config()
config.update({"axis": self.axis})
return config
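# A quick sanity check of the multi-axis path (a sketch; the shape is
# arbitrary): with `axis=[1, 2]`, each sample is normalized jointly over both
# axes, so its squared values sum to ~1.
def _unit_normalization_sketch():
    import numpy as np
    data = np.random.rand(2, 3, 4).astype("float32")
    out = UnitNormalization(axis=[1, 2])(data)
    total = ops.convert_to_numpy(ops.sum(out[0] ** 2))
    assert abs(float(total) - 1.0) < 1e-5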
|
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.UnitNormalization")
class UnitNormalization(Layer):
"""Unit normalization layer.
    Normalize a batch of inputs so that each input in the batch has an L2 norm
equal to 1 (across the axes specified in `axis`).
Example:
>>> data = np.arange(6).reshape(2, 3)
>>> normalized_data = keras.layers.UnitNormalization()(data)
>>> np.sum(normalized_data[0, :] ** 2)
1.0
Args:
axis: Integer or list/tuple. The axis or axes to normalize across.
Typically, this is the features axis or axes. The left-out axes are
typically the batch axis or axes. `-1` is the last dimension
in the input. Defaults to `-1`.
"""
def __init__(self, axis=-1, **kwargs):
super().__init__(**kwargs)
if isinstance(axis, (list, tuple)):
self.axis = list(axis)
elif isinstance(axis, int):
self.axis = axis
else:
raise TypeError(
"Invalid value for `axis` argument: "
"expected an int or a list/tuple of ints. "
f"Received: axis={axis}"
)
self.supports_masking = True
self.built = True
def call(self, inputs):
return ops.normalize(inputs, axis=self.axis, order=2, epsilon=1e-12)
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = super().get_config()
config.update({"axis": self.axis})
return config
|
"""
This example trains a CrossEncoder for the NLI task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it learns to predict the labels: "contradiction": 0, "entailment": 1, "neutral": 2.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_nli.py
"""
import logging
from datetime import datetime
from datasets import load_dataset
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CEClassificationEvaluator
from sentence_transformers.cross_encoder.losses.CrossEntropyLoss import CrossEntropyLoss
from sentence_transformers.cross_encoder.trainer import CrossEncoderTrainer
from sentence_transformers.cross_encoder.training_args import CrossEncoderTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
train_batch_size = 64
num_epochs = 1
output_dir = "output/training_ce_allnli-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Define our CrossEncoder model. We use distilroberta-base as the base model and set it up to predict 3 labels
# You can also use other base models, like bert-base-uncased, microsoft/mpnet-base, etc.
model = CrossEncoder("distilroberta-base", num_labels=3)
# 2. Load the AllNLI dataset: https://huggingface.co/datasets/sentence-transformers/all-nli
# We'll start with 10k training samples, but you can increase this to get a stronger model
logging.info("Read AllNLI train dataset")
train_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="train").select(range(10000))
eval_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="dev").select(range(1000))
test_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="test")
logging.info(train_dataset)
# If you need to remap labels from the dataset, you can do it like so:
mapping = {0: 1, 1: 2, 2: 0}
eval_dataset = eval_dataset.map(lambda x: {"label": mapping[x["label"]]})
test_dataset = test_dataset.map(lambda x: {"label": mapping[x["label"]]})
# 3. Define our training loss:
loss = CrossEntropyLoss(model)
# 4. During training, we use CEClassificationEvaluator to measure the performance on the dev set
dev_cls_evaluator = CEClassificationEvaluator(
list(zip(eval_dataset["premise"], eval_dataset["hypothesis"])),
eval_dataset["label"],
name="AllNLI-dev",
)
dev_cls_evaluator(model)
# 5. Define the training arguments
args = CrossEncoderTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=500,
save_strategy="steps",
save_steps=500,
save_total_limit=2,
logging_steps=100,
run_name="ce-nli-v1", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = CrossEncoderTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_cls_evaluator,
)
trainer.train()
# 7. Evaluate the final model on test dataset
test_cls_evaluator = CEClassificationEvaluator(
list(zip(test_dataset["premise"], test_dataset["hypothesis"])),
test_dataset["label"],
name="AllNLI-test",
)
test_cls_evaluator(model)
# 8. Save the final model
final_output_dir = f"{output_dir}/final"
model.save_pretrained(final_output_dir)
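# Sketch: reload the saved model and score a premise/hypothesis pair. The pair
# below is invented for illustration; `predict` returns scores over the 3 NLI
# labels.
loaded_model = CrossEncoder(final_output_dir)
scores = loaded_model.predict([("A man is eating food.", "A man is eating.")])
print(scores)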
|
"""
This example trains a CrossEncoder for the NLI task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it learns to predict the labels: "contradiction": 0, "entailment": 1, "neutral": 2.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_nli.py
"""
import logging
from datetime import datetime
from datasets import load_dataset
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CEClassificationEvaluator
from sentence_transformers.cross_encoder.losses.CrossEntropyLoss import CrossEntropyLoss
from sentence_transformers.cross_encoder.trainer import CrossEncoderTrainer
from sentence_transformers.cross_encoder.training_args import CrossEncoderTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
train_batch_size = 64
num_epochs = 1
output_dir = "output/training_ce_allnli-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Define our CrossEncoder model. We use distilroberta-base as the base model and set it up to predict 3 labels
# You can also use other base models, like bert-base-uncased, microsoft/mpnet-base, etc.
model = CrossEncoder("distilroberta-base", num_labels=3)
# 2. Load the AllNLI dataset: https://huggingface.co/datasets/sentence-transformers/all-nli
# We'll start with 10k training samples, but you can increase this to get a stronger model
logging.info("Read AllNLI train dataset")
train_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="train").select(range(10000))
eval_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="dev").select(range(1000))
test_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="test")
logging.info(train_dataset)
# If you need to remap labels from the dataset, you can do it like so:
mapping = {0: 1, 1: 2, 2: 0}
eval_dataset = eval_dataset.map(lambda x: {"label": mapping[x["label"]]})
test_dataset = test_dataset.map(lambda x: {"label": mapping[x["label"]]})
# 3. Define our training loss:
loss = CrossEntropyLoss(model)
# 4. During training, we use CEClassificationEvaluator to measure the performance on the dev set
dev_cls_evaluator = CEClassificationEvaluator(
list(zip(eval_dataset["premise"], eval_dataset["hypothesis"])), eval_dataset["label"], name="AllNLI-dev"
)
dev_cls_evaluator(model)
# 5. Define the training arguments
args = CrossEncoderTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=500,
save_strategy="steps",
save_steps=500,
save_total_limit=2,
logging_steps=100,
run_name="ce-nli-v1", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = CrossEncoderTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_cls_evaluator,
)
trainer.train()
# 7. Evaluate the final model on test dataset
test_cls_evaluator = CEClassificationEvaluator(
list(zip(test_dataset["premise"], test_dataset["hypothesis"])), test_dataset["label"], name="AllNLI-test"
)
test_cls_evaluator(model)
# 8. Save the final model
final_output_dir = f"{output_dir}/final"
model.save_pretrained(final_output_dir)
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa
model = dict(
backbone=dict(
_delete_=True,
type='SwinTransformer',
embed_dims=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.2,
patch_norm=True,
out_indices=(1, 2, 3),
# Please only add indices that would be used
        # in FPN, otherwise some parameters will not be used
with_cp=False,
convert_weights=True,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
neck=dict(in_channels=[192, 384, 768], start_level=0, num_outs=5))
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
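# Sanity check (a sketch, not part of the config): a Swin stage `i` outputs
# `embed_dims * 2**i` channels, so `out_indices=(1, 2, 3)` with `embed_dims=96`
# yields exactly the FPN `in_channels` above.
assert [96 * 2**i for i in (1, 2, 3)] == [192, 384, 768]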
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa
model = dict(
backbone=dict(
_delete_=True,
type='SwinTransformer',
embed_dims=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.2,
patch_norm=True,
out_indices=(1, 2, 3),
# Please only add indices that would be used
        # in FPN, otherwise some parameters will not be used
with_cp=False,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
neck=dict(in_channels=[192, 384, 768], start_level=0, num_outs=5))
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import celu
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import dot_product_attention
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import glu
from keras.src.ops.nn import hard_shrink
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import hard_tanh
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import soft_shrink
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
from keras.src.ops.nn import squareplus
from keras.src.ops.nn import tanh_shrink
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import celu
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import dot_product_attention
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import glu
from keras.src.ops.nn import hard_shrink
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import hard_tanh
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import soft_shrink
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
from keras.src.ops.nn import tanh_shrink
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools.amadeus.flight_search import (
AmadeusFlightSearch,
FlightSearchSchema,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"FlightSearchSchema": "langchain_community.tools.amadeus.flight_search",
"AmadeusFlightSearch": "langchain_community.tools.amadeus.flight_search",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"AmadeusFlightSearch",
"FlightSearchSchema",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools.amadeus.flight_search import (
AmadeusFlightSearch,
FlightSearchSchema,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"FlightSearchSchema": "langchain_community.tools.amadeus.flight_search",
"AmadeusFlightSearch": "langchain_community.tools.amadeus.flight_search",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"FlightSearchSchema",
"AmadeusFlightSearch",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .ade20k import (ADE20KInstanceDataset, ADE20KPanopticDataset,
ADE20KSegDataset)
from .base_det_dataset import BaseDetDataset
from .base_semseg_dataset import BaseSegDataset
from .base_video_dataset import BaseVideoDataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_caption import CocoCaptionDataset
from .coco_panoptic import CocoPanopticDataset
from .coco_semantic import CocoSegDataset
from .crowdhuman import CrowdHumanDataset
from .dataset_wrappers import ConcatDataset, MultiImageMixDataset
from .deepfashion import DeepFashionDataset
from .dsdl import DSDLDetDataset
from .isaid import iSAIDDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .mot_challenge_dataset import MOTChallengeDataset
from .objects365 import Objects365V1Dataset, Objects365V2Dataset
from .openimages import OpenImagesChallengeDataset, OpenImagesDataset
from .refcoco import RefCocoDataset
from .reid_dataset import ReIDDataset
from .samplers import (AspectRatioBatchSampler, ClassAwareSampler,
GroupMultiSourceSampler, MultiSourceSampler,
TrackAspectRatioBatchSampler, TrackImgSampler)
from .utils import get_loading_pipeline
from .v3det import V3DetDataset
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
from .youtube_vis_dataset import YouTubeVISDataset
__all__ = [
'XMLDataset', 'CocoDataset', 'DeepFashionDataset', 'VOCDataset',
'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset', 'LVISV1Dataset',
'WIDERFaceDataset', 'get_loading_pipeline', 'CocoPanopticDataset',
'MultiImageMixDataset', 'OpenImagesDataset', 'OpenImagesChallengeDataset',
'AspectRatioBatchSampler', 'ClassAwareSampler', 'MultiSourceSampler',
'GroupMultiSourceSampler', 'BaseDetDataset', 'CrowdHumanDataset',
'Objects365V1Dataset', 'Objects365V2Dataset', 'DSDLDetDataset',
'BaseVideoDataset', 'MOTChallengeDataset', 'TrackImgSampler',
'ReIDDataset', 'YouTubeVISDataset', 'TrackAspectRatioBatchSampler',
'ADE20KPanopticDataset', 'CocoCaptionDataset', 'RefCocoDataset',
'BaseSegDataset', 'ADE20KSegDataset', 'CocoSegDataset',
'ADE20KInstanceDataset', 'iSAIDDataset', 'V3DetDataset', 'ConcatDataset'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .ade20k import (ADE20KInstanceDataset, ADE20KPanopticDataset,
ADE20KSegDataset)
from .base_det_dataset import BaseDetDataset
from .base_semseg_dataset import BaseSegDataset
from .base_video_dataset import BaseVideoDataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_caption import CocoCaptionDataset
from .coco_panoptic import CocoPanopticDataset
from .coco_semantic import CocoSegDataset
from .crowdhuman import CrowdHumanDataset
from .dataset_wrappers import MultiImageMixDataset
from .deepfashion import DeepFashionDataset
from .dsdl import DSDLDetDataset
from .isaid import iSAIDDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .mot_challenge_dataset import MOTChallengeDataset
from .objects365 import Objects365V1Dataset, Objects365V2Dataset
from .openimages import OpenImagesChallengeDataset, OpenImagesDataset
from .refcoco import RefCocoDataset
from .reid_dataset import ReIDDataset
from .samplers import (AspectRatioBatchSampler, ClassAwareSampler,
GroupMultiSourceSampler, MultiSourceSampler,
TrackAspectRatioBatchSampler, TrackImgSampler)
from .utils import get_loading_pipeline
from .v3det import V3DetDataset
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
from .youtube_vis_dataset import YouTubeVISDataset
__all__ = [
'XMLDataset', 'CocoDataset', 'DeepFashionDataset', 'VOCDataset',
'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset', 'LVISV1Dataset',
'WIDERFaceDataset', 'get_loading_pipeline', 'CocoPanopticDataset',
'MultiImageMixDataset', 'OpenImagesDataset', 'OpenImagesChallengeDataset',
'AspectRatioBatchSampler', 'ClassAwareSampler', 'MultiSourceSampler',
'GroupMultiSourceSampler', 'BaseDetDataset', 'CrowdHumanDataset',
'Objects365V1Dataset', 'Objects365V2Dataset', 'DSDLDetDataset',
'BaseVideoDataset', 'MOTChallengeDataset', 'TrackImgSampler',
'ReIDDataset', 'YouTubeVISDataset', 'TrackAspectRatioBatchSampler',
'ADE20KPanopticDataset', 'CocoCaptionDataset', 'RefCocoDataset',
'BaseSegDataset', 'ADE20KSegDataset', 'CocoSegDataset',
'ADE20KInstanceDataset', 'iSAIDDataset', 'V3DetDataset'
]
|
import asyncio
from jina.clients.base.http import HTTPBaseClient
from jina.clients.mixin import (
    AsyncHealthCheckMixin,
    AsyncMutateMixin,
    AsyncPostMixin,
    AsyncProfileMixin,
    HealthCheckMixin,
    MutateMixin,
    PostMixin,
    ProfileMixin,
)
class HTTPClient(
HTTPBaseClient, PostMixin, ProfileMixin, MutateMixin, HealthCheckMixin
):
"""A client connecting to a Gateway using gRPC protocol.
Instantiate this class through the :meth:`jina.Client` convenience method.
EXAMPLE USAGE
.. code-block:: python
from jina import Client
from docarray import Document
# select host address to connect to
c = Client(
protocol='http', asyncio=False, host='http://my.awesome.flow:1234'
) # returns HTTPClient instance
c.post(on='/index', inputs=Document(text='hello!'))
"""
class AsyncHTTPClient(
HTTPBaseClient,
AsyncPostMixin,
AsyncMutateMixin,
AsyncProfileMixin,
AsyncHealthCheckMixin,
):
"""
Asynchronous client connecting to a Gateway using HTTP protocol.
Instantiate this class through the :meth:`jina.Client` convenience method.
    Unlike :class:`HTTPClient`, here :meth:`post` is a coroutine (i.e. declared with the async/await syntax);
    simply calling it will not schedule it for execution.
    To actually run a coroutine, users need to put it in an event loop, e.g. via ``asyncio.run()`` or
    ``asyncio.create_task()``.
    :class:`AsyncHTTPClient` can be very useful in
    integration settings, where Jina/Flow/Client is NOT the main logic but rather serves as part of another program.
    In this case, users often do not want to let Jina control the ``asyncio`` event loop. By contrast, :class:`Client`
    controls and wraps the event loop internally, making the client look synchronous from the outside.
EXAMPLE USAGE
.. code-block:: python
from jina import Client
from docarray import Document
# async inputs for the client
async def async_inputs():
for _ in range(10):
yield Document()
await asyncio.sleep(0.1)
# select host address to connect to
c = Client(
protocol='http', asyncio=True, host='http://my.awesome.flow:1234'
) # returns AsyncHTTPClient instance
        async for resp in c.post(on='/index', inputs=async_inputs, request_size=1):
print(resp)
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._lock = asyncio.Lock()
self.reuse_session = self.args.reuse_session
|
from jina.clients.base.http import HTTPBaseClient
from jina.clients.mixin import (
AsyncHealthCheckMixin,
AsyncMutateMixin,
AsyncPostMixin,
AsyncProfileMixin,
HealthCheckMixin,
MutateMixin,
PostMixin,
ProfileMixin,
)
class HTTPClient(
HTTPBaseClient, PostMixin, ProfileMixin, MutateMixin, HealthCheckMixin
):
"""A client connecting to a Gateway using gRPC protocol.
Instantiate this class through the :meth:`jina.Client` convenience method.
EXAMPLE USAGE
.. code-block:: python
from jina import Client
from docarray import Document
# select host address to connect to
c = Client(
protocol='http', asyncio=False, host='http://my.awesome.flow:1234'
) # returns HTTPClient instance
c.post(on='/index', inputs=Document(text='hello!'))
"""
class AsyncHTTPClient(
HTTPBaseClient,
AsyncPostMixin,
AsyncMutateMixin,
AsyncProfileMixin,
AsyncHealthCheckMixin,
):
"""
Asynchronous client connecting to a Gateway using HTTP protocol.
Instantiate this class through the :meth:`jina.Client` convenience method.
    Unlike :class:`HTTPClient`, here :meth:`post` is a coroutine (i.e. declared with the async/await syntax);
    simply calling it will not schedule it for execution.
    To actually run a coroutine, users need to put it in an event loop, e.g. via ``asyncio.run()`` or
    ``asyncio.create_task()``.
    :class:`AsyncHTTPClient` can be very useful in
    integration settings, where Jina/Flow/Client is NOT the main logic but rather serves as part of another program.
    In this case, users often do not want to let Jina control the ``asyncio`` event loop. By contrast, :class:`Client`
    controls and wraps the event loop internally, making the client look synchronous from the outside.
EXAMPLE USAGE
.. code-block:: python
from jina import Client
from docarray import Document
# async inputs for the client
async def async_inputs():
for _ in range(10):
yield Document()
await asyncio.sleep(0.1)
# select host address to connect to
c = Client(
protocol='http', asyncio=True, host='http://my.awesome.flow:1234'
) # returns AsyncHTTPClient instance
        async for resp in c.post(on='/index', inputs=async_inputs, request_size=1):
print(resp)
"""
|
_base_ = './mask-rcnn_regnetx-3.2GF_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='RegNet',
arch='regnetx_8.0gf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_8.0gf')),
neck=dict(
type='FPN',
in_channels=[80, 240, 720, 1920],
out_channels=256,
num_outs=5))
|
_base_ = './mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='RegNet',
arch='regnetx_8.0gf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_8.0gf')),
neck=dict(
type='FPN',
in_channels=[80, 240, 720, 1920],
out_channels=256,
num_outs=5))
|
"""Helper functions for managing the LangChain API.
This module is only relevant for LangChain developers, not for users.
.. warning::
This module and its submodules are for internal use only. Do not use them
in your own code. We may change the API at any time with no warning.
"""
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from .beta_decorator import (
LangChainBetaWarning,
beta,
suppress_langchain_beta_warning,
surface_langchain_beta_warnings,
)
from .deprecation import (
LangChainDeprecationWarning,
deprecated,
suppress_langchain_deprecation_warning,
surface_langchain_deprecation_warnings,
warn_deprecated,
)
from .path import as_import_path, get_relative_path
__all__ = [
"as_import_path",
"beta",
"deprecated",
"get_relative_path",
"LangChainBetaWarning",
"LangChainDeprecationWarning",
"suppress_langchain_beta_warning",
"surface_langchain_beta_warnings",
"suppress_langchain_deprecation_warning",
"surface_langchain_deprecation_warnings",
"warn_deprecated",
]
_dynamic_imports = {
"LangChainBetaWarning": "beta_decorator",
"beta": "beta_decorator",
"suppress_langchain_beta_warning": "beta_decorator",
"surface_langchain_beta_warnings": "beta_decorator",
"as_import_path": "path",
"get_relative_path": "path",
"LangChainDeprecationWarning": "deprecation",
"deprecated": "deprecation",
"surface_langchain_deprecation_warnings": "deprecation",
"suppress_langchain_deprecation_warning": "deprecation",
"warn_deprecated": "deprecation",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
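# Toy sketch of the PEP 562 mechanism used above, self-contained with an
# in-memory module instead of a real package: the first attribute access goes
# through `__getattr__`, and caching the result in the module dict makes later
# lookups plain attribute reads.
def _pep562_sketch() -> None:
    import types
    mod = types.ModuleType("lazy_demo")
    def _getattr(name: str) -> object:
        if name != "answer":
            raise AttributeError(name)
        mod.__dict__[name] = 42  # cache: later lookups bypass the hook
        return 42
    mod.__getattr__ = _getattr  # module-level __getattr__ (PEP 562)
    assert mod.answer == 42
    assert "answer" in vars(mod)  # cached after first access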
|
"""Helper functions for managing the LangChain API.
This module is only relevant for LangChain developers, not for users.
.. warning::
This module and its submodules are for internal use only. Do not use them
in your own code. We may change the API at any time with no warning.
"""
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from .beta_decorator import (
LangChainBetaWarning,
beta,
suppress_langchain_beta_warning,
surface_langchain_beta_warnings,
)
from .deprecation import (
LangChainDeprecationWarning,
deprecated,
suppress_langchain_deprecation_warning,
surface_langchain_deprecation_warnings,
warn_deprecated,
)
from .path import as_import_path, get_relative_path
__all__ = [
"as_import_path",
"beta",
"deprecated",
"get_relative_path",
"LangChainBetaWarning",
"LangChainDeprecationWarning",
"suppress_langchain_beta_warning",
"surface_langchain_beta_warnings",
"suppress_langchain_deprecation_warning",
"surface_langchain_deprecation_warnings",
"warn_deprecated",
]
_dynamic_imports = {
"LangChainBetaWarning": "beta_decorator",
"beta": "beta_decorator",
"suppress_langchain_beta_warning": "beta_decorator",
"surface_langchain_beta_warnings": "beta_decorator",
"as_import_path": "path",
"get_relative_path": "path",
"LangChainDeprecationWarning": "deprecation",
"deprecated": "deprecation",
"surface_langchain_deprecation_warnings": "deprecation",
"suppress_langchain_deprecation_warning": "deprecation",
"warn_deprecated": "deprecation",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent # type: ignore[name-defined]
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
import logging
import os
import signal
import sys
from abc import ABC, abstractmethod
from multiprocessing import Process, set_start_method
from typing import Optional
from backend.util.logging import configure_logging
from backend.util.metrics import sentry_init
logger = logging.getLogger(__name__)
_SERVICE_NAME = "MainProcess"
def get_service_name():
return _SERVICE_NAME
def set_service_name(name: str):
global _SERVICE_NAME
_SERVICE_NAME = name
class AppProcess(ABC):
"""
A class to represent an object that can be executed in a background process.
"""
process: Optional[Process] = None
set_start_method("spawn", force=True)
configure_logging()
sentry_init()
# Methods that are executed INSIDE the process #
@abstractmethod
def run(self):
"""
The method that will be executed in the process.
"""
pass
@classmethod
@property
def service_name(cls) -> str:
return cls.__name__
def cleanup(self):
"""
Implement this method on a subclass to do post-execution cleanup,
e.g. disconnecting from a database or terminating child processes.
"""
pass
def health_check(self) -> str:
"""
A method to check the health of the process.
"""
return "OK"
def execute_run_command(self, silent):
signal.signal(signal.SIGTERM, self._self_terminate)
try:
if silent:
sys.stdout = open(os.devnull, "w")
sys.stderr = open(os.devnull, "w")
set_service_name(self.service_name)
logger.info(f"[{self.service_name}] Starting...")
self.run()
except (KeyboardInterrupt, SystemExit) as e:
logger.warning(f"[{self.service_name}] Terminated: {e}; quitting...")
def _self_terminate(self, signum: int, frame):
self.cleanup()
sys.exit(0)
# Methods that are executed OUTSIDE the process #
def __enter__(self):
self.start(background=True)
return self
def __exit__(self, *args, **kwargs):
self.stop()
def start(self, background: bool = False, silent: bool = False, **proc_args) -> int:
"""
Start the background process.
Args:
background: Whether to run the process in the background.
silent: Whether to disable stdout and stderr.
proc_args: Additional arguments to pass to the process.
Returns:
the process id or 0 if the process is not running in the background.
"""
if not background:
self.execute_run_command(silent)
return 0
self.process = Process(
name=self.__class__.__name__,
target=self.execute_run_command,
args=(silent,),
**proc_args,
)
self.process.start()
self.health_check()
logger.info(f"[{self.service_name}] started with PID {self.process.pid}")
return self.process.pid or 0
def stop(self):
"""
Stop the background process.
"""
if not self.process:
return
self.process.terminate()
self.process.join()
logger.info(f"[{self.service_name}] with PID {self.process.pid} stopped")
self.process = None
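# Hypothetical subclass sketch (names invented for illustration): `run`
# executes inside the spawned process, and the context-manager protocol maps
# onto start()/stop().
class _SleeperProcess(AppProcess):
    def run(self):
        import time
        while True:
            time.sleep(1)
def _app_process_sketch():
    with _SleeperProcess() as proc:  # __enter__ -> start(background=True)
        assert proc.health_check() == "OK"
    # __exit__ -> stop(): terminate and join the child process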
|
import logging
import os
import signal
import sys
from abc import ABC, abstractmethod
from multiprocessing import Process, set_start_method
from typing import Optional
from backend.util.logging import configure_logging
from backend.util.metrics import sentry_init
logger = logging.getLogger(__name__)
_SERVICE_NAME = "MainProcess"
def get_service_name():
return _SERVICE_NAME
def set_service_name(name: str):
global _SERVICE_NAME
_SERVICE_NAME = name
class AppProcess(ABC):
"""
A class to represent an object that can be executed in a background process.
"""
process: Optional[Process] = None
set_start_method("spawn", force=True)
configure_logging()
sentry_init()
# Methods that are executed INSIDE the process #
@abstractmethod
def run(self):
"""
The method that will be executed in the process.
"""
pass
@classmethod
@property
def service_name(cls) -> str:
return cls.__name__
def cleanup(self):
"""
Implement this method on a subclass to do post-execution cleanup,
e.g. disconnecting from a database or terminating child processes.
"""
pass
def health_check(self) -> str:
"""
A method to check the health of the process.
"""
return "OK"
def execute_run_command(self, silent):
signal.signal(signal.SIGTERM, self._self_terminate)
try:
if silent:
sys.stdout = open(os.devnull, "w")
sys.stderr = open(os.devnull, "w")
set_service_name(self.service_name)
logger.info(f"[{self.service_name}] Starting...")
self.run()
except (KeyboardInterrupt, SystemExit) as e:
logger.warning(f"[{self.service_name}] Terminated: {e}; quitting...")
def _self_terminate(self, signum: int, frame):
self.cleanup()
sys.exit(0)
# Methods that are executed OUTSIDE the process #
def __enter__(self):
self.start(background=True)
return self
def __exit__(self, *args, **kwargs):
self.stop()
def start(self, background: bool = False, silent: bool = False, **proc_args) -> int:
"""
Start the background process.
Args:
background: Whether to run the process in the background.
silent: Whether to disable stdout and stderr.
proc_args: Additional arguments to pass to the process.
Returns:
the process id or 0 if the process is not running in the background.
"""
if not background:
self.execute_run_command(silent)
return 0
self.process = Process(
name=self.__class__.__name__,
target=self.execute_run_command,
args=(silent,),
**proc_args,
)
self.process.start()
self.health_check()
return self.process.pid or 0
def stop(self):
"""
Stop the background process.
"""
if not self.process:
return
self.process.terminate()
self.process.join()
self.process = None
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import torch
from diffusers import (
IFPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
backend_empty_cache,
backend_reset_max_memory_allocated,
backend_reset_peak_memory_stats,
load_numpy,
require_accelerator,
require_hf_hub_version_greater,
require_torch_accelerator,
require_transformers_version_greater,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
pipeline_class = IFPipeline
params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
def get_dummy_components(self):
return self._get_dummy_components()
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def test_save_load_optional_components(self):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU")
@require_accelerator
def test_save_load_float16(self):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_float16(expected_max_diff=1e-1)
def test_attention_slicing_forward_pass(self):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
def test_save_load_local(self):
self._test_save_load_local()
def test_inference_batch_single_identical(self):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2,
)
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available(),
reason="XFormers attention is only available with CUDA and `xformers` installed",
)
def test_xformers_attention_forwardGenerator_pass(self):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_hf_hub_version_greater("0.26.5")
@require_transformers_version_greater("4.47.1")
def test_save_load_dduf(self):
super().test_save_load_dduf(atol=1e-2, rtol=1e-2)
@slow
@require_torch_accelerator
class IFPipelineSlowTests(unittest.TestCase):
def setUp(self):
# clean up the VRAM before each test
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def test_if_text_to_image(self):
pipe = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
pipe.unet.set_attn_processor(AttnAddedKVProcessor())
pipe.enable_model_cpu_offload(device=torch_device)
backend_reset_max_memory_allocated(torch_device)
backend_empty_cache(torch_device)
backend_reset_peak_memory_stats(torch_device)
generator = torch.Generator(device="cpu").manual_seed(0)
output = pipe(
prompt="anime turtle",
num_inference_steps=2,
generator=generator,
output_type="np",
)
image = output.images[0]
mem_bytes = torch.cuda.max_memory_allocated()
assert mem_bytes < 12 * 10**9
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
)
assert_mean_pixel_difference(image, expected_image)
pipe.remove_all_hooks()
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import torch
from diffusers import (
IFPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
load_numpy,
require_accelerator,
require_hf_hub_version_greater,
require_torch_gpu,
require_transformers_version_greater,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
pipeline_class = IFPipeline
params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
def get_dummy_components(self):
return self._get_dummy_components()
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def test_save_load_optional_components(self):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU")
@require_accelerator
def test_save_load_float16(self):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_float16(expected_max_diff=1e-1)
def test_attention_slicing_forward_pass(self):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
def test_save_load_local(self):
self._test_save_load_local()
def test_inference_batch_single_identical(self):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2,
)
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available(),
reason="XFormers attention is only available with CUDA and `xformers` installed",
)
def test_xformers_attention_forwardGenerator_pass(self):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_hf_hub_version_greater("0.26.5")
@require_transformers_version_greater("4.47.1")
def test_save_load_dduf(self):
super().test_save_load_dduf(atol=1e-2, rtol=1e-2)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
def setUp(self):
# clean up the VRAM before each test
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_if_text_to_image(self):
pipe = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
pipe.unet.set_attn_processor(AttnAddedKVProcessor())
pipe.enable_model_cpu_offload()
torch.cuda.reset_max_memory_allocated()
torch.cuda.empty_cache()
torch.cuda.reset_peak_memory_stats()
generator = torch.Generator(device="cpu").manual_seed(0)
output = pipe(
prompt="anime turtle",
num_inference_steps=2,
generator=generator,
output_type="np",
)
image = output.images[0]
mem_bytes = torch.cuda.max_memory_allocated()
assert mem_bytes < 12 * 10**9
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
)
assert_mean_pixel_difference(image, expected_image)
pipe.remove_all_hooks()
|
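# The pair above migrates CUDA-only test utilities (torch.cuda.empty_cache,
# require_torch_gpu) to device-agnostic helpers (backend_empty_cache,
# require_torch_accelerator). Below is a minimal sketch of such a dispatch
# helper; it is an illustrative assumption, not the actual
# diffusers.utils.testing_utils implementation.
import torch

def backend_empty_cache(device: str) -> None:
    # Route cache clearing to whichever accelerator backend is in use;
    # on CPU there is no cache to clear, so this is a no-op.
    if device == "cuda":
        torch.cuda.empty_cache()
    elif device == "xpu" and hasattr(torch, "xpu"):
        torch.xpu.empty_cache()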
from typing import Dict, List, Optional, Set
import pytest
from docarray import BaseDoc, DocArray
from docarray.documents import ImageDoc
from docarray.utils.reduce import reduce, reduce_all
class InnerDoc(BaseDoc):
integer: int
inner_list: List
class MMDoc(BaseDoc):
text: str = ''
price: int = 0
categories: Optional[List[str]] = None
image: Optional[ImageDoc] = None
matches: Optional[DocArray] = None
matches_with_same_id: Optional[DocArray] = None
opt_int: Optional[int] = None
test_set: Optional[Set] = None
inner_doc: Optional[InnerDoc] = None
test_dict: Optional[Dict] = None
@pytest.fixture
def doc1():
return MMDoc(
text='hey here',
categories=['a', 'b', 'c'],
price=10,
matches=DocArray[MMDoc]([MMDoc()]),
matches_with_same_id=DocArray[MMDoc](
[MMDoc(id='a', matches=DocArray[MMDoc]([MMDoc()]))]
),
test_set={'a', 'a'},
inner_doc=InnerDoc(integer=2, inner_list=['c', 'd']),
test_dict={'a': 0, 'b': 2, 'd': 4, 'z': 3},
)
@pytest.fixture
def doc2(doc1):
return MMDoc(
id=doc1.id,
text='hey here 2',
categories=['d', 'e', 'f'],
price=5,
opt_int=5,
matches=DocArray[MMDoc]([MMDoc()]),
matches_with_same_id=DocArray[MMDoc](
[MMDoc(id='a', matches=DocArray[MMDoc]([MMDoc()]))]
),
test_set={'a', 'b'},
inner_doc=InnerDoc(integer=3, inner_list=['a', 'b']),
test_dict={'a': 10, 'b': 10, 'c': 3, 'z': None},
)
def test_reduce_different_ids():
da1 = DocArray[MMDoc]([MMDoc() for _ in range(10)])
da2 = DocArray[MMDoc]([MMDoc() for _ in range(10)])
result = reduce(da1, da2)
assert len(result) == 20
# da1 is changed in place (no extra memory)
assert len(da1) == 20
def test_reduce(doc1, doc2):
da1 = DocArray[MMDoc]([doc1, MMDoc()])
da2 = DocArray[MMDoc]([MMDoc(), doc2])
result = reduce(da1, da2)
assert len(result) == 3
# da1 is changed in place (no extra memory)
assert len(da1) == 3
merged_doc = result[0]
assert merged_doc.text == 'hey here 2'
assert merged_doc.categories == ['a', 'b', 'c', 'd', 'e', 'f']
assert len(merged_doc.matches) == 2
assert merged_doc.opt_int == 5
assert merged_doc.price == 5
assert merged_doc.test_set == {'a', 'b'}
assert len(merged_doc.matches_with_same_id) == 1
assert len(merged_doc.matches_with_same_id[0].matches) == 2
assert merged_doc.inner_doc.integer == 3
assert merged_doc.inner_doc.inner_list == ['c', 'd', 'a', 'b']
def test_reduce_all(doc1, doc2):
da1 = DocArray[MMDoc]([doc1, MMDoc()])
da2 = DocArray[MMDoc]([MMDoc(), doc2])
da3 = DocArray[MMDoc]([MMDoc(), MMDoc(), doc1])
result = reduce_all([da1, da2, da3])
assert len(result) == 5
# da1 is changed in place (no extra memory)
assert len(da1) == 5
merged_doc = result[0]
assert merged_doc.text == 'hey here 2'
assert merged_doc.categories == [
'a',
'b',
'c',
'd',
'e',
'f',
'a',
'b',
'c',
'd',
'e',
'f',
]
assert len(merged_doc.matches) == 2
assert merged_doc.opt_int == 5
assert merged_doc.price == 5
assert merged_doc.test_set == {'a', 'b'}
assert len(merged_doc.matches_with_same_id) == 1
assert len(merged_doc.matches_with_same_id[0].matches) == 2
assert merged_doc.inner_doc.integer == 3
assert merged_doc.inner_doc.inner_list == ['c', 'd', 'a', 'b', 'c', 'd', 'a', 'b']
|
from typing import Dict, List, Optional, Set
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.documents import ImageDoc
from docarray.utils.reduce import reduce, reduce_all
class InnerDoc(BaseDocument):
integer: int
inner_list: List
class MMDoc(BaseDocument):
text: str = ''
price: int = 0
categories: Optional[List[str]] = None
image: Optional[ImageDoc] = None
matches: Optional[DocumentArray] = None
matches_with_same_id: Optional[DocumentArray] = None
opt_int: Optional[int] = None
test_set: Optional[Set] = None
inner_doc: Optional[InnerDoc] = None
test_dict: Optional[Dict] = None
@pytest.fixture
def doc1():
return MMDoc(
text='hey here',
categories=['a', 'b', 'c'],
price=10,
matches=DocumentArray[MMDoc]([MMDoc()]),
matches_with_same_id=DocumentArray[MMDoc](
[MMDoc(id='a', matches=DocumentArray[MMDoc]([MMDoc()]))]
),
test_set={'a', 'a'},
inner_doc=InnerDoc(integer=2, inner_list=['c', 'd']),
test_dict={'a': 0, 'b': 2, 'd': 4, 'z': 3},
)
@pytest.fixture
def doc2(doc1):
return MMDoc(
id=doc1.id,
text='hey here 2',
categories=['d', 'e', 'f'],
price=5,
opt_int=5,
matches=DocumentArray[MMDoc]([MMDoc()]),
matches_with_same_id=DocumentArray[MMDoc](
[MMDoc(id='a', matches=DocumentArray[MMDoc]([MMDoc()]))]
),
test_set={'a', 'b'},
inner_doc=InnerDoc(integer=3, inner_list=['a', 'b']),
test_dict={'a': 10, 'b': 10, 'c': 3, 'z': None},
)
def test_reduce_different_ids():
da1 = DocumentArray[MMDoc]([MMDoc() for _ in range(10)])
da2 = DocumentArray[MMDoc]([MMDoc() for _ in range(10)])
result = reduce(da1, da2)
assert len(result) == 20
# da1 is changed in place (no extra memory)
assert len(da1) == 20
def test_reduce(doc1, doc2):
da1 = DocumentArray[MMDoc]([doc1, MMDoc()])
da2 = DocumentArray[MMDoc]([MMDoc(), doc2])
result = reduce(da1, da2)
assert len(result) == 3
# da1 is changed in place (no extra memory)
assert len(da1) == 3
merged_doc = result[0]
assert merged_doc.text == 'hey here 2'
assert merged_doc.categories == ['a', 'b', 'c', 'd', 'e', 'f']
assert len(merged_doc.matches) == 2
assert merged_doc.opt_int == 5
assert merged_doc.price == 5
assert merged_doc.test_set == {'a', 'b'}
assert len(merged_doc.matches_with_same_id) == 1
assert len(merged_doc.matches_with_same_id[0].matches) == 2
assert merged_doc.inner_doc.integer == 3
assert merged_doc.inner_doc.inner_list == ['c', 'd', 'a', 'b']
def test_reduce_all(doc1, doc2):
da1 = DocumentArray[MMDoc]([doc1, MMDoc()])
da2 = DocumentArray[MMDoc]([MMDoc(), doc2])
da3 = DocumentArray[MMDoc]([MMDoc(), MMDoc(), doc1])
result = reduce_all([da1, da2, da3])
assert len(result) == 5
# da1 is changed in place (no extra memory)
assert len(da1) == 5
merged_doc = result[0]
assert merged_doc.text == 'hey here 2'
assert merged_doc.categories == [
'a',
'b',
'c',
'd',
'e',
'f',
'a',
'b',
'c',
'd',
'e',
'f',
]
assert len(merged_doc.matches) == 2
assert merged_doc.opt_int == 5
assert merged_doc.price == 5
assert merged_doc.test_set == {'a', 'b'}
assert len(merged_doc.matches_with_same_id) == 1
assert len(merged_doc.matches_with_same_id[0].matches) == 2
assert merged_doc.inner_doc.integer == 3
assert merged_doc.inner_doc.inner_list == ['c', 'd', 'a', 'b', 'c', 'd', 'a', 'b']
|
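# The reduce tests above rely on a field-wise merge for documents that share
# an id. The function below is a rough pure-Python approximation of that rule
# (lists concatenate, sets union, other non-None fields take the newer value);
# it is a sketch for intuition, not docarray's implementation.
from typing import Any, Dict

def merge_fields(doc_a: Dict[str, Any], doc_b: Dict[str, Any]) -> Dict[str, Any]:
    merged = dict(doc_a)
    for key, value in doc_b.items():
        if value is None:
            continue  # never overwrite an existing field with None
        if isinstance(value, list) and isinstance(merged.get(key), list):
            merged[key] = merged[key] + value  # list fields concatenate
        elif isinstance(value, set) and isinstance(merged.get(key), set):
            merged[key] = merged[key] | value  # set fields union
        else:
            merged[key] = value  # scalar fields take the newer value
    return merged

merged = merge_fields(
    {'categories': ['a', 'b', 'c'], 'price': 10, 'opt_int': None},
    {'categories': ['d', 'e', 'f'], 'price': 5, 'opt_int': 5},
)
assert merged == {'categories': ['a', 'b', 'c', 'd', 'e', 'f'], 'price': 5, 'opt_int': 5}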
_base_ = './scnet_r50_fpn_1x_coco.py'
# learning policy
max_epochs = 20
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 19],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
|
_base_ = './scnet_r50_fpn_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 19])
runner = dict(type='EpochBasedRunner', max_epochs=20)
|
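# Both configs above encode the same schedule: a 20-epoch run whose learning
# rate drops by 10x at epochs 16 and 19. A small self-contained check of the
# multiplier implied by MultiStepLR(milestones=[16, 19], gamma=0.1), ignoring
# the linear warmup phase:
def lr_multiplier(epoch: int, milestones=(16, 19), gamma: float = 0.1) -> float:
    # The multiplier decays by `gamma` at each milestone the epoch has passed.
    return gamma ** sum(1 for m in milestones if epoch >= m)

assert lr_multiplier(0) == 1.0
assert lr_multiplier(16) == 0.1
assert abs(lr_multiplier(19) - 0.01) < 1e-12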
import os
from functools import lru_cache
from typing import Union
import ffmpeg
import numpy as np
import torch
import torch.nn.functional as F
from .utils import exact_div
# hard-coded audio hyperparameters
SAMPLE_RATE = 16000
N_FFT = 400
N_MELS = 80
HOP_LENGTH = 160
CHUNK_LENGTH = 30
N_SAMPLES = CHUNK_LENGTH * SAMPLE_RATE # 480000: number of samples in a chunk
N_FRAMES = exact_div(N_SAMPLES, HOP_LENGTH) # 3000: number of frames in a mel spectrogram input
N_SAMPLES_PER_TOKEN = HOP_LENGTH * 2  # the initial convolutions have stride 2
FRAMES_PER_SECOND = exact_div(SAMPLE_RATE, HOP_LENGTH) # 100 mel frames in 1s (10ms each)
TOKENS_PER_SECOND = exact_div(SAMPLE_RATE, N_SAMPLES_PER_TOKEN) # 50 audio tokens in 1s (20ms each)
def load_audio(file: str, sr: int = SAMPLE_RATE):
"""
Open an audio file and read as mono waveform, resampling as necessary
Parameters
----------
file: str
The audio file to open
sr: int
The sample rate to resample the audio if necessary
Returns
-------
A NumPy array containing the audio waveform, in float32 dtype.
"""
try:
# This launches a subprocess to decode audio while down-mixing and resampling as necessary.
# Requires the ffmpeg CLI and `ffmpeg-python` package to be installed.
out, _ = (
ffmpeg.input(file, threads=0)
.output("-", format="s16le", acodec="pcm_s16le", ac=1, ar=sr)
.run(cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True)
)
except ffmpeg.Error as e:
raise RuntimeError(f"Failed to load audio: {e.stderr.decode()}") from e
return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0
def pad_or_trim(array, length: int = N_SAMPLES, *, axis: int = -1):
"""
Pad or trim the audio array to N_SAMPLES, as expected by the encoder.
"""
if torch.is_tensor(array):
if array.shape[axis] > length:
array = array.index_select(dim=axis, index=torch.arange(length, device=array.device))
if array.shape[axis] < length:
pad_widths = [(0, 0)] * array.ndim
pad_widths[axis] = (0, length - array.shape[axis])
array = F.pad(array, [pad for sizes in pad_widths[::-1] for pad in sizes])
else:
if array.shape[axis] > length:
array = array.take(indices=range(length), axis=axis)
if array.shape[axis] < length:
pad_widths = [(0, 0)] * array.ndim
pad_widths[axis] = (0, length - array.shape[axis])
array = np.pad(array, pad_widths)
return array
@lru_cache(maxsize=None)
def mel_filters(device, n_mels: int = N_MELS) -> torch.Tensor:
"""
    Load the mel filterbank matrix for projecting STFT into a Mel spectrogram.
    This allows decoupling the librosa dependency; saved using:
np.savez_compressed(
"mel_filters.npz",
mel_80=librosa.filters.mel(sr=16000, n_fft=400, n_mels=80),
)
"""
assert n_mels == 80, f"Unsupported n_mels: {n_mels}"
with np.load(os.path.join(os.path.dirname(__file__), "assets", "mel_filters.npz")) as f:
return torch.from_numpy(f[f"mel_{n_mels}"]).to(device)
def log_mel_spectrogram(audio: Union[str, np.ndarray, torch.Tensor], n_mels: int = N_MELS):
"""
    Compute the log-Mel spectrogram of an audio waveform.
Parameters
----------
audio: Union[str, np.ndarray, torch.Tensor], shape = (*)
        The path to an audio file, or a NumPy array or Tensor containing the audio waveform at 16 kHz
n_mels: int
The number of Mel-frequency filters, only 80 is supported
Returns
-------
torch.Tensor, shape = (80, n_frames)
A Tensor that contains the Mel spectrogram
"""
if not torch.is_tensor(audio):
if isinstance(audio, str):
audio = load_audio(audio)
audio = torch.from_numpy(audio)
window = torch.hann_window(N_FFT).to(audio.device)
stft = torch.stft(audio, N_FFT, HOP_LENGTH, window=window, return_complex=True)
magnitudes = stft[..., :-1].abs() ** 2
filters = mel_filters(audio.device, n_mels)
mel_spec = filters @ magnitudes
log_spec = torch.clamp(mel_spec, min=1e-10).log10()
log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
log_spec = (log_spec + 4.0) / 4.0
return log_spec
|
import os
from functools import lru_cache
from typing import Union
import ffmpeg
import numpy as np
import torch
import torch.nn.functional as F
from .utils import exact_div
# hard-coded audio hyperparameters
SAMPLE_RATE = 16000
N_FFT = 400
N_MELS = 80
HOP_LENGTH = 160
CHUNK_LENGTH = 30
N_SAMPLES = CHUNK_LENGTH * SAMPLE_RATE # 480000: number of samples in a chunk
N_FRAMES = exact_div(N_SAMPLES, HOP_LENGTH) # 3000: number of frames in a mel spectrogram input
def load_audio(file: str, sr: int = SAMPLE_RATE):
"""
Open an audio file and read as mono waveform, resampling as necessary
Parameters
----------
file: str
The audio file to open
sr: int
The sample rate to resample the audio if necessary
Returns
-------
A NumPy array containing the audio waveform, in float32 dtype.
"""
try:
# This launches a subprocess to decode audio while down-mixing and resampling as necessary.
# Requires the ffmpeg CLI and `ffmpeg-python` package to be installed.
out, _ = (
ffmpeg.input(file, threads=0)
.output("-", format="s16le", acodec="pcm_s16le", ac=1, ar=sr)
.run(cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True)
)
except ffmpeg.Error as e:
raise RuntimeError(f"Failed to load audio: {e.stderr.decode()}") from e
return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0
def pad_or_trim(array, length: int = N_SAMPLES, *, axis: int = -1):
"""
Pad or trim the audio array to N_SAMPLES, as expected by the encoder.
"""
if torch.is_tensor(array):
if array.shape[axis] > length:
array = array.index_select(dim=axis, index=torch.arange(length, device=array.device))
if array.shape[axis] < length:
pad_widths = [(0, 0)] * array.ndim
pad_widths[axis] = (0, length - array.shape[axis])
array = F.pad(array, [pad for sizes in pad_widths[::-1] for pad in sizes])
else:
if array.shape[axis] > length:
array = array.take(indices=range(length), axis=axis)
if array.shape[axis] < length:
pad_widths = [(0, 0)] * array.ndim
pad_widths[axis] = (0, length - array.shape[axis])
array = np.pad(array, pad_widths)
return array
@lru_cache(maxsize=None)
def mel_filters(device, n_mels: int = N_MELS) -> torch.Tensor:
"""
    Load the mel filterbank matrix for projecting STFT into a Mel spectrogram.
    This allows decoupling the librosa dependency; saved using:
np.savez_compressed(
"mel_filters.npz",
mel_80=librosa.filters.mel(sr=16000, n_fft=400, n_mels=80),
)
"""
assert n_mels == 80, f"Unsupported n_mels: {n_mels}"
with np.load(os.path.join(os.path.dirname(__file__), "assets", "mel_filters.npz")) as f:
return torch.from_numpy(f[f"mel_{n_mels}"]).to(device)
def log_mel_spectrogram(audio: Union[str, np.ndarray, torch.Tensor], n_mels: int = N_MELS):
"""
    Compute the log-Mel spectrogram of an audio waveform.
Parameters
----------
audio: Union[str, np.ndarray, torch.Tensor], shape = (*)
        The path to an audio file, or a NumPy array or Tensor containing the audio waveform at 16 kHz
n_mels: int
The number of Mel-frequency filters, only 80 is supported
Returns
-------
torch.Tensor, shape = (80, n_frames)
A Tensor that contains the Mel spectrogram
"""
if not torch.is_tensor(audio):
if isinstance(audio, str):
audio = load_audio(audio)
audio = torch.from_numpy(audio)
window = torch.hann_window(N_FFT).to(audio.device)
stft = torch.stft(audio, N_FFT, HOP_LENGTH, window=window, return_complex=True)
magnitudes = stft[..., :-1].abs() ** 2
filters = mel_filters(audio.device, n_mels)
mel_spec = filters @ magnitudes
log_spec = torch.clamp(mel_spec, min=1e-10).log10()
log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
log_spec = (log_spec + 4.0) / 4.0
return log_spec
|
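# The constants at the top of both files are derived from three base
# hyperparameters. A quick standalone arithmetic check of those derivations
# (the per-second rates appear only in the first of the two versions):
SAMPLE_RATE, HOP_LENGTH, CHUNK_LENGTH = 16000, 160, 30
N_SAMPLES = CHUNK_LENGTH * SAMPLE_RATE           # 480000 samples per 30 s chunk
N_FRAMES = N_SAMPLES // HOP_LENGTH               # 3000 mel frames per chunk
FRAMES_PER_SECOND = SAMPLE_RATE // HOP_LENGTH    # 100 frames/s, i.e. 10 ms hop
TOKENS_PER_SECOND = SAMPLE_RATE // (HOP_LENGTH * 2)  # 50 tokens/s at stride 2
assert (N_SAMPLES, N_FRAMES, FRAMES_PER_SECOND, TOKENS_PER_SECOND) == (480000, 3000, 100, 50)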
from .utils import _init_backend, get_audio_backend, list_audio_backends, set_audio_backend
__all__ = ["_init_backend", "get_audio_backend", "list_audio_backends", "set_audio_backend"]
|
# flake8: noqa
import torchaudio
from . import utils
from .utils import _is_backend_dispatcher_enabled, get_audio_backend, list_audio_backends, set_audio_backend
if _is_backend_dispatcher_enabled():
from torchaudio._backend.utils import get_info_func, get_load_func, get_save_func
torchaudio.info = get_info_func()
torchaudio.load = get_load_func()
torchaudio.save = get_save_func()
else:
utils._init_audio_backend()
|
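# The second __init__ above rebinds torchaudio.info/load/save at import time
# when the backend dispatcher is enabled. A hedged sketch of that gating
# pattern follows; the environment variable name is an assumption for
# illustration, not necessarily the flag torchaudio actually checks.
import os

def _is_dispatcher_enabled() -> bool:
    # Hypothetical flag name; the real code consults its own setting.
    return os.getenv("USE_BACKEND_DISPATCHER", "0") == "1"

def resolve_loader() -> str:
    # Pick one module-level implementation once, at import time.
    return "dispatcher_load" if _is_dispatcher_enabled() else "legacy_load"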
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class YOLOX(SingleStageDetector):
r"""Implementation of `YOLOX: Exceeding YOLO Series in 2021
<https://arxiv.org/abs/2107.08430>`_
Args:
backbone (:obj:`ConfigDict` or dict): The backbone config.
neck (:obj:`ConfigDict` or dict): The neck config.
bbox_head (:obj:`ConfigDict` or dict): The bbox head config.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
of YOLOX. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of YOLOX. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
list[dict], optional): Initialization config dict.
Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core.utils import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class YOLOX(SingleStageDetector):
r"""Implementation of `YOLOX: Exceeding YOLO Series in 2021
<https://arxiv.org/abs/2107.08430>`_
Args:
backbone (:obj:`ConfigDict` or dict): The backbone config.
neck (:obj:`ConfigDict` or dict): The neck config.
bbox_head (:obj:`ConfigDict` or dict): The bbox head config.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
of YOLOX. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of YOLOX. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
list[dict], optional): Initialization config dict.
Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
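# Both YOLOX variants register the class with @MODELS.register_module(), so a
# config dict with type='YOLOX' can be built through the registry. A hedged
# usage sketch (the component type names are placeholders, and MODELS.build
# only works inside an mmdet environment):
cfg = dict(
    type='YOLOX',
    backbone=dict(type='CSPDarknet'),   # assumed backbone name
    neck=dict(type='YOLOXPAFPN'),       # assumed neck name
    bbox_head=dict(type='YOLOXHead'),   # assumed head name
)
# model = MODELS.build(cfg)  # uncomment with mmdet installed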
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import inspect
from typing import List, Optional
import torch
import torch.nn as nn
from mmengine.registry import OPTIMIZER_CONSTRUCTORS, OPTIMIZERS
def register_torch_optimizers() -> List[str]:
"""Register optimizers in ``torch.optim`` to the ``OPTIMIZERS`` registry.
Returns:
List[str]: A list of registered optimizers' name.
"""
torch_optimizers = []
for module_name in dir(torch.optim):
if module_name.startswith('__'):
continue
_optim = getattr(torch.optim, module_name)
if inspect.isclass(_optim) and issubclass(_optim,
torch.optim.Optimizer):
OPTIMIZERS.register_module(module=_optim)
torch_optimizers.append(module_name)
return torch_optimizers
TORCH_OPTIMIZERS = register_torch_optimizers()
def build_optimizer(
model: nn.Module,
cfg: dict,
default_scope: Optional[str] = None) -> torch.optim.Optimizer:
"""Build function of optimizer.
If ``constructor`` is set in the ``cfg``, this method will build an
optimizer constructor, and use optimizer constructor to build the
optimizer. If ``constructor`` is not set, the
``DefaultOptimizerConstructor`` will be used by default.
Args:
model (nn.Module): Model to be optimized.
cfg (dict): Config of optimizer and optimizer constructor.
default_scope (str, optional): The ``default_scope`` is used to
reset the current registry. Defaults to None.
Returns:
torch.optim.Optimizer: The built optimizer.
"""
optimizer_cfg = copy.deepcopy(cfg)
constructor_type = optimizer_cfg.pop('constructor',
'DefaultOptimizerConstructor')
paramwise_cfg = optimizer_cfg.pop('paramwise_cfg', None)
optim_constructor = OPTIMIZER_CONSTRUCTORS.build(
dict(
type=constructor_type,
optimizer_cfg=optimizer_cfg,
paramwise_cfg=paramwise_cfg),
default_scope=default_scope)
optimizer = optim_constructor(model, default_scope=default_scope)
return optimizer
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import inspect
from typing import Callable, List
import torch
import torch.nn as nn
from mmengine.registry import OPTIMIZER_CONSTRUCTORS, OPTIMIZERS
def register_torch_optimizers() -> List[str]:
torch_optimizers = []
for module_name in dir(torch.optim):
if module_name.startswith('__'):
continue
_optim = getattr(torch.optim, module_name)
if inspect.isclass(_optim) and issubclass(_optim,
torch.optim.Optimizer):
OPTIMIZERS.register_module(module=_optim)
torch_optimizers.append(module_name)
return torch_optimizers
TORCH_OPTIMIZERS = register_torch_optimizers()
def build_optimizer_constructor(cfg: dict) -> Callable:
return OPTIMIZER_CONSTRUCTORS.build(cfg)
def build_optimizer(model: nn.Module, cfg: dict) -> torch.optim.Optimizer:
optimizer_cfg = copy.deepcopy(cfg)
constructor_type = optimizer_cfg.pop('constructor',
'DefaultOptimizerConstructor')
paramwise_cfg = optimizer_cfg.pop('paramwise_cfg', None)
optim_constructor = build_optimizer_constructor(
dict(
type=constructor_type,
optimizer_cfg=optimizer_cfg,
paramwise_cfg=paramwise_cfg))
optimizer = optim_constructor(model)
return optimizer
|
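# A hedged usage sketch for build_optimizer above: 'SGD' resolves because
# register_torch_optimizers() registers every torch.optim class in the
# OPTIMIZERS registry. The build call itself needs mmengine at runtime, so
# it is left commented out.
import torch.nn as nn

model = nn.Linear(4, 2)
optim_cfg = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=1e-4)
# optimizer = build_optimizer(model, optim_cfg)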
import os
import subprocess
import sys
import pytest
from xgboost import testing as tm
sys.path.append("tests/python")
import test_demos as td # noqa
@pytest.mark.skipif(**tm.no_cupy())
def test_data_iterator():
script = os.path.join(td.PYTHON_DEMO_DIR, "quantile_data_iterator.py")
cmd = ["python", script]
subprocess.check_call(cmd)
def test_update_process_demo():
script = os.path.join(td.PYTHON_DEMO_DIR, "update_process.py")
cmd = ["python", script]
subprocess.check_call(cmd)
def test_categorical_demo():
script = os.path.join(td.PYTHON_DEMO_DIR, "categorical.py")
cmd = ["python", script]
subprocess.check_call(cmd)
|
import os
import subprocess
import sys
import pytest
from xgboost import testing as tm
sys.path.append("tests/python")
import test_demos as td # noqa
@pytest.mark.skipif(**tm.no_cupy())
def test_data_iterator():
script = os.path.join(td.PYTHON_DEMO_DIR, 'quantile_data_iterator.py')
cmd = ['python', script]
subprocess.check_call(cmd)
def test_update_process_demo():
script = os.path.join(td.PYTHON_DEMO_DIR, 'update_process.py')
cmd = ['python', script]
subprocess.check_call(cmd)
def test_categorical_demo():
script = os.path.join(td.PYTHON_DEMO_DIR, 'categorical.py')
cmd = ['python', script]
subprocess.check_call(cmd)
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional, Tuple
from mmengine.utils import is_list_of
def calc_dynamic_intervals(
start_interval: int,
dynamic_interval_list: Optional[List[Tuple[int, int]]] = None
) -> Tuple[List[int], List[int]]:
"""Calculate dynamic intervals.
Args:
start_interval (int): The interval used in the beginning.
dynamic_interval_list (List[Tuple[int, int]], optional): The
first element in the tuple is a milestone and the second
            element is an interval. The interval is used after the
corresponding milestone. Defaults to None.
Returns:
        Tuple[List[int], List[int]]: a list of milestones and their corresponding
intervals.
"""
if dynamic_interval_list is None:
return [0], [start_interval]
assert is_list_of(dynamic_interval_list, tuple)
dynamic_milestones = [0]
dynamic_milestones.extend(
[dynamic_interval[0] for dynamic_interval in dynamic_interval_list])
dynamic_intervals = [start_interval]
dynamic_intervals.extend(
[dynamic_interval[1] for dynamic_interval in dynamic_interval_list])
return dynamic_milestones, dynamic_intervals
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional, Tuple
from mmengine.utils.misc import is_list_of
def calc_dynamic_intervals(
start_interval: int,
dynamic_interval_list: Optional[List[Tuple[int, int]]] = None
) -> Tuple[List[int], List[int]]:
"""Calculate dynamic intervals.
Args:
start_interval (int): The interval used in the beginning.
dynamic_interval_list (List[Tuple[int, int]], optional): The
first element in the tuple is a milestone and the second
            element is an interval. The interval is used after the
corresponding milestone. Defaults to None.
Returns:
        Tuple[List[int], List[int]]: a list of milestones and their corresponding
intervals.
"""
if dynamic_interval_list is None:
return [0], [start_interval]
assert is_list_of(dynamic_interval_list, tuple)
dynamic_milestones = [0]
dynamic_milestones.extend(
[dynamic_interval[0] for dynamic_interval in dynamic_interval_list])
dynamic_intervals = [start_interval]
dynamic_intervals.extend(
[dynamic_interval[1] for dynamic_interval in dynamic_interval_list])
return dynamic_milestones, dynamic_intervals
|
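# A worked example of calc_dynamic_intervals: evaluate every 10 epochs by
# default, then every 2 epochs once epoch 280 is reached. The same logic is
# repeated inline so the check runs standalone.
from typing import List, Optional, Tuple

def _calc(start: int,
          dynamic: Optional[List[Tuple[int, int]]] = None
          ) -> Tuple[List[int], List[int]]:
    if dynamic is None:
        return [0], [start]
    return [0] + [m for m, _ in dynamic], [start] + [i for _, i in dynamic]

assert _calc(10, [(280, 2)]) == ([0, 280], [10, 2])
assert _calc(10) == ([0], [10])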
from __future__ import annotations
from collections.abc import Mapping
from types import ModuleType as Namespace
from typing import (
TYPE_CHECKING,
Literal,
Protocol,
TypeAlias,
TypedDict,
TypeVar,
final,
)
if TYPE_CHECKING:
from _typeshed import Incomplete
SupportsBufferProtocol: TypeAlias = Incomplete
Array: TypeAlias = Incomplete
Device: TypeAlias = Incomplete
DType: TypeAlias = Incomplete
else:
SupportsBufferProtocol = object
Array = object
Device = object
DType = object
_T_co = TypeVar("_T_co", covariant=True)
# These "Just" types are equivalent to the `Just` type from the `optype` library,
# apart from them not being `@runtime_checkable`.
# - docs: https://github.com/jorenham/optype/blob/master/README.md#just
# - code: https://github.com/jorenham/optype/blob/master/optype/_core/_just.py
@final
class JustInt(Protocol):
@property
def __class__(self, /) -> type[int]: ...
@__class__.setter
def __class__(self, value: type[int], /) -> None: ... # pyright: ignore[reportIncompatibleMethodOverride]
@final
class JustFloat(Protocol):
@property
def __class__(self, /) -> type[float]: ...
@__class__.setter
def __class__(self, value: type[float], /) -> None: ... # pyright: ignore[reportIncompatibleMethodOverride]
@final
class JustComplex(Protocol):
@property
def __class__(self, /) -> type[complex]: ...
@__class__.setter
def __class__(self, value: type[complex], /) -> None: ... # pyright: ignore[reportIncompatibleMethodOverride]
#
class NestedSequence(Protocol[_T_co]):
def __getitem__(self, key: int, /) -> _T_co | NestedSequence[_T_co]: ...
def __len__(self, /) -> int: ...
class SupportsArrayNamespace(Protocol[_T_co]):
def __array_namespace__(self, /, *, api_version: str | None) -> _T_co: ...
class HasShape(Protocol[_T_co]):
@property
def shape(self, /) -> _T_co: ...
# Return type of `__array_namespace_info__.capabilities`
Capabilities = TypedDict(
"Capabilities",
{
"boolean indexing": bool,
"data-dependent shapes": bool,
"max dimensions": int,
},
)
# Return type of `__array_namespace_info__.default_dtypes`
DefaultDTypes = TypedDict(
"DefaultDTypes",
{
"real floating": DType,
"complex floating": DType,
"integral": DType,
"indexing": DType,
},
)
_DTypeKind: TypeAlias = Literal[
"bool",
"signed integer",
"unsigned integer",
"integral",
"real floating",
"complex floating",
"numeric",
]
# Type of the `kind` parameter in `__array_namespace_info__.dtypes`
DTypeKind: TypeAlias = _DTypeKind | tuple[_DTypeKind, ...]
# `__array_namespace_info__.dtypes(kind="bool")`
class DTypesBool(TypedDict):
bool: DType
# `__array_namespace_info__.dtypes(kind="signed integer")`
class DTypesSigned(TypedDict):
int8: DType
int16: DType
int32: DType
int64: DType
# `__array_namespace_info__.dtypes(kind="unsigned integer")`
class DTypesUnsigned(TypedDict):
uint8: DType
uint16: DType
uint32: DType
uint64: DType
# `__array_namespace_info__.dtypes(kind="integral")`
class DTypesIntegral(DTypesSigned, DTypesUnsigned):
pass
# `__array_namespace_info__.dtypes(kind="real floating")`
class DTypesReal(TypedDict):
float32: DType
float64: DType
# `__array_namespace_info__.dtypes(kind="complex floating")`
class DTypesComplex(TypedDict):
complex64: DType
complex128: DType
# `__array_namespace_info__.dtypes(kind="numeric")`
class DTypesNumeric(DTypesIntegral, DTypesReal, DTypesComplex):
pass
# `__array_namespace_info__.dtypes(kind=None)` (default)
class DTypesAll(DTypesBool, DTypesNumeric):
pass
# `__array_namespace_info__.dtypes(kind=?)` (fallback)
DTypesAny: TypeAlias = Mapping[str, DType]
__all__ = [
"Array",
"Capabilities",
"DType",
"DTypeKind",
"DTypesAny",
"DTypesAll",
"DTypesBool",
"DTypesNumeric",
"DTypesIntegral",
"DTypesSigned",
"DTypesUnsigned",
"DTypesReal",
"DTypesComplex",
"DefaultDTypes",
"Device",
"HasShape",
"Namespace",
"JustInt",
"JustFloat",
"JustComplex",
"NestedSequence",
"SupportsArrayNamespace",
"SupportsBufferProtocol",
]
def __dir__() -> list[str]:
return __all__
|
from __future__ import annotations
__all__ = [
"NestedSequence",
"SupportsBufferProtocol",
]
from types import ModuleType
from typing import (
Any,
TypeVar,
Protocol,
)
_T_co = TypeVar("_T_co", covariant=True)
class NestedSequence(Protocol[_T_co]):
def __getitem__(self, key: int, /) -> _T_co | NestedSequence[_T_co]: ...
def __len__(self, /) -> int: ...
SupportsBufferProtocol = Any
Array = Any
Device = Any
DType = Any
Namespace = ModuleType
|
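# Capabilities and DefaultDTypes above use the functional TypedDict syntax
# because keys such as "boolean indexing" contain spaces or hyphens and
# cannot be spelled as class attributes. A standalone illustration:
from typing import TypedDict

Caps = TypedDict(
    "Caps",
    {"boolean indexing": bool, "data-dependent shapes": bool, "max dimensions": int},
)

caps: Caps = {
    "boolean indexing": True,
    "data-dependent shapes": True,
    "max dimensions": 64,
}
assert caps["max dimensions"] == 64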
from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.ndarray import NdArray
MAX_INT_16 = 2**15
T = TypeVar('T', bound='AudioNdArray')
@_register_proto(proto_type_name='audio_ndarray')
class AudioNdArray(AbstractAudioTensor, NdArray):
"""
Subclass of NdArray, to represent an audio tensor.
Adds audio-specific features to the tensor.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
from pydantic import parse_obj_as
from docarray import Document
from docarray.typing import AudioNdArray, AudioUrl
import numpy as np
class MyAudioDoc(Document):
title: str
audio_tensor: Optional[AudioNdArray]
url: Optional[AudioUrl]
# from tensor
doc_1 = MyAudioDoc(
title='my_first_audio_doc',
audio_tensor=np.random.rand(1000, 2),
)
doc_1.audio_tensor.save_to_wav_file(file_path='path/to/file_1.wav')
# from url
doc_2 = MyAudioDoc(
title='my_second_audio_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.audio_tensor = parse_obj_as(AudioNdArray, doc_2.url.load())
doc_2.audio_tensor.save_to_wav_file(file_path='path/to/file_2.wav')
"""
def to_audio_bytes(self):
tensor = (self * MAX_INT_16).astype('<h')
return tensor.tobytes()
|
from typing import TypeVar
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.ndarray import NdArray
MAX_INT_16 = 2**15
T = TypeVar('T', bound='AudioNdArray')
class AudioNdArray(AbstractAudioTensor, NdArray):
"""
Subclass of NdArray, to represent an audio tensor.
Adds audio-specific features to the tensor.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
from pydantic import parse_obj_as
from docarray import Document
from docarray.typing import AudioNdArray, AudioUrl
import numpy as np
class MyAudioDoc(Document):
title: str
audio_tensor: Optional[AudioNdArray]
url: Optional[AudioUrl]
# from tensor
doc_1 = MyAudioDoc(
title='my_first_audio_doc',
audio_tensor=np.random.rand(1000, 2),
)
doc_1.audio_tensor.save_to_wav_file(file_path='path/to/file_1.wav')
# from url
doc_2 = MyAudioDoc(
title='my_second_audio_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.audio_tensor = parse_obj_as(AudioNdArray, doc_2.url.load())
doc_2.audio_tensor.save_to_wav_file(file_path='path/to/file_2.wav')
"""
_PROTO_FIELD_NAME = 'audio_ndarray'
def to_audio_bytes(self):
tensor = (self * MAX_INT_16).astype('<h')
return tensor.tobytes()
|
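# A quick numeric check of the float -> 16-bit PCM conversion performed by
# to_audio_bytes above: samples in [-1, 1) scale by 2**15 and are stored as
# little-endian int16 ('<h').
import numpy as np

MAX_INT_16 = 2**15
samples = np.array([0.0, 0.5, -0.5])
pcm = (samples * MAX_INT_16).astype('<h')
assert pcm.tolist() == [0, 16384, -16384]
assert len(pcm.tobytes()) == 2 * len(samples)  # two bytes per sample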
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .compat_config import compat_cfg
from .dist_utils import (all_reduce_dict, allreduce_grads, reduce_mean,
sync_random_seed)
from .logger import get_caller_name, log_img_scale
from .memory import AvoidCUDAOOM, AvoidOOM
from .misc import (find_latest_checkpoint, get_test_pipeline_cfg,
update_data_root)
from .replace_cfg_vals import replace_cfg_vals
from .setup_env import (register_all_modules, setup_cache_size_limit_of_dynamo,
setup_multi_processes)
from .split_batch import split_batch
from .typing_utils import (ConfigType, InstanceList, MultiConfig,
OptConfigType, OptInstanceList, OptMultiConfig,
OptPixelList, PixelList, RangeType)
__all__ = [
'collect_env', 'find_latest_checkpoint', 'update_data_root',
'setup_multi_processes', 'get_caller_name', 'log_img_scale', 'compat_cfg',
'split_batch', 'register_all_modules', 'replace_cfg_vals', 'AvoidOOM',
'AvoidCUDAOOM', 'all_reduce_dict', 'allreduce_grads', 'reduce_mean',
'sync_random_seed', 'ConfigType', 'InstanceList', 'MultiConfig',
'OptConfigType', 'OptInstanceList', 'OptMultiConfig', 'OptPixelList',
'PixelList', 'RangeType', 'get_test_pipeline_cfg',
'setup_cache_size_limit_of_dynamo'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .compat_config import compat_cfg
from .dist_utils import (all_reduce_dict, allreduce_grads, reduce_mean,
sync_random_seed)
from .logger import get_caller_name, log_img_scale
from .memory import AvoidCUDAOOM, AvoidOOM
from .misc import (find_latest_checkpoint, get_test_pipeline_cfg,
update_data_root)
from .replace_cfg_vals import replace_cfg_vals
from .setup_env import register_all_modules, setup_multi_processes
from .split_batch import split_batch
from .typing_utils import (ConfigType, InstanceList, MultiConfig,
OptConfigType, OptInstanceList, OptMultiConfig,
OptPixelList, PixelList, RangeType)
__all__ = [
'collect_env', 'find_latest_checkpoint', 'update_data_root',
'setup_multi_processes', 'get_caller_name', 'log_img_scale', 'compat_cfg',
'split_batch', 'register_all_modules', 'replace_cfg_vals', 'AvoidOOM',
'AvoidCUDAOOM', 'all_reduce_dict', 'allreduce_grads', 'reduce_mean',
'sync_random_seed', 'ConfigType', 'InstanceList', 'MultiConfig',
'OptConfigType', 'OptInstanceList', 'OptMultiConfig', 'OptPixelList',
'PixelList', 'RangeType', 'get_test_pipeline_cfg'
]
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
norm_cfg = dict(type='BN', requires_grad=True)
image_size = (640, 640)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
model = dict(
data_preprocessor=dict(pad_size_divisor=64, batch_augments=batch_augments),
backbone=dict(norm_cfg=norm_cfg, norm_eval=False),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
norm_cfg=norm_cfg,
num_outs=5),
roi_head=dict(
bbox_head=dict(norm_cfg=norm_cfg), mask_head=dict(norm_cfg=norm_cfg)))
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.8, 1.2),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
allow_negative_crop=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='Resize', scale=image_size, keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=8, num_workers=4, dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# learning policy
max_epochs = 50
train_cfg = dict(max_epochs=max_epochs, val_interval=2)
param_scheduler = [
dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[30, 40],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001),
paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True),
clip_grad=None)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
norm_cfg = dict(type='BN', requires_grad=True)
image_size = (640, 640)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
model = dict(
data_preprocessor=dict(pad_size_divisor=64, batch_augments=batch_augments),
backbone=dict(norm_cfg=norm_cfg, norm_eval=False),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
norm_cfg=norm_cfg,
num_outs=5),
roi_head=dict(
bbox_head=dict(norm_cfg=norm_cfg), mask_head=dict(norm_cfg=norm_cfg)))
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.8, 1.2),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
allow_negative_crop=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=image_size, keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=8, num_workers=4, dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# learning policy
max_epochs = 50
train_cfg = dict(max_epochs=max_epochs, val_interval=2)
param_scheduler = [
dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[30, 40],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001),
paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True),
clip_grad=None)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
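# Both configs pin auto_scale_lr to base_batch_size=64 (8 GPUs x 8 samples),
# meaning lr=0.08 is defined for a total batch of 64 and is scaled linearly
# otherwise. A standalone check of that rule:
def scaled_lr(base_lr: float, base_batch_size: int, actual_batch_size: int) -> float:
    # Linear scaling rule: lr grows/shrinks with the total batch size.
    return base_lr * actual_batch_size / base_batch_size

assert scaled_lr(0.08, 64, 64) == 0.08
assert scaled_lr(0.08, 64, 32) == 0.04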
# Copyright (c) OpenMMLab. All rights reserved.
import math
import torch
from torch.utils.data import DistributedSampler as _DistributedSampler
from mmdet.core.utils import sync_random_seed
class DistributedSampler(_DistributedSampler):
def __init__(self,
dataset,
num_replicas=None,
rank=None,
shuffle=True,
seed=0):
super().__init__(
dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
# In distributed sampling, different ranks should sample
# non-overlapped data in the dataset. Therefore, this function
# is used to make sure that each rank shuffles the data indices
# in the same order based on the same seed. Then different ranks
# could use different indices to select non-overlapped data from the
# same data list.
self.seed = sync_random_seed(seed)
def __iter__(self):
# deterministically shuffle based on epoch
if self.shuffle:
g = torch.Generator()
# When :attr:`shuffle=True`, this ensures all replicas
# use a different random ordering for each epoch.
# Otherwise, the next iteration of this sampler will
# yield the same ordering.
g.manual_seed(self.epoch + self.seed)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
# add extra samples to make it evenly divisible
# in case that indices is shorter than half of total_size
indices = (indices *
math.ceil(self.total_size / len(indices)))[:self.total_size]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
import torch
from torch.utils.data import DistributedSampler as _DistributedSampler
from mmdet.core.utils import sync_random_seed
class DistributedSampler(_DistributedSampler):
def __init__(self,
dataset,
num_replicas=None,
rank=None,
shuffle=True,
seed=0):
super().__init__(
dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
# Must be the same across all workers. If None, will use a
# random seed shared among workers
# (require synchronization among all workers)
self.seed = sync_random_seed(seed)
def __iter__(self):
# deterministically shuffle based on epoch
if self.shuffle:
g = torch.Generator()
g.manual_seed(self.epoch + self.seed)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
# add extra samples to make it evenly divisible
# in case that indices is shorter than half of total_size
indices = (indices *
math.ceil(self.total_size / len(indices)))[:self.total_size]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
|
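# A worked example of the padding and strided subsampling in __iter__ above:
# 10 samples across 4 replicas pad to total_size=12, and each rank takes a
# stride-num_replicas slice starting at its own rank index.
import math

indices = list(range(10))
total_size, num_replicas, rank = 12, 4, 1
indices = (indices * math.ceil(total_size / len(indices)))[:total_size]
assert indices == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1]
assert indices[rank:total_size:num_replicas] == [1, 5, 9]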
"""
This example measures the inference speed of a given model
Usage:
python evaluation_inference_speed.py
OR
python evaluation_inference_speed.py model_name
"""
from sentence_transformers import SentenceTransformer, util
import sys
import os
import time
import torch
import gzip
import csv
# Limit torch to 4 threads
torch.set_num_threads(4)
model_name = sys.argv[1] if len(sys.argv) > 1 else "bert-base-nli-mean-tokens"
# Load a named sentence model (based on BERT). This will download the model from our server.
# Alternatively, you can also pass a filepath to SentenceTransformer()
model = SentenceTransformer(model_name)
nli_dataset_path = "datasets/AllNLI.tsv.gz"
sentences = set()
max_sentences = 100000
# Download datasets if needed
if not os.path.exists(nli_dataset_path):
util.http_get("https://sbert.net/datasets/AllNLI.tsv.gz", nli_dataset_path)
with gzip.open(nli_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
sentences.add(row["sentence1"])
if len(sentences) >= max_sentences:
break
sentences = list(sentences)
print("Model Name:", model_name)
print("Number of sentences:", len(sentences))
for i in range(3):
print("Run", i)
start_time = time.time()
emb = model.encode(sentences, batch_size=32)
end_time = time.time()
diff_time = end_time - start_time
print("Done after {:.2f} seconds".format(diff_time))
print("Speed: {:.2f} sentences / second".format(len(sentences) / diff_time))
print("=====")
|
"""
This example measures the inference speed of a given model
Usage:
python evaluation_inference_speed.py
OR
python evaluation_inference_speed.py model_name
"""
from sentence_transformers import SentenceTransformer, util
import sys
import os
import time
import torch
import gzip
import csv
# Limit torch to 4 threads
torch.set_num_threads(4)
model_name = sys.argv[1] if len(sys.argv) > 1 else "bert-base-nli-mean-tokens"
# Load a named sentence model (based on BERT). This will download the model from our server.
# Alternatively, you can also pass a filepath to SentenceTransformer()
model = SentenceTransformer(model_name)
nli_dataset_path = "datasets/AllNLI.tsv.gz"
sentences = set()
max_sentences = 100000
# Download datasets if needed
if not os.path.exists(nli_dataset_path):
util.http_get("https://sbert.net/datasets/AllNLI.tsv.gz", nli_dataset_path)
with gzip.open(nli_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
sentences.add(row["sentence1"])
if len(sentences) >= max_sentences:
break
sentences = list(sentences)
print("Model Name:", model_name)
print("Number of sentences:", len(sentences))
for i in range(3):
print("Run", i)
start_time = time.time()
emb = model.encode(sentences, batch_size=32)
end_time = time.time()
diff_time = end_time - start_time
print("Done after {:.2f} seconds".format(diff_time))
print("Speed: {:.2f} sentences / second".format(len(sentences) / diff_time))
print("=====")
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../common/lsj-100e_coco-instance.py'
]
image_size = (1024, 1024)
batch_augments = [
dict(type='BatchFixedSizePad', size=image_size, pad_mask=True)
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN that handles empty tensor in head. It can be changed to
# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True)
model = dict(
# use caffe norm
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
# pad_size_divisor=32 is unnecessary in training but necessary
# in testing.
pad_size_divisor=32,
batch_augments=batch_augments),
backbone=dict(
frozen_stages=-1,
norm_eval=False,
norm_cfg=norm_cfg,
init_cfg=None,
style='caffe'),
neck=dict(norm_cfg=norm_cfg),
rpn_head=dict(num_convs=2),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=head_norm_cfg),
mask_head=dict(norm_cfg=head_norm_cfg)))
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
# Use RepeatDataset to speed up training
train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../common/lsj-100e_coco-instance.py'
]
image_size = (1024, 1024)
batch_augments = [
dict(type='BatchFixedSizePad', size=image_size, pad_mask=True)
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN that handles empty tensor in head. It can be changed to
# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True)
model = dict(
# use caffe norm
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
# pad_size_divisor=32 is unnecessary in training but necessary
# in testing.
pad_size_divisor=32,
batch_augments=batch_augments),
backbone=dict(
frozen_stages=-1,
norm_eval=False,
norm_cfg=norm_cfg,
init_cfg=None,
style='caffe'),
neck=dict(norm_cfg=norm_cfg),
rpn_head=dict(num_convs=2),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=head_norm_cfg),
mask_head=dict(norm_cfg=head_norm_cfg)))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
# Use RepeatDataset to speed up training
train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))
|
import json
import logging
import re
import zipfile
from pathlib import Path
from typing import Dict, Iterator, List, Union
from langchain_core.chat_loaders import BaseChatLoader
from langchain_core.chat_sessions import ChatSession
from langchain_core.messages import AIMessage, HumanMessage
logger = logging.getLogger(__name__)
class SlackChatLoader(BaseChatLoader):
"""Load `Slack` conversations from a dump zip file."""
def __init__(
self,
path: Union[str, Path],
):
"""
Initialize the chat loader with the path to the exported Slack dump zip file.
:param path: Path to the exported Slack dump zip file.
"""
self.zip_path = path if isinstance(path, Path) else Path(path)
if not self.zip_path.exists():
raise FileNotFoundError(f"File {self.zip_path} not found")
@staticmethod
def _load_single_chat_session(messages: List[Dict]) -> ChatSession:
results: List[Union[AIMessage, HumanMessage]] = []
previous_sender = None
for message in messages:
if not isinstance(message, dict):
continue
text = message.get("text", "")
timestamp = message.get("ts", "")
sender = message.get("user", "")
if not sender:
continue
skip_pattern = re.compile(
r"<@U\d+> has joined the channel", flags=re.IGNORECASE
)
if skip_pattern.match(text):
continue
if sender == previous_sender:
results[-1].content += "\n\n" + text
results[-1].additional_kwargs["events"].append(
{"message_time": timestamp}
)
else:
results.append(
HumanMessage(
role=sender,
content=text,
additional_kwargs={
"sender": sender,
"events": [{"message_time": timestamp}],
},
)
)
previous_sender = sender
return ChatSession(messages=results)
@staticmethod
def _read_json(zip_file: zipfile.ZipFile, file_path: str) -> List[dict]:
"""Read JSON data from a zip subfile."""
with zip_file.open(file_path, "r") as f:
data = json.load(f)
if not isinstance(data, list):
raise ValueError(f"Expected list of dictionaries, got {type(data)}")
return data
def lazy_load(self) -> Iterator[ChatSession]:
"""
Lazy load the chat sessions from the Slack dump file and yield them
in the required format.
:return: Iterator of chat sessions containing messages.
"""
with zipfile.ZipFile(str(self.zip_path), "r") as zip_file:
for file_path in zip_file.namelist():
if file_path.endswith(".json"):
messages = self._read_json(zip_file, file_path)
yield self._load_single_chat_session(messages)
|
import json
import logging
import re
import zipfile
from pathlib import Path
from typing import Dict, Iterator, List, Union
from langchain_core.chat_loaders import BaseChatLoader
from langchain_core.chat_sessions import ChatSession
from langchain_core.messages import AIMessage, HumanMessage
logger = logging.getLogger(__name__)
class SlackChatLoader(BaseChatLoader):
"""Load `Slack` conversations from a dump zip file."""
def __init__(
self,
path: Union[str, Path],
):
"""
Initialize the chat loader with the path to the exported Slack dump zip file.
:param path: Path to the exported Slack dump zip file.
"""
self.zip_path = path if isinstance(path, Path) else Path(path)
if not self.zip_path.exists():
raise FileNotFoundError(f"File {self.zip_path} not found")
@staticmethod
def _load_single_chat_session(messages: List[Dict]) -> ChatSession:
results: List[Union[AIMessage, HumanMessage]] = []
previous_sender = None
for message in messages:
if not isinstance(message, dict):
continue
text = message.get("text", "")
timestamp = message.get("ts", "")
sender = message.get("user", "")
if not sender:
continue
skip_pattern = re.compile(
r"<@U\d+> has joined the channel", flags=re.IGNORECASE
)
if skip_pattern.match(text):
continue
if sender == previous_sender:
results[-1].content += "\n\n" + text
results[-1].additional_kwargs["events"].append(
{"message_time": timestamp}
)
else:
results.append(
HumanMessage( # type: ignore[call-arg]
role=sender,
content=text,
additional_kwargs={
"sender": sender,
"events": [{"message_time": timestamp}],
},
)
)
previous_sender = sender
return ChatSession(messages=results)
@staticmethod
def _read_json(zip_file: zipfile.ZipFile, file_path: str) -> List[dict]:
"""Read JSON data from a zip subfile."""
with zip_file.open(file_path, "r") as f:
data = json.load(f)
if not isinstance(data, list):
raise ValueError(f"Expected list of dictionaries, got {type(data)}")
return data
def lazy_load(self) -> Iterator[ChatSession]:
"""
Lazy load the chat sessions from the Slack dump file and yield them
in the required format.
:return: Iterator of chat sessions containing messages.
"""
with zipfile.ZipFile(str(self.zip_path), "r") as zip_file:
for file_path in zip_file.namelist():
if file_path.endswith(".json"):
messages = self._read_json(zip_file, file_path)
yield self._load_single_chat_session(messages)
|
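# A quick check of the join-message filter used in _load_single_chat_session
# above: Slack's auto-generated "<@U...> has joined the channel" lines are
# dropped before messages are grouped by sender.
import re

skip_pattern = re.compile(r"<@U\d+> has joined the channel", flags=re.IGNORECASE)
assert skip_pattern.match("<@U012345> has joined the channel") is not None
assert skip_pattern.match("hello team") is None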
"""A simple progress bar for the console."""
import threading
from collections.abc import Sequence
from typing import Any, Optional
from uuid import UUID
from langchain_core.callbacks import base as base_callbacks
from langchain_core.documents import Document
from langchain_core.outputs import LLMResult
class ProgressBarCallback(base_callbacks.BaseCallbackHandler):
"""A simple progress bar for the console."""
def __init__(
self, total: int, ncols: int = 50, end_with: str = "\n", **kwargs: Any
):
"""Initialize the progress bar.
Args:
total: int, the total number of items to be processed.
ncols: int, the character width of the progress bar.
end_with: str, last string to print after progress bar reaches end.
"""
self.total = total
self.ncols = ncols
self.end_with = end_with
self.counter = 0
self.lock = threading.Lock()
self._print_bar()
def increment(self) -> None:
"""Increment the counter and update the progress bar."""
with self.lock:
self.counter += 1
self._print_bar()
def _print_bar(self) -> None:
"""Print the progress bar to the console."""
progress = self.counter / self.total
arrow = "-" * int(round(progress * self.ncols) - 1) + ">"
spaces = " " * (self.ncols - len(arrow))
end = "" if self.counter < self.total else self.end_with
print(f"\r[{arrow + spaces}] {self.counter}/{self.total}", end=end) # noqa: T201
def on_chain_error(
self,
error: BaseException,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
def on_chain_end(
self,
outputs: dict[str, Any],
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
def on_retriever_error(
self,
error: BaseException,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
def on_retriever_end(
self,
documents: Sequence[Document],
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
def on_llm_error(
self,
error: BaseException,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
def on_llm_end(
self,
response: LLMResult,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
def on_tool_error(
self,
error: BaseException,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
def on_tool_end(
self,
output: str,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
|
"""A simple progress bar for the console."""
import threading
from collections.abc import Sequence
from typing import Any, Optional
from uuid import UUID
from langchain_core.callbacks import base as base_callbacks
from langchain_core.documents import Document
from langchain_core.outputs import LLMResult
class ProgressBarCallback(base_callbacks.BaseCallbackHandler):
"""A simple progress bar for the console."""
def __init__(self, total: int, ncols: int = 50, **kwargs: Any):
"""Initialize the progress bar.
Args:
total: int, the total number of items to be processed.
ncols: int, the character width of the progress bar.
"""
self.total = total
self.ncols = ncols
self.counter = 0
self.lock = threading.Lock()
self._print_bar()
def increment(self) -> None:
"""Increment the counter and update the progress bar."""
with self.lock:
self.counter += 1
self._print_bar()
def _print_bar(self) -> None:
"""Print the progress bar to the console."""
progress = self.counter / self.total
arrow = "-" * int(round(progress * self.ncols) - 1) + ">"
spaces = " " * (self.ncols - len(arrow))
print(f"\r[{arrow + spaces}] {self.counter}/{self.total}", end="") # noqa: T201
def on_chain_error(
self,
error: BaseException,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
def on_chain_end(
self,
outputs: dict[str, Any],
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
def on_retriever_error(
self,
error: BaseException,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
def on_retriever_end(
self,
documents: Sequence[Document],
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
def on_llm_error(
self,
error: BaseException,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
def on_llm_end(
self,
response: LLMResult,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
def on_tool_error(
self,
error: BaseException,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
def on_tool_end(
self,
output: str,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
|
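# A worked example of the bar string assembled in _print_bar above, for
# counter=3, total=10, ncols=20: 30% progress renders as five dashes, an
# arrow head, and fourteen spaces.
counter, total, ncols = 3, 10, 20
progress = counter / total
arrow = "-" * (int(round(progress * ncols)) - 1) + ">"
spaces = " " * (ncols - len(arrow))
assert f"[{arrow}{spaces}]" == "[----->" + " " * 14 + "]"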