input | output
---|---
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""List of test backend names."""
import enum
import os
# LINT.IfChange(backend_name)
class DTensorTestUtilBackend(enum.Enum):
"""DTensor backend the test is being run on."""
UNSPECIFIED = 'unspecified'
CPU = 'cpu'
GPU = 'gpu'
GPU_2DEVS_BACKEND = '2gpus'
TPU = 'tpu'
TPU_STREAM_EXECUTOR = 'tpu_se'
TPU_V3_DONUT_BACKEND = 'tpu_v3_2x2'
TPU_V4_DONUT_BACKEND = 'tpu_v4_2x2'
DTENSOR_TEST_UTIL_BACKEND = DTensorTestUtilBackend(
os.getenv('DTENSOR_TEST_UTIL_BACKEND', default='unspecified')
)
# LINT.ThenChange()
|
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""List of test backend names."""
import enum
import os
# LINT.IfChange(backend_name)
class DTensorTestUtilBackend(enum.Enum):
"""DTensor backend the test is being run on."""
UNSPECIFIED = 'unspecified'
CPU = 'cpu'
GPU = 'gpu'
GPU_2DEVS_BACKEND = '2gpus'
TPU = 'tpu'
TPU_STREAM_EXECUTOR = 'tpu_se'
TPU_V3_DONUT_BACKEND = 'tpu_v3_2x2'
TPU_V4_DONUT_BACKEND = 'tpu_v4_2x2'
PATHWAYS = 'pw'
PATHWAYS_V3_DONUT_BACKEND = 'pw_v3_2x2'
DTENSOR_TEST_UTIL_BACKEND = DTensorTestUtilBackend(
os.getenv('DTENSOR_TEST_UTIL_BACKEND', default='unspecified')
)
# LINT.ThenChange()
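# A minimal sketch of how the env var value maps onto a backend; the
# module-level lookup above is fixed at import time, so we construct directly:
assert DTensorTestUtilBackend('pw_v3_2x2') is DTensorTestUtilBackend.PATHWAYS_V3_DONUT_BACKEND
assert DTensorTestUtilBackend('unspecified') is DTensorTestUtilBackend.UNSPECIFIED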
|
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import ContributorDetails, SchemaField
class ReadCsvBlock(Block):
class Input(BlockSchema):
contents: str = SchemaField(
description="The contents of the CSV file to read",
placeholder="a, b, c\n1,2,3\n4,5,6",
)
delimiter: str = SchemaField(
description="The delimiter used in the CSV file",
default=",",
)
quotechar: str = SchemaField(
description="The character used to quote fields",
default='"',
)
escapechar: str = SchemaField(
description="The character used to escape the delimiter",
default="\\",
)
has_header: bool = SchemaField(
description="Whether the CSV file has a header row",
default=True,
)
skip_rows: int = SchemaField(
description="The number of rows to skip from the start of the file",
default=0,
)
strip: bool = SchemaField(
description="Whether to strip whitespace from the values",
default=True,
)
        skip_columns: list[str] = SchemaField(
            description="Names (or positional indices, as strings) of columns to skip",
            default_factory=list,
        )
class Output(BlockSchema):
row: dict[str, str] = SchemaField(
description="The data produced from each row in the CSV file"
)
all_data: list[dict[str, str]] = SchemaField(
description="All the data in the CSV file as a list of rows"
)
def __init__(self):
super().__init__(
id="acf7625e-d2cb-4941-bfeb-2819fc6fc015",
input_schema=ReadCsvBlock.Input,
output_schema=ReadCsvBlock.Output,
description="Reads a CSV file and outputs the data as a list of dictionaries and individual rows via rows.",
contributors=[ContributorDetails(name="Nicholas Tindle")],
categories={BlockCategory.TEXT, BlockCategory.DATA},
test_input={
"contents": "a, b, c\n1,2,3\n4,5,6",
},
test_output=[
("row", {"a": "1", "b": "2", "c": "3"}),
("row", {"a": "4", "b": "5", "c": "6"}),
(
"all_data",
[
{"a": "1", "b": "2", "c": "3"},
{"a": "4", "b": "5", "c": "6"},
],
),
],
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
import csv
from io import StringIO
csv_file = StringIO(input_data.contents)
reader = csv.reader(
csv_file,
delimiter=input_data.delimiter,
quotechar=input_data.quotechar,
escapechar=input_data.escapechar,
)
header = None
if input_data.has_header:
header = next(reader)
if input_data.strip:
header = [h.strip() for h in header]
for _ in range(input_data.skip_rows):
next(reader)
        def process_row(row):
            data = {}
            for i, value in enumerate(row):
                # Resolve the column key: header name if available, else the
                # positional index as a string. Comparing the raw int index
                # against skip_columns (a list[str]) would never match.
                key = header[i] if (input_data.has_header and header) else str(i)
                if key not in input_data.skip_columns:
                    data[key] = value.strip() if input_data.strip else value
            return data
all_data = []
for row in reader:
processed_row = process_row(row)
all_data.append(processed_row)
yield "row", processed_row
yield "all_data", all_data
|
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import ContributorDetails, SchemaField
class ReadCsvBlock(Block):
class Input(BlockSchema):
contents: str = SchemaField(
description="The contents of the CSV file to read",
placeholder="a, b, c\n1,2,3\n4,5,6",
)
delimiter: str = SchemaField(
description="The delimiter used in the CSV file",
default=",",
)
quotechar: str = SchemaField(
description="The character used to quote fields",
default='"',
)
escapechar: str = SchemaField(
description="The character used to escape the delimiter",
default="\\",
)
has_header: bool = SchemaField(
description="Whether the CSV file has a header row",
default=True,
)
skip_rows: int = SchemaField(
description="The number of rows to skip from the start of the file",
default=0,
)
strip: bool = SchemaField(
description="Whether to strip whitespace from the values",
default=True,
)
        skip_columns: list[str] = SchemaField(
            description="Names (or positional indices, as strings) of columns to skip",
            default_factory=list,
        )
class Output(BlockSchema):
row: dict[str, str] = SchemaField(
description="The data produced from each row in the CSV file"
)
all_data: list[dict[str, str]] = SchemaField(
description="All the data in the CSV file as a list of rows"
)
def __init__(self):
super().__init__(
id="acf7625e-d2cb-4941-bfeb-2819fc6fc015",
input_schema=ReadCsvBlock.Input,
output_schema=ReadCsvBlock.Output,
description="Reads a CSV file and outputs the data as a list of dictionaries and individual rows via rows.",
contributors=[ContributorDetails(name="Nicholas Tindle")],
categories={BlockCategory.TEXT, BlockCategory.DATA},
test_input={
"contents": "a, b, c\n1,2,3\n4,5,6",
},
test_output=[
("row", {"a": "1", "b": "2", "c": "3"}),
("row", {"a": "4", "b": "5", "c": "6"}),
(
"all_data",
[
{"a": "1", "b": "2", "c": "3"},
{"a": "4", "b": "5", "c": "6"},
],
),
],
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
import csv
from io import StringIO
csv_file = StringIO(input_data.contents)
reader = csv.reader(
csv_file,
delimiter=input_data.delimiter,
quotechar=input_data.quotechar,
escapechar=input_data.escapechar,
)
header = None
if input_data.has_header:
header = next(reader)
if input_data.strip:
header = [h.strip() for h in header]
for _ in range(input_data.skip_rows):
next(reader)
        def process_row(row):
            data = {}
            for i, value in enumerate(row):
                # Resolve the column key: header name if available, else the
                # positional index as a string. Comparing the raw int index
                # against skip_columns (a list[str]) would never match.
                key = header[i] if (input_data.has_header and header) else str(i)
                if key not in input_data.skip_columns:
                    data[key] = value.strip() if input_data.strip else value
            return data
all_data = []
for row in reader:
processed_row = process_row(row)
all_data.append(processed_row)
yield "row", processed_row
yield "all_data", all_data
|
import torch
from torchaudio_unittest.common_utils import PytorchTestCase
from .tacotron2_loss_impl import (
Tacotron2LossGradcheckTests,
Tacotron2LossShapeTests,
Tacotron2LossTorchscriptTests,
)
class TestTacotron2LossShapeFloat32CPU(Tacotron2LossShapeTests, PytorchTestCase):
dtype = torch.float32
device = torch.device("cpu")
class TestTacotron2TorchscriptFloat32CPU(Tacotron2LossTorchscriptTests, PytorchTestCase):
dtype = torch.float32
device = torch.device("cpu")
class TestTacotron2GradcheckFloat64CPU(Tacotron2LossGradcheckTests, PytorchTestCase):
dtype = torch.float64 # gradcheck needs a higher numerical accuracy
device = torch.device("cpu")
|
import torch
from torchaudio_unittest.common_utils import PytorchTestCase
from .tacotron2_loss_impl import (
Tacotron2LossShapeTests,
Tacotron2LossTorchscriptTests,
Tacotron2LossGradcheckTests,
)
class TestTacotron2LossShapeFloat32CPU(Tacotron2LossShapeTests, PytorchTestCase):
dtype = torch.float32
device = torch.device("cpu")
class TestTacotron2TorchscriptFloat32CPU(Tacotron2LossTorchscriptTests, PytorchTestCase):
dtype = torch.float32
device = torch.device("cpu")
class TestTacotron2GradcheckFloat64CPU(Tacotron2LossGradcheckTests, PytorchTestCase):
dtype = torch.float64 # gradcheck needs a higher numerical accuracy
device = torch.device("cpu")
|
_base_ = './faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
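# Note: with multiscale_mode='value', the Resize transform above samples one
# (w, h) pair per image from the img_scale list, rather than sampling from a
# continuous range as multiscale_mode='range' would.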
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
_base_ = './faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
import numpy as np
from docarray.proto import NodeProto
from docarray.typing.url.any_url import AnyUrl
class ImageUrl(AnyUrl):
def _to_node_protobuf(self) -> NodeProto:
"""Convert Document into a NodeProto protobuf message. This function should
be called when the Document is nested into another Document that need to
be converted into a protobuf
:return: the nested item protobuf message
"""
return NodeProto(image_url=str(self))
def load(self) -> np.ndarray:
"""
transform the url in a image Tensor
this is just a patch we will move the function from old docarray
:return: tensor image
"""
return np.zeros((3, 224, 224))
|
import numpy as np
from .any_url import AnyUrl
class ImageUrl(AnyUrl):
def load(self) -> np.ndarray:
"""
transform the url in a image Tensor
this is just a patch we will move the function from old docarray
:return: tensor image
"""
return np.zeros((3, 224, 224))
|
import sys
from typing import Callable
import pytest
from langchain_core.runnables.base import RunnableLambda
from langchain_core.runnables.utils import (
get_function_nonlocals,
get_lambda_source,
indent_lines_after_first,
)
@pytest.mark.skipif(
sys.version_info < (3, 9), reason="Requires python version >= 3.9 to run."
)
@pytest.mark.parametrize(
("func", "expected_source"),
[
(lambda x: x * 2, "lambda x: x * 2"),
(lambda a, b: a + b, "lambda a, b: a + b"),
(lambda x: x if x > 0 else 0, "lambda x: x if x > 0 else 0"), # noqa: FURB136
],
)
def test_get_lambda_source(func: Callable, expected_source: str) -> None:
"""Test get_lambda_source function."""
source = get_lambda_source(func)
assert source == expected_source
@pytest.mark.parametrize(
("text", "prefix", "expected_output"),
[
("line 1\nline 2\nline 3", "1", "line 1\n line 2\n line 3"),
("line 1\nline 2\nline 3", "ax", "line 1\n line 2\n line 3"),
],
)
def test_indent_lines_after_first(text: str, prefix: str, expected_output: str) -> None:
"""Test indent_lines_after_first function."""
indented_text = indent_lines_after_first(text, prefix)
assert indented_text == expected_output
global_agent = RunnableLambda(lambda x: x * 3)
def test_nonlocals() -> None:
agent = RunnableLambda(lambda x: x * 2)
def my_func(value: str, agent: dict[str, str]) -> str:
return agent.get("agent_name", value)
def my_func2(value: str) -> str:
return agent.get("agent_name", value) # type: ignore[attr-defined]
def my_func3(value: str) -> str:
return agent.invoke(value)
def my_func4(value: str) -> str:
return global_agent.invoke(value)
def my_func5() -> tuple[Callable[[str], str], RunnableLambda]:
global_agent = RunnableLambda(lambda x: x * 3)
def my_func6(value: str) -> str:
return global_agent.invoke(value)
return my_func6, global_agent
assert get_function_nonlocals(my_func) == []
assert get_function_nonlocals(my_func2) == []
assert get_function_nonlocals(my_func3) == [agent.invoke]
assert get_function_nonlocals(my_func4) == [global_agent.invoke]
func, nl = my_func5()
assert get_function_nonlocals(func) == [nl.invoke]
assert RunnableLambda(my_func3).deps == [agent]
assert RunnableLambda(my_func4).deps == [global_agent]
assert RunnableLambda(func).deps == [nl]
|
import sys
from typing import Callable
import pytest
from langchain_core.runnables.base import RunnableLambda
from langchain_core.runnables.utils import (
get_function_nonlocals,
get_lambda_source,
indent_lines_after_first,
)
@pytest.mark.skipif(
sys.version_info < (3, 9), reason="Requires python version >= 3.9 to run."
)
@pytest.mark.parametrize(
("func", "expected_source"),
[
(lambda x: x * 2, "lambda x: x * 2"),
(lambda a, b: a + b, "lambda a, b: a + b"),
(lambda x: x if x > 0 else 0, "lambda x: x if x > 0 else 0"), # noqa: FURB136
],
)
def test_get_lambda_source(func: Callable, expected_source: str) -> None:
"""Test get_lambda_source function."""
source = get_lambda_source(func)
assert source == expected_source
@pytest.mark.parametrize(
("text", "prefix", "expected_output"),
[
("line 1\nline 2\nline 3", "1", "line 1\n line 2\n line 3"),
("line 1\nline 2\nline 3", "ax", "line 1\n line 2\n line 3"),
],
)
def test_indent_lines_after_first(text: str, prefix: str, expected_output: str) -> None:
"""Test indent_lines_after_first function."""
indented_text = indent_lines_after_first(text, prefix)
assert indented_text == expected_output
global_agent = RunnableLambda(lambda x: x * 3)
def test_nonlocals() -> None:
agent = RunnableLambda(lambda x: x * 2)
def my_func(input: str, agent: dict[str, str]) -> str:
return agent.get("agent_name", input)
def my_func2(input: str) -> str:
return agent.get("agent_name", input) # type: ignore[attr-defined]
def my_func3(input: str) -> str:
return agent.invoke(input)
def my_func4(input: str) -> str:
return global_agent.invoke(input)
def my_func5() -> tuple[Callable[[str], str], RunnableLambda]:
global_agent = RunnableLambda(lambda x: x * 3)
def my_func6(input: str) -> str:
return global_agent.invoke(input)
return my_func6, global_agent
assert get_function_nonlocals(my_func) == []
assert get_function_nonlocals(my_func2) == []
assert get_function_nonlocals(my_func3) == [agent.invoke]
assert get_function_nonlocals(my_func4) == [global_agent.invoke]
func, nl = my_func5()
assert get_function_nonlocals(func) == [nl.invoke]
assert RunnableLambda(my_func3).deps == [agent]
assert RunnableLambda(my_func4).deps == [global_agent]
assert RunnableLambda(func).deps == [nl]
|
import gzip
import logging
import os
from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, evaluation, losses, models, util
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Some training parameters. For the example, we use a batch_size of 128, a max sentence length (max_seq_length)
# of 32 word pieces and as model roberta-base
model_name = "roberta-base"
batch_size = 128
max_seq_length = 32
num_epochs = 1
################# Download AskUbuntu and extract training corpus #################
askubuntu_folder = "data/askubuntu"
output_path = "output/askubuntu-simcse-{}-{}-{}".format(
model_name, batch_size, datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
## Download the AskUbuntu dataset from https://github.com/taolei87/askubuntu
for filename in ["text_tokenized.txt.gz", "dev.txt", "test.txt", "train_random.txt"]:
filepath = os.path.join(askubuntu_folder, filename)
if not os.path.exists(filepath):
util.http_get("https://github.com/taolei87/askubuntu/raw/master/" + filename, filepath)
# Read the corpus
corpus = {}
dev_test_ids = set()
with gzip.open(os.path.join(askubuntu_folder, "text_tokenized.txt.gz"), "rt", encoding="utf8") as fIn:
for line in fIn:
splits = line.strip().split("\t")
id = splits[0]
title = splits[1]
corpus[id] = title
# Read dev & test dataset
def read_eval_dataset(filepath):
dataset = []
with open(filepath) as fIn:
for line in fIn:
query_id, relevant_id, candidate_ids, bm25_scores = line.strip().split("\t")
if len(relevant_id) == 0: # Skip examples without relevant entries
continue
relevant_id = relevant_id.split(" ")
candidate_ids = candidate_ids.split(" ")
negative_ids = set(candidate_ids) - set(relevant_id)
dataset.append(
{
"query": corpus[query_id],
"positive": [corpus[pid] for pid in relevant_id],
"negative": [corpus[pid] for pid in negative_ids],
}
)
dev_test_ids.add(query_id)
dev_test_ids.update(candidate_ids)
return dataset
dev_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "dev.txt"))
test_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "test.txt"))
## Now we need a list of train sentences.
## In this example we simply use all sentences that don't appear in the dev/test set
train_sentences = []
for id, sentence in corpus.items():
if id not in dev_test_ids:
train_sentences.append(InputExample(texts=[sentence, sentence]))
logging.info("{} train sentences".format(len(train_sentences)))
################# Initialize an SBERT model #################
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
# Apply mean pooling
pooling_model = models.Pooling(
word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False,
)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Train the model #################
# As Loss function, we use MultipleNegativesRankingLoss
train_dataloader = DataLoader(train_sentences, batch_size=batch_size, shuffle=True, drop_last=True)
train_loss = losses.MultipleNegativesRankingLoss(model)
# Create a dev evaluator
dev_evaluator = evaluation.RerankingEvaluator(dev_dataset, name="AskUbuntu dev")
test_evaluator = evaluation.RerankingEvaluator(test_dataset, name="AskUbuntu test")
logging.info("Dev performance before training")
dev_evaluator(model)
warmup_steps = int(num_epochs * len(train_dataloader) * 0.1)
logging.info("Start training")
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
evaluation_steps=100,
epochs=num_epochs,
warmup_steps=warmup_steps,
output_path=output_path,
show_progress_bar=True,
use_amp=True, # If your GPU does not have FP16 cores, set use_amp=False
)
latest_output_path = output_path + "-latest"
model.save(latest_output_path)
### Run test evaluation on the latest model. This is equivalent to not having a dev dataset
model = SentenceTransformer(latest_output_path)
test_evaluator(model)
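# For reference, each sample consumed by RerankingEvaluator above has the form
# produced by read_eval_dataset (titles illustrative, not from the real corpus):
# {
#     "query": "how do i install ubuntu alongside windows",
#     "positive": ["dual boot ubuntu and windows"],
#     "negative": ["change grub timeout", "..."],
# }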
|
from sentence_transformers import SentenceTransformer, LoggingHandler, InputExample
from sentence_transformers import models, util, evaluation, losses
import logging
import os
import gzip
from torch.utils.data import DataLoader
from datetime import datetime
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Some training parameters. For the example, we use a batch_size of 128, a max sentence length (max_seq_length)
# of 32 word pieces and as model roberta-base
model_name = "roberta-base"
batch_size = 128
max_seq_length = 32
num_epochs = 1
################# Download AskUbuntu and extract training corpus #################
askubuntu_folder = "data/askubuntu"
output_path = "output/askubuntu-simcse-{}-{}-{}".format(
model_name, batch_size, datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
## Download the AskUbuntu dataset from https://github.com/taolei87/askubuntu
for filename in ["text_tokenized.txt.gz", "dev.txt", "test.txt", "train_random.txt"]:
filepath = os.path.join(askubuntu_folder, filename)
if not os.path.exists(filepath):
util.http_get("https://github.com/taolei87/askubuntu/raw/master/" + filename, filepath)
# Read the corpus
corpus = {}
dev_test_ids = set()
with gzip.open(os.path.join(askubuntu_folder, "text_tokenized.txt.gz"), "rt", encoding="utf8") as fIn:
for line in fIn:
splits = line.strip().split("\t")
id = splits[0]
title = splits[1]
corpus[id] = title
# Read dev & test dataset
def read_eval_dataset(filepath):
dataset = []
with open(filepath) as fIn:
for line in fIn:
query_id, relevant_id, candidate_ids, bm25_scores = line.strip().split("\t")
if len(relevant_id) == 0: # Skip examples without relevant entries
continue
relevant_id = relevant_id.split(" ")
candidate_ids = candidate_ids.split(" ")
negative_ids = set(candidate_ids) - set(relevant_id)
dataset.append(
{
"query": corpus[query_id],
"positive": [corpus[pid] for pid in relevant_id],
"negative": [corpus[pid] for pid in negative_ids],
}
)
dev_test_ids.add(query_id)
dev_test_ids.update(candidate_ids)
return dataset
dev_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "dev.txt"))
test_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "test.txt"))
## Now we need a list of train sentences.
## In this example we simply use all sentences that don't appear in the dev/test set
train_sentences = []
for id, sentence in corpus.items():
if id not in dev_test_ids:
train_sentences.append(InputExample(texts=[sentence, sentence]))
logging.info("{} train sentences".format(len(train_sentences)))
################# Initialize an SBERT model #################
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
# Apply mean pooling
pooling_model = models.Pooling(
word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False,
)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Train the model #################
# As Loss function, we use MultipleNegativesRankingLoss
train_dataloader = DataLoader(train_sentences, batch_size=batch_size, shuffle=True, drop_last=True)
train_loss = losses.MultipleNegativesRankingLoss(model)
# Create a dev evaluator
dev_evaluator = evaluation.RerankingEvaluator(dev_dataset, name="AskUbuntu dev")
test_evaluator = evaluation.RerankingEvaluator(test_dataset, name="AskUbuntu test")
logging.info("Dev performance before training")
dev_evaluator(model)
warmup_steps = int(num_epochs * len(train_dataloader) * 0.1)
logging.info("Start training")
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
evaluation_steps=100,
epochs=num_epochs,
warmup_steps=warmup_steps,
output_path=output_path,
show_progress_bar=True,
use_amp=True, # If your GPU does not have FP16 cores, set use_amp=False
)
latest_output_path = output_path + "-latest"
model.save(latest_output_path)
### Run test evaluation on the latest model. This is equivalent to not having a dev dataset
model = SentenceTransformer(latest_output_path)
test_evaluator(model)
|
from typing import Any, Optional
import pytest
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain.callbacks import StdOutCallbackHandler
from langchain.chains.base import Chain
class FakeChain(Chain):
"""Fake chain class for testing purposes."""
be_correct: bool = True
the_input_keys: list[str] = ["foo"]
the_output_keys: list[str] = ["bar"]
@property
def input_keys(self) -> list[str]:
"""Input keys."""
return self.the_input_keys
@property
def output_keys(self) -> list[str]:
"""Output key of bar."""
return self.the_output_keys
def _call(
self,
inputs: dict[str, str],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> dict[str, str]:
return {"bar": "bar"}
def test_stdoutcallback(capsys: pytest.CaptureFixture) -> Any:
"""Test the stdout callback handler."""
chain_test = FakeChain(callbacks=[StdOutCallbackHandler(color="red")])
chain_test.invoke({"foo": "bar"})
# Capture the output
captured = capsys.readouterr()
# Assert the output is as expected
assert captured.out == (
"\n\n\x1b[1m> Entering new FakeChain "
"chain...\x1b[0m\n\n\x1b[1m> Finished chain.\x1b[0m\n"
)
|
from typing import Any, Optional
import pytest
from langchain.callbacks import StdOutCallbackHandler
from langchain.chains.base import CallbackManagerForChainRun, Chain
class FakeChain(Chain):
"""Fake chain class for testing purposes."""
be_correct: bool = True
the_input_keys: list[str] = ["foo"]
the_output_keys: list[str] = ["bar"]
@property
def input_keys(self) -> list[str]:
"""Input keys."""
return self.the_input_keys
@property
def output_keys(self) -> list[str]:
"""Output key of bar."""
return self.the_output_keys
def _call(
self,
inputs: dict[str, str],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> dict[str, str]:
return {"bar": "bar"}
def test_stdoutcallback(capsys: pytest.CaptureFixture) -> Any:
"""Test the stdout callback handler."""
chain_test = FakeChain(callbacks=[StdOutCallbackHandler(color="red")])
chain_test.invoke({"foo": "bar"})
# Capture the output
captured = capsys.readouterr()
# Assert the output is as expected
assert captured.out == (
"\n\n\x1b[1m> Entering new FakeChain "
"chain...\x1b[0m\n\n\x1b[1m> Finished chain.\x1b[0m\n"
)
|
from ._dsp import (
adsr_envelope,
exp_sigmoid,
extend_pitch,
filter_waveform,
frequency_impulse_response,
oscillator_bank,
sinc_impulse_response,
)
from ._rir import ray_tracing, simulate_rir_ism
from .functional import barkscale_fbanks, chroma_filterbank
__all__ = [
"adsr_envelope",
"exp_sigmoid",
"barkscale_fbanks",
"chroma_filterbank",
"extend_pitch",
"filter_waveform",
"frequency_impulse_response",
"oscillator_bank",
"ray_tracing",
"sinc_impulse_response",
"simulate_rir_ism",
]
|
from ._dsp import (
adsr_envelope,
exp_sigmoid,
extend_pitch,
filter_waveform,
frequency_impulse_response,
oscillator_bank,
sinc_impulse_response,
)
from ._rir import simulate_rir_ism
from .functional import barkscale_fbanks, chroma_filterbank
__all__ = [
"adsr_envelope",
"exp_sigmoid",
"barkscale_fbanks",
"chroma_filterbank",
"extend_pitch",
"filter_waveform",
"frequency_impulse_response",
"oscillator_bank",
"sinc_impulse_response",
"simulate_rir_ism",
]
|
from __future__ import annotations
from dataclasses import dataclass
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
@dataclass
class SparseEncoderTrainingArguments(SentenceTransformerTrainingArguments):
r"""
    SparseEncoderTrainingArguments extends :class:`~SentenceTransformerTrainingArguments` which itself extends
:class:`~transformers.TrainingArguments` with additional arguments specific to Sentence Transformers.
See :class:`~transformers.TrainingArguments` for the complete list of available arguments.
Args:
output_dir (`str`):
The output directory where the model checkpoints will be written.
prompts (`Union[Dict[str, Dict[str, str]], Dict[str, str], str]`, *optional*):
The prompts to use for each column in the training, evaluation and test datasets. Four formats are accepted:
1. `str`: A single prompt to use for all columns in the datasets, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
2. `Dict[str, str]`: A dictionary mapping column names to prompts, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
3. `Dict[str, str]`: A dictionary mapping dataset names to prompts. This should only be used if your training/evaluation/test
datasets are a :class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
4. `Dict[str, Dict[str, str]]`: A dictionary mapping dataset names to dictionaries mapping column names to
prompts. This should only be used if your training/evaluation/test datasets are a
:class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
batch_sampler (Union[:class:`~sentence_transformers.training_args.BatchSamplers`, `str`], *optional*):
The batch sampler to use. See :class:`~sentence_transformers.training_args.BatchSamplers` for valid options.
Defaults to ``BatchSamplers.BATCH_SAMPLER``.
multi_dataset_batch_sampler (Union[:class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`, `str`], *optional*):
The multi-dataset batch sampler to use. See :class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`
for valid options. Defaults to ``MultiDatasetBatchSamplers.PROPORTIONAL``.
router_mapping (`Optional[Dict[str, str]]`, *optional*):
A mapping of dataset column names to Router routes, like "query" or "document". This is used to specify
which Router submodule to use for each dataset. Two formats are accepted:
1. `Dict[str, str]`: A mapping of column names to routes.
2. `Dict[str, Dict[str, str]]`: A mapping of dataset names to a mapping of column names to routes for
multi-dataset training/evaluation.
learning_rate_mapping (`Optional[Dict[str, float]]`, *optional*):
A mapping of parameter name regular expressions to learning rates. This allows you to set different
learning rates for different parts of the model, e.g., `{'IDF\.*': 1e-3}` for the IDF module. This is
useful when you want to fine-tune specific parts of the model with different learning rates.
"""
|
from __future__ import annotations
from dataclasses import dataclass
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
@dataclass
class SparseEncoderTrainingArguments(SentenceTransformerTrainingArguments):
r"""
    SparseEncoderTrainingArguments extends :class:`~SentenceTransformerTrainingArguments` which itself extends
:class:`~transformers.TrainingArguments` with additional arguments specific to Sentence Transformers.
See :class:`~transformers.TrainingArguments` for the complete list of available arguments.
Args:
output_dir (`str`):
The output directory where the model checkpoints will be written.
prompts (`Union[Dict[str, Dict[str, str]], Dict[str, str], str]`, *optional*):
The prompts to use for each column in the training, evaluation and test datasets. Four formats are accepted:
1. `str`: A single prompt to use for all columns in the datasets, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
2. `Dict[str, str]`: A dictionary mapping column names to prompts, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
3. `Dict[str, str]`: A dictionary mapping dataset names to prompts. This should only be used if your training/evaluation/test
datasets are a :class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
4. `Dict[str, Dict[str, str]]`: A dictionary mapping dataset names to dictionaries mapping column names to
prompts. This should only be used if your training/evaluation/test datasets are a
:class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
batch_sampler (Union[:class:`~sentence_transformers.training_args.BatchSamplers`, `str`], *optional*):
The batch sampler to use. See :class:`~sentence_transformers.training_args.BatchSamplers` for valid options.
Defaults to ``BatchSamplers.BATCH_SAMPLER``.
multi_dataset_batch_sampler (Union[:class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`, `str`], *optional*):
The multi-dataset batch sampler to use. See :class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`
for valid options. Defaults to ``MultiDatasetBatchSamplers.PROPORTIONAL``.
router_mapping (`Optional[Dict[str, str]]`, *optional*):
A mapping of dataset names to Router routes, like "query" or "document". This is used to specify which
Router module to use for each dataset. Two formats are accepted:
1. `Dict[str, str]`: A mapping of dataset names to routes for single-dataset training/evaluation.
2. `Dict[str, Dict[str, str]]`: A mapping of dataset names to a mapping of column names to routes for
multi-dataset training/evaluation.
learning_rate_mapping (`Optional[Dict[str, float]]`, *optional*):
A mapping of parameter names to learning rates. This allows you to set different learning rates for
different parts of the model, e.g., `{'IDF\.*': 1e-3}` for the IDF module. This is useful when you want to
fine-tune specific parts of the model with different learning rates.
"""
|
from typing import Any, Optional, Union
from torchvision import tv_tensors
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.tv_tensors._bounding_boxes import CLAMPING_MODE_TYPE
class ConvertBoundingBoxFormat(Transform):
"""Convert bounding box coordinates to the given ``format``, eg from "CXCYWH" to "XYXY".
Args:
format (str or tv_tensors.BoundingBoxFormat): output bounding box format.
Possible values are defined by :class:`~torchvision.tv_tensors.BoundingBoxFormat` and
string values match the enums, e.g. "XYXY" or "XYWH" etc.
"""
_transformed_types = (tv_tensors.BoundingBoxes,)
def __init__(self, format: Union[str, tv_tensors.BoundingBoxFormat]) -> None:
super().__init__()
self.format = format
def transform(self, inpt: tv_tensors.BoundingBoxes, params: dict[str, Any]) -> tv_tensors.BoundingBoxes:
return F.convert_bounding_box_format(inpt, new_format=self.format) # type: ignore[return-value, arg-type]
class ClampBoundingBoxes(Transform):
"""Clamp bounding boxes to their corresponding image dimensions.
The clamping is done according to the bounding boxes' ``canvas_size`` meta-data.
Args:
        clamping_mode: TODOBB more docs. Default is None, which relies on the input boxes' clamping_mode attribute.
"""
def __init__(self, clamping_mode: Optional[CLAMPING_MODE_TYPE] = None) -> None:
super().__init__()
self.clamping_mode = clamping_mode
_transformed_types = (tv_tensors.BoundingBoxes,)
def transform(self, inpt: tv_tensors.BoundingBoxes, params: dict[str, Any]) -> tv_tensors.BoundingBoxes:
return F.clamp_bounding_boxes(inpt, clamping_mode=self.clamping_mode) # type: ignore[return-value]
class ClampKeyPoints(Transform):
"""Clamp keypoints to their corresponding image dimensions.
The clamping is done according to the keypoints' ``canvas_size`` meta-data.
"""
_transformed_types = (tv_tensors.KeyPoints,)
def transform(self, inpt: tv_tensors.KeyPoints, params: dict[str, Any]) -> tv_tensors.KeyPoints:
return F.clamp_keypoints(inpt) # type: ignore[return-value]
class SetClampingMode(Transform):
"""TODOBB"""
def __init__(self, clamping_mode: CLAMPING_MODE_TYPE) -> None:
super().__init__()
# TODOBB validate mode
self.clamping_mode = clamping_mode
_transformed_types = (tv_tensors.BoundingBoxes,)
def transform(self, inpt: tv_tensors.BoundingBoxes, params: dict[str, Any]) -> tv_tensors.BoundingBoxes:
out: tv_tensors.BoundingBoxes = inpt.clone() # type: ignore[assignment]
out.clamping_mode = self.clamping_mode
return out
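# A minimal usage sketch (box coordinates are made up for illustration):
import torch
boxes = tv_tensors.BoundingBoxes(
    torch.tensor([[10.0, 10.0, 50.0, 50.0]]),
    format=tv_tensors.BoundingBoxFormat.XYXY,
    canvas_size=(32, 32),
)
converted = ConvertBoundingBoxFormat("CXCYWH")(boxes)  # XYXY -> CXCYWH
clamped = ClampBoundingBoxes()(boxes)  # clamps to the 32x32 canvas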
|
from typing import Any, Union
from torchvision import tv_tensors
from torchvision.transforms.v2 import functional as F, Transform
class ConvertBoundingBoxFormat(Transform):
"""Convert bounding box coordinates to the given ``format``, eg from "CXCYWH" to "XYXY".
Args:
format (str or tv_tensors.BoundingBoxFormat): output bounding box format.
Possible values are defined by :class:`~torchvision.tv_tensors.BoundingBoxFormat` and
string values match the enums, e.g. "XYXY" or "XYWH" etc.
"""
_transformed_types = (tv_tensors.BoundingBoxes,)
def __init__(self, format: Union[str, tv_tensors.BoundingBoxFormat]) -> None:
super().__init__()
self.format = format
def transform(self, inpt: tv_tensors.BoundingBoxes, params: dict[str, Any]) -> tv_tensors.BoundingBoxes:
return F.convert_bounding_box_format(inpt, new_format=self.format) # type: ignore[return-value, arg-type]
class ClampBoundingBoxes(Transform):
"""Clamp bounding boxes to their corresponding image dimensions.
The clamping is done according to the bounding boxes' ``canvas_size`` meta-data.
"""
_transformed_types = (tv_tensors.BoundingBoxes,)
def transform(self, inpt: tv_tensors.BoundingBoxes, params: dict[str, Any]) -> tv_tensors.BoundingBoxes:
return F.clamp_bounding_boxes(inpt) # type: ignore[return-value]
class ClampKeyPoints(Transform):
"""Clamp keypoints to their corresponding image dimensions.
The clamping is done according to the keypoints' ``canvas_size`` meta-data.
"""
_transformed_types = (tv_tensors.KeyPoints,)
def transform(self, inpt: tv_tensors.KeyPoints, params: dict[str, Any]) -> tv_tensors.KeyPoints:
return F.clamp_keypoints(inpt) # type: ignore[return-value]
|
import warnings
from typing import Optional, Tuple, TypeVar
from docarray.typing import AudioNdArray
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.utils._internal.misc import is_notebook
T = TypeVar('T', bound='AudioUrl')
@_register_proto(proto_type_name='audio_url')
class AudioUrl(AnyUrl):
"""
URL to an audio file.
Can be remote (web) URL, or a local file path.
"""
def load(self: T) -> Tuple[AudioNdArray, int]:
"""
Load the data from the url into an [`AudioNdArray`][docarray.typing.AudioNdArray]
and the frame rate.
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import AudioNdArray, AudioUrl
class MyDoc(BaseDoc):
audio_url: AudioUrl
audio_tensor: Optional[AudioNdArray]
doc = MyDoc(audio_url='https://www.kozco.com/tech/piano2.wav')
doc.audio_tensor, _ = doc.audio_url.load()
assert isinstance(doc.audio_tensor, AudioNdArray)
```
---
:return: tuple of an [`AudioNdArray`][docarray.typing.AudioNdArray] representing
the audio file content, and an integer representing the frame rate.
"""
bytes_ = self.load_bytes()
return bytes_.load()
def load_bytes(self, timeout: Optional[float] = None) -> AudioBytes:
"""
Convert url to [`AudioBytes`][docarray.typing.AudioBytes]. This will either load or
download the file and save it into an [`AudioBytes`][docarray.typing.AudioBytes] object.
:param timeout: timeout for urlopen. Only relevant if url is not local
:return: [`AudioBytes`][docarray.typing.AudioBytes] object
"""
bytes_ = super().load_bytes(timeout=timeout)
return AudioBytes(bytes_)
def display(self):
"""
Play the audio sound from url in notebook.
"""
if is_notebook():
from IPython.display import Audio, display
            remote_url = self.startswith('http')
if remote_url:
display(Audio(data=self))
else:
display(Audio(filename=self))
else:
            warnings.warn('Display of audio is only possible in a notebook.')
|
import warnings
from typing import Optional, Tuple, TypeVar
from docarray.typing import AudioNdArray
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.utils._internal.misc import is_notebook
T = TypeVar('T', bound='AudioUrl')
@_register_proto(proto_type_name='audio_url')
class AudioUrl(AnyUrl):
"""
URL to an audio file.
Can be remote (web) URL, or a local file path.
"""
def load(self: T) -> Tuple[AudioNdArray, int]:
"""
Load the data from the url into an AudioNdArray and the frame rate.
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import AudioNdArray, AudioUrl
class MyDoc(BaseDoc):
audio_url: AudioUrl
audio_tensor: Optional[AudioNdArray]
doc = MyDoc(audio_url='https://www.kozco.com/tech/piano2.wav')
doc.audio_tensor, _ = doc.audio_url.load()
assert isinstance(doc.audio_tensor, AudioNdArray)
```
---
:return: tuple of an AudioNdArray representing the Audio file content,
and an integer representing the frame rate.
"""
bytes_ = self.load_bytes()
return bytes_.load()
def load_bytes(self, timeout: Optional[float] = None) -> AudioBytes:
"""
Convert url to AudioBytes. This will either load or download the file and save
it into an AudioBytes object.
:param timeout: timeout for urlopen. Only relevant if url is not local
:return: AudioBytes object
"""
bytes_ = super().load_bytes(timeout=timeout)
return AudioBytes(bytes_)
def display(self):
"""
Play the audio sound from url in notebook.
"""
if is_notebook():
from IPython.display import Audio, display
            remote_url = self.startswith('http')
if remote_url:
display(Audio(data=self))
else:
display(Audio(filename=self))
else:
            warnings.warn('Display of audio is only possible in a notebook.')
|
import asyncio
import logging
from typing import Optional
import aiohttp
from fastapi import HTTPException
from backend.data import graph as graph_db
from backend.data.block import get_block
from backend.util.settings import Settings
from .models import ApiResponse, ChatRequest, GraphData
logger = logging.getLogger(__name__)
settings = Settings()
OTTO_API_URL = settings.config.otto_api_url
class OttoService:
@staticmethod
async def _fetch_graph_data(
request: ChatRequest, user_id: str
) -> Optional[GraphData]:
"""Fetch graph data if requested and available."""
if not (request.include_graph_data and request.graph_id):
return None
try:
graph = await graph_db.get_graph(request.graph_id, user_id=user_id)
if not graph:
return None
nodes_data = []
for node in graph.nodes:
block = get_block(node.block_id)
if not block:
continue
node_data = {
"id": node.id,
"block_id": node.block_id,
"block_name": block.name,
"block_type": (
block.block_type.value if hasattr(block, "block_type") else None
),
"data": {
k: v
for k, v in (node.input_default or {}).items()
if k not in ["credentials"] # Exclude sensitive data
},
}
nodes_data.append(node_data)
# Create a GraphData object with the required fields
return GraphData(
nodes=nodes_data,
edges=[],
graph_name=graph.name,
graph_description=graph.description,
)
except Exception as e:
logger.error(f"Failed to fetch graph data: {str(e)}")
return None
@staticmethod
async def ask(request: ChatRequest, user_id: str) -> ApiResponse:
"""
Send request to Otto API and handle the response.
"""
# Check if Otto API URL is configured
if not OTTO_API_URL:
logger.error("Otto API URL is not configured")
raise HTTPException(
status_code=503, detail="Otto service is not configured"
)
try:
async with aiohttp.ClientSession() as session:
headers = {
"Content-Type": "application/json",
"Accept": "application/json",
}
# If graph data is requested, fetch it
graph_data = await OttoService._fetch_graph_data(request, user_id)
# Prepare the payload with optional graph data
payload = {
"query": request.query,
"conversation_history": [
msg.model_dump() for msg in request.conversation_history
],
"user_id": user_id,
"message_id": request.message_id,
}
if graph_data:
payload["graph_data"] = graph_data.model_dump()
logger.info(f"Sending request to Otto API for user {user_id}")
logger.debug(f"Request payload: {payload}")
async with session.post(
OTTO_API_URL,
json=payload,
headers=headers,
timeout=aiohttp.ClientTimeout(total=60),
) as response:
if response.status != 200:
error_text = await response.text()
logger.error(f"Otto API error: {error_text}")
raise HTTPException(
status_code=response.status,
detail=f"Otto API request failed: {error_text}",
)
data = await response.json()
logger.info(
f"Successfully received response from Otto API for user {user_id}"
)
return ApiResponse(**data)
except aiohttp.ClientError as e:
logger.error(f"Connection error to Otto API: {str(e)}")
raise HTTPException(
status_code=503, detail="Failed to connect to Otto service"
)
except asyncio.TimeoutError:
logger.error("Timeout error connecting to Otto API after 60 seconds")
raise HTTPException(
status_code=504, detail="Request to Otto service timed out"
)
        except HTTPException:
            # Re-raise deliberate HTTP errors (e.g. a non-200 Otto response)
            # instead of masking them as a generic 500 below.
            raise
        except Exception as e:
            logger.error(f"Unexpected error in Otto API proxy: {str(e)}")
            raise HTTPException(
                status_code=500, detail="Internal server error in Otto proxy"
            )
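# A minimal calling sketch (field names taken from the usages above; values
# are illustrative):
#   request = ChatRequest(query="What does my graph do?", conversation_history=[],
#                         message_id="msg-1", include_graph_data=True, graph_id="g-1")
#   response = await OttoService.ask(request, user_id="user-123")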
|
import logging
from typing import Optional
import aiohttp
from fastapi import HTTPException
from backend.data import graph as graph_db
from backend.data.block import get_block
from backend.util.settings import Settings
from .models import ApiResponse, ChatRequest, GraphData
logger = logging.getLogger(__name__)
settings = Settings()
OTTO_API_URL = settings.config.otto_api_url
class OttoService:
@staticmethod
async def _fetch_graph_data(
request: ChatRequest, user_id: str
) -> Optional[GraphData]:
"""Fetch graph data if requested and available."""
if not (request.include_graph_data and request.graph_id):
return None
try:
graph = await graph_db.get_graph(request.graph_id, user_id=user_id)
if not graph:
return None
nodes_data = []
for node in graph.nodes:
block = get_block(node.block_id)
if not block:
continue
node_data = {
"id": node.id,
"block_id": node.block_id,
"block_name": block.name,
"block_type": (
block.block_type.value if hasattr(block, "block_type") else None
),
"data": {
k: v
for k, v in (node.input_default or {}).items()
if k not in ["credentials"] # Exclude sensitive data
},
}
nodes_data.append(node_data)
# Create a GraphData object with the required fields
return GraphData(
nodes=nodes_data,
edges=[],
graph_name=graph.name,
graph_description=graph.description,
)
except Exception as e:
logger.error(f"Failed to fetch graph data: {str(e)}")
return None
@staticmethod
async def ask(request: ChatRequest, user_id: str) -> ApiResponse:
"""
Send request to Otto API and handle the response.
"""
try:
async with aiohttp.ClientSession() as session:
headers = {
"Content-Type": "application/json",
"Accept": "application/json",
}
# If graph data is requested, fetch it
graph_data = await OttoService._fetch_graph_data(request, user_id)
# Prepare the payload with optional graph data
payload = {
"query": request.query,
"conversation_history": [
msg.model_dump() for msg in request.conversation_history
],
"user_id": user_id,
"message_id": request.message_id,
}
if graph_data:
payload["graph_data"] = graph_data.model_dump()
logger.info(f"Sending request to Otto API for user {user_id}")
logger.debug(f"Request payload: {payload}")
async with session.post(
OTTO_API_URL, json=payload, headers=headers
) as response:
if response.status != 200:
error_text = await response.text()
logger.error(f"Otto API error: {error_text}")
raise HTTPException(
status_code=response.status,
detail=f"Otto API request failed: {error_text}",
)
data = await response.json()
logger.info(
f"Successfully received response from Otto API for user {user_id}"
)
return ApiResponse(**data)
except aiohttp.ClientError as e:
logger.error(f"Connection error to Otto API: {str(e)}")
raise HTTPException(
status_code=503, detail="Failed to connect to Otto service"
)
        except HTTPException:
            # Re-raise deliberate HTTP errors (e.g. a non-200 Otto response)
            # instead of masking them as a generic 500 below.
            raise
        except Exception as e:
            logger.error(f"Unexpected error in Otto API proxy: {str(e)}")
            raise HTTPException(
                status_code=500, detail="Internal server error in Otto proxy"
            )
|
from . import utils
from .model import (
hubert_base,
hubert_large,
hubert_pretrain_base,
hubert_pretrain_large,
hubert_pretrain_model,
hubert_pretrain_xlarge,
hubert_xlarge,
HuBERTPretrainModel,
wav2vec2_base,
wav2vec2_large,
wav2vec2_large_lv60k,
wav2vec2_model,
Wav2Vec2Model,
wavlm_base,
wavlm_large,
wavlm_model,
)
__all__ = [
"Wav2Vec2Model",
"HuBERTPretrainModel",
"wavlm_model",
"wavlm_base",
"wavlm_large",
"wav2vec2_model",
"wav2vec2_base",
"wav2vec2_large",
"wav2vec2_large_lv60k",
"hubert_base",
"hubert_large",
"hubert_xlarge",
"hubert_pretrain_model",
"hubert_pretrain_base",
"hubert_pretrain_large",
"hubert_pretrain_xlarge",
"utils",
]
|
from . import utils
from .model import (
hubert_base,
hubert_large,
hubert_pretrain_base,
hubert_pretrain_large,
hubert_pretrain_model,
hubert_pretrain_xlarge,
hubert_xlarge,
HuBERTPretrainModel,
wav2vec2_base,
wav2vec2_large,
wav2vec2_large_lv60k,
wav2vec2_model,
Wav2Vec2Model,
)
__all__ = [
"Wav2Vec2Model",
"HuBERTPretrainModel",
"wav2vec2_model",
"wav2vec2_base",
"wav2vec2_large",
"wav2vec2_large_lv60k",
"hubert_base",
"hubert_large",
"hubert_xlarge",
"hubert_pretrain_model",
"hubert_pretrain_base",
"hubert_pretrain_large",
"hubert_pretrain_xlarge",
"utils",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .builder import build_match_cost
from .match_cost import (BBoxL1Cost, ClassificationCost, DiceCost,
FocalLossCost, IoUCost)
__all__ = [
'build_match_cost', 'ClassificationCost', 'BBoxL1Cost', 'IoUCost',
'FocalLossCost', 'DiceCost'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .builder import build_match_cost
from .match_cost import BBoxL1Cost, ClassificationCost, FocalLossCost, IoUCost
__all__ = [
'build_match_cost', 'ClassificationCost', 'BBoxL1Cost', 'IoUCost',
'FocalLossCost'
]
|
"""Azure Cognitive Vision tool spec."""
from typing import List, Optional
import requests
from llama_index.core.tools.tool_spec.base import BaseToolSpec
CV_URL_TMPL = "https://{resource}.cognitiveservices.azure.com/computervision/imageanalysis:analyze"
class AzureCVToolSpec(BaseToolSpec):
"""Azure Cognitive Vision tool spec."""
spec_functions = ["process_image"]
def __init__(
self,
resource: str,
api_key: str,
language: Optional[str] = "en",
api_version: Optional[str] = "2023-04-01-preview",
) -> None:
"""Initialize with parameters."""
self.api_key = api_key
self.cv_url = CV_URL_TMPL.format(resource=resource)
self.language = language
self.api_version = api_version
def process_image(self, url: str, features: List[str]):
"""
        This tool accepts an image URL or file, processes it, and returns a variety of text depending on the use case.
You can use the features argument to configure what text you want returned.
Args:
url (str): The url for the image to caption
features (List[str]): Instructions on how to process the image. Valid keys are tags, objects, read, caption
"""
response = requests.post(
f"{self.cv_url}?features={','.join(features)}&language={self.language}&api-version={self.api_version}",
headers={"Ocp-Apim-Subscription-Key": self.api_key},
json={"url": url},
)
response_json = response.json()
if "read" in features:
response_json["readResult"] = response_json["readResult"]["content"]
return response_json
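# A minimal usage sketch (placeholder resource name and key, not real credentials):
# tool = AzureCVToolSpec(resource="my-cv-resource", api_key="<api-key>")
# result = tool.process_image(
#     url="https://example.com/photo.jpg", features=["caption", "tags"]
# )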
|
"""Azure Cognitive Vision tool spec."""
from typing import List, Optional
import requests
from llama_index.core.tools.tool_spec.base import BaseToolSpec
CV_URL_TMPL = "https://{resource}.cognitiveservices.azure.com/computervision/imageanalysis:analyze"
class AzureCVToolSpec(BaseToolSpec):
"""Azure Cognitive Vision tool spec."""
spec_functions = ["process_image"]
def __init__(
self,
resource: str,
api_key: str,
language: Optional[str] = "en",
api_version: Optional[str] = "2023-04-01-preview",
) -> None:
"""Initialize with parameters."""
self.api_key = api_key
self.cv_url = CV_URL_TMPL.format(resource=resource)
self.language = language
self.api_version = api_version
def process_image(self, url: str, features: List[str]):
"""
        This tool accepts an image URL or file, processes it, and returns a variety of text depending on the use case.
You can use the features argument to configure what text you want returned.
Args:
url (str): The url for the image to caption
features (List[str]): Instructions on how to process the image. Valid keys are tags, objects, read, caption
"""
response = requests.post(
f'{self.cv_url}?features={",".join(features)}&language={self.language}&api-version={self.api_version}',
headers={"Ocp-Apim-Subscription-Key": self.api_key},
json={"url": url},
)
response_json = response.json()
if "read" in features:
response_json["readResult"] = response_json["readResult"]["content"]
return response_json
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import MagicMock, Mock, patch
from mmengine.hooks import IterTimerHook
from mmengine.logging import MessageHub
def time_patch():
if not hasattr(time_patch, 'time'):
time_patch.time = 0
else:
time_patch.time += 1
return time_patch.time
class TestIterTimerHook(TestCase):
def setUp(self) -> None:
self.hook = IterTimerHook()
def test_init(self):
assert self.hook.time_sec_tot == 0
assert self.hook.start_iter == 0
def test_before_train(self):
runner = MagicMock()
runner.iter = 1
self.hook.before_train(runner)
assert self.hook.start_iter == 1
def test_before_epoch(self):
runner = Mock()
self.hook._before_epoch(runner)
assert isinstance(self.hook.t, float)
@patch('time.time', MagicMock(return_value=1))
def test_before_iter(self):
runner = MagicMock()
runner.log_buffer = dict()
self.hook._before_epoch(runner)
for mode in ('train', 'val', 'test'):
self.hook._before_iter(runner, batch_idx=1, mode=mode)
runner.message_hub.update_scalar.assert_called_with(
f'{mode}/data_time', 0)
@patch('time.time', time_patch)
def test_after_iter(self):
runner = MagicMock()
runner.log_buffer = dict()
runner.log_processor.window_size = 10
runner.max_iters = 100
runner.iter = 0
runner.test_dataloader = [0] * 20
runner.val_dataloader = [0] * 20
runner.message_hub = MessageHub.get_instance('test_iter_timer_hook')
self.hook.before_run(runner)
self.hook._before_epoch(runner)
# eta = (100 - 10) / 1
for _ in range(10):
self.hook._after_iter(runner, 1)
runner.iter += 1
assert runner.message_hub.get_info('eta') == 90
for i in range(10):
self.hook._after_iter(runner, batch_idx=i, mode='val')
assert runner.message_hub.get_info('eta') == 10
for i in range(11, 20):
self.hook._after_iter(runner, batch_idx=i, mode='val')
assert runner.message_hub.get_info('eta') == 0
self.hook.after_val_epoch(runner)
for i in range(10):
self.hook._after_iter(runner, batch_idx=i, mode='test')
assert runner.message_hub.get_info('eta') == 10
for i in range(11, 20):
self.hook._after_iter(runner, batch_idx=i, mode='test')
assert runner.message_hub.get_info('eta') == 0
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import MagicMock, Mock, patch
from mmengine.hooks import IterTimerHook
from mmengine.logging import MessageHub
def time_patch():
if not hasattr(time_patch, 'time'):
time_patch.time = 0
else:
time_patch.time += 1
return time_patch.time
class TestIterTimerHook(TestCase):
def setUp(self) -> None:
self.hook = IterTimerHook()
def test_init(self):
assert self.hook.time_sec_tot == 0
assert self.hook.start_iter == 0
def test_before_train(self):
runner = MagicMock()
runner.iter = 1
self.hook.before_train(runner)
assert self.hook.start_iter == 1
def test_before_epoch(self):
runner = Mock()
self.hook._before_epoch(runner)
assert isinstance(self.hook.t, float)
@patch('time.time', MagicMock(return_value=1))
def test_before_iter(self):
runner = MagicMock()
runner.log_buffer = dict()
self.hook._before_epoch(runner)
for mode in ('train', 'val', 'test'):
self.hook._before_iter(runner, batch_idx=1, mode=mode)
runner.message_hub.update_scalar.assert_called_with(
f'{mode}/data_time', 0)
@patch('time.time', time_patch)
def test_after_iter(self):
runner = MagicMock()
runner.log_buffer = dict()
runner.log_processor.window_size = 10
runner.max_iters = 100
runner.iter = 0
runner.test_dataloader = [0] * 20
runner.val_dataloader = [0] * 20
self.hook._before_epoch(runner)
self.hook.before_run(runner)
self.hook._after_iter(runner, batch_idx=1)
runner.message_hub.update_scalar.assert_called()
runner.message_hub.get_log.assert_not_called()
runner.message_hub.update_info.assert_not_called()
runner.message_hub = MessageHub.get_instance('test_iter_timer_hook')
runner.iter = 9
# eta = (100 - 10) / 1
self.hook._after_iter(runner, batch_idx=89)
assert runner.message_hub.get_info('eta') == 90
self.hook._after_iter(runner, batch_idx=9, mode='val')
assert runner.message_hub.get_info('eta') == 10
self.hook._after_iter(runner, batch_idx=19, mode='test')
assert runner.message_hub.get_info('eta') == 0
|
# Copyright (c) OpenMMLab. All rights reserved.
import asyncio
from argparse import ArgumentParser
from mmdet.apis import (async_inference_detector, inference_detector,
init_detector, show_result_pyplot)
def parse_args():
parser = ArgumentParser()
parser.add_argument('img', help='Image file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--palette',
default='coco',
choices=['coco', 'voc', 'citys', 'random'],
help='Color palette used for visualization')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='bbox score threshold')
parser.add_argument(
'--async-test',
action='store_true',
help='whether to set async options for async inference.')
args = parser.parse_args()
return args
def main(args):
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# test a single image
result = inference_detector(model, args.img)
# show the results
show_result_pyplot(
model,
args.img,
result,
palette=args.palette,
score_thr=args.score_thr)
async def async_main(args):
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# test a single image
tasks = asyncio.create_task(async_inference_detector(model, args.img))
result = await asyncio.gather(tasks)
# show the results
show_result_pyplot(
model,
args.img,
result[0],
palette=args.palette,
score_thr=args.score_thr)
if __name__ == '__main__':
args = parse_args()
if args.async_test:
asyncio.run(async_main(args))
else:
main(args)
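# Example invocation (file names are placeholders; in the MMDetection repo
# this script is typically saved as demo/image_demo.py):
#   python image_demo.py demo.jpg faster_rcnn_r50_fpn_1x_coco.py \
#       checkpoint.pth --device cuda:0 --score-thr 0.5 --palette coco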
|
# Copyright (c) OpenMMLab. All rights reserved.
import asyncio
from argparse import ArgumentParser
from mmdet.apis import (async_inference_detector, inference_detector,
init_detector, show_result_pyplot)
def parse_args():
parser = ArgumentParser()
parser.add_argument('img', help='Image file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='bbox score threshold')
parser.add_argument(
'--async-test',
action='store_true',
help='whether to set async options for async inference.')
args = parser.parse_args()
return args
def main(args):
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# test a single image
result = inference_detector(model, args.img)
# show the results
show_result_pyplot(model, args.img, result, score_thr=args.score_thr)
async def async_main(args):
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# test a single image
tasks = asyncio.create_task(async_inference_detector(model, args.img))
result = await asyncio.gather(tasks)
# show the results
show_result_pyplot(model, args.img, result[0], score_thr=args.score_thr)
if __name__ == '__main__':
args = parse_args()
if args.async_test:
asyncio.run(async_main(args))
else:
main(args)
|
from typing import List
from llama_index.core.instrumentation.events.base import BaseEvent
from llama_index.core.base.response.schema import RESPONSE_TYPE
from llama_index.core.schema import QueryType
class SynthesizeStartEvent(BaseEvent):
"""
SynthesizeStartEvent.
Args:
query (QueryType): Query as a string or query bundle.
"""
query: QueryType
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "SynthesizeStartEvent"
class SynthesizeEndEvent(BaseEvent):
"""
SynthesizeEndEvent.
Args:
query (QueryType): Query as a string or query bundle.
response (RESPONSE_TYPE): Response.
"""
query: QueryType
response: RESPONSE_TYPE
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "SynthesizeEndEvent"
class GetResponseStartEvent(BaseEvent):
"""
GetResponseStartEvent.
Args:
query_str (str): Query string.
text_chunks (List[str]): List of text chunks.
"""
query_str: str
text_chunks: List[str]
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "GetResponseStartEvent"
class GetResponseEndEvent(BaseEvent):
"""GetResponseEndEvent."""
# TODO: consumes the first chunk of generators??
# response: RESPONSE_TEXT_TYPE
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "GetResponseEndEvent"
|
from typing import List
from llama_index.core.instrumentation.events.base import BaseEvent
from llama_index.core.base.response.schema import RESPONSE_TYPE
from llama_index.core.schema import QueryType
class SynthesizeStartEvent(BaseEvent):
"""SynthesizeStartEvent.
Args:
query (QueryType): Query as a string or query bundle.
"""
query: QueryType
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "SynthesizeStartEvent"
class SynthesizeEndEvent(BaseEvent):
"""SynthesizeEndEvent.
Args:
query (QueryType): Query as a string or query bundle.
response (RESPONSE_TYPE): Response.
"""
query: QueryType
response: RESPONSE_TYPE
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "SynthesizeEndEvent"
class GetResponseStartEvent(BaseEvent):
"""GetResponseStartEvent.
Args:
query_str (str): Query string.
text_chunks (List[str]): List of text chunks.
"""
query_str: str
text_chunks: List[str]
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "GetResponseStartEvent"
class GetResponseEndEvent(BaseEvent):
"""GetResponseEndEvent."""
# TODO: consumes the first chunk of generators??
# response: RESPONSE_TEXT_TYPE
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "GetResponseEndEvent"
|
"""All minimum dependencies for scikit-learn."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import argparse
from collections import defaultdict
# scipy and cython should be in sync with pyproject.toml
NUMPY_MIN_VERSION = "1.22.0"
SCIPY_MIN_VERSION = "1.8.0"
JOBLIB_MIN_VERSION = "1.2.0"
THREADPOOLCTL_MIN_VERSION = "3.1.0"
PYTEST_MIN_VERSION = "7.1.2"
CYTHON_MIN_VERSION = "3.0.10"
# 'build' and 'install' are included to have structured metadata for CI.
# They will NOT be included in setup's extras_require.
# The values are (version_spec, comma-separated tags)
dependent_packages = {
"numpy": (NUMPY_MIN_VERSION, "build, install"),
"scipy": (SCIPY_MIN_VERSION, "build, install"),
"joblib": (JOBLIB_MIN_VERSION, "install"),
"threadpoolctl": (THREADPOOLCTL_MIN_VERSION, "install"),
"cython": (CYTHON_MIN_VERSION, "build"),
"meson-python": ("0.16.0", "build"),
"matplotlib": ("3.5.0", "benchmark, docs, examples, tests"),
"scikit-image": ("0.19.0", "docs, examples, tests"),
"pandas": ("1.4.0", "benchmark, docs, examples, tests"),
"seaborn": ("0.9.0", "docs, examples"),
"memory_profiler": ("0.57.0", "benchmark, docs"),
"pytest": (PYTEST_MIN_VERSION, "tests"),
"pytest-cov": ("2.9.0", "tests"),
"ruff": ("0.11.7", "tests"),
"mypy": ("1.15", "tests"),
"pyamg": ("4.2.1", "tests"),
"polars": ("0.20.30", "docs, tests"),
"pyarrow": ("12.0.0", "tests"),
"sphinx": ("7.3.7", "docs"),
"sphinx-copybutton": ("0.5.2", "docs"),
"sphinx-gallery": ("0.17.1", "docs"),
"numpydoc": ("1.2.0", "docs, tests"),
"Pillow": ("8.4.0", "docs"),
"pooch": ("1.6.0", "docs, examples, tests"),
"sphinx-prompt": ("1.4.0", "docs"),
"sphinxext-opengraph": ("0.9.1", "docs"),
"plotly": ("5.14.0", "docs, examples"),
"sphinxcontrib-sass": ("0.3.4", "docs"),
"sphinx-remove-toctrees": ("1.0.0.post1", "docs"),
"sphinx-design": ("0.6.0", "docs"),
"pydata-sphinx-theme": ("0.15.3", "docs"),
"towncrier": ("24.8.0", "docs"),
# XXX: Pin conda-lock to the latest released version (needs manual update
# from time to time)
"conda-lock": ("2.5.7", "maintenance"),
}
# create inverse mapping for setuptools
tag_to_packages: dict = defaultdict(list)
for package, (min_version, extras) in dependent_packages.items():
for extra in extras.split(", "):
tag_to_packages[extra].append("{}>={}".format(package, min_version))
# Used by CI to get the min dependencies
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Get min dependencies for a package")
parser.add_argument("package", choices=dependent_packages)
args = parser.parse_args()
min_version = dependent_packages[args.package][0]
print(min_version)
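# Example (run from the scikit-learn source tree; the script path is assumed
# to be sklearn/_min_dependencies.py):
#   $ python sklearn/_min_dependencies.py numpy
#   1.22.0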
|
"""All minimum dependencies for scikit-learn."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import argparse
from collections import defaultdict
# scipy and cython should be in sync with pyproject.toml
NUMPY_MIN_VERSION = "1.22.0"
SCIPY_MIN_VERSION = "1.8.0"
JOBLIB_MIN_VERSION = "1.2.0"
THREADPOOLCTL_MIN_VERSION = "3.1.0"
PYTEST_MIN_VERSION = "7.1.2"
CYTHON_MIN_VERSION = "3.0.10"
# 'build' and 'install' are included to have structured metadata for CI.
# They will NOT be included in setup's extras_require.
# The values are (version_spec, comma-separated tags)
dependent_packages = {
"numpy": (NUMPY_MIN_VERSION, "build, install"),
"scipy": (SCIPY_MIN_VERSION, "build, install"),
"joblib": (JOBLIB_MIN_VERSION, "install"),
"threadpoolctl": (THREADPOOLCTL_MIN_VERSION, "install"),
"cython": (CYTHON_MIN_VERSION, "build"),
"meson-python": ("0.16.0", "build"),
"matplotlib": ("3.5.0", "benchmark, docs, examples, tests"),
"scikit-image": ("0.19.0", "docs, examples, tests"),
"pandas": ("1.4.0", "benchmark, docs, examples, tests"),
"seaborn": ("0.9.0", "docs, examples"),
"memory_profiler": ("0.57.0", "benchmark, docs"),
"pytest": (PYTEST_MIN_VERSION, "tests"),
"pytest-cov": ("2.9.0", "tests"),
"ruff": ("0.11.2", "tests"),
"mypy": ("1.15", "tests"),
"pyamg": ("4.2.1", "tests"),
"polars": ("0.20.30", "docs, tests"),
"pyarrow": ("12.0.0", "tests"),
"sphinx": ("7.3.7", "docs"),
"sphinx-copybutton": ("0.5.2", "docs"),
"sphinx-gallery": ("0.17.1", "docs"),
"numpydoc": ("1.2.0", "docs, tests"),
"Pillow": ("8.4.0", "docs"),
"pooch": ("1.6.0", "docs, examples, tests"),
"sphinx-prompt": ("1.4.0", "docs"),
"sphinxext-opengraph": ("0.9.1", "docs"),
"plotly": ("5.14.0", "docs, examples"),
"sphinxcontrib-sass": ("0.3.4", "docs"),
"sphinx-remove-toctrees": ("1.0.0.post1", "docs"),
"sphinx-design": ("0.6.0", "docs"),
"pydata-sphinx-theme": ("0.15.3", "docs"),
"towncrier": ("24.8.0", "docs"),
# XXX: Pin conda-lock to the latest released version (needs manual update
# from time to time)
"conda-lock": ("2.5.7", "maintenance"),
}
# create inverse mapping for setuptools
tag_to_packages: dict = defaultdict(list)
for package, (min_version, extras) in dependent_packages.items():
for extra in extras.split(", "):
tag_to_packages[extra].append("{}>={}".format(package, min_version))
# Used by CI to get the min dependencies
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Get min dependencies for a package")
parser.add_argument("package", choices=dependent_packages)
args = parser.parse_args()
min_version = dependent_packages[args.package][0]
print(min_version)
|
from typing import cast
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessage
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableConfig
from langchain_tests.integration_tests import ChatModelIntegrationTests
from pydantic import BaseModel
from langchain.chat_models import init_chat_model
class multiply(BaseModel):
"""Product of two ints."""
x: int
y: int
@pytest.mark.requires("langchain_openai", "langchain_anthropic")
async def test_init_chat_model_chain() -> None:
model = init_chat_model("gpt-4o", configurable_fields="any", config_prefix="bar")
model_with_tools = model.bind_tools([multiply])
model_with_config = model_with_tools.with_config(
RunnableConfig(tags=["foo"]),
configurable={"bar_model": "claude-3-sonnet-20240229"},
)
prompt = ChatPromptTemplate.from_messages([("system", "foo"), ("human", "{input}")])
chain = prompt | model_with_config
output = chain.invoke({"input": "bar"})
assert isinstance(output, AIMessage)
events = [
event async for event in chain.astream_events({"input": "bar"}, version="v2")
]
assert events
class TestStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return cast(type[BaseChatModel], init_chat_model)
@property
def chat_model_params(self) -> dict:
return {"model": "gpt-4o", "configurable_fields": "any"}
@property
def supports_image_inputs(self) -> bool:
return True
@property
def has_tool_calling(self) -> bool:
return True
@property
def has_structured_output(self) -> bool:
return True
|
from typing import cast
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessage
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableConfig
from langchain_tests.integration_tests import ChatModelIntegrationTests
from pydantic import BaseModel
from langchain.chat_models import init_chat_model
class multiply(BaseModel):
"""Product of two ints."""
x: int
y: int
@pytest.mark.requires("langchain_openai", "langchain_anthropic")
async def test_init_chat_model_chain() -> None:
model = init_chat_model("gpt-4o", configurable_fields="any", config_prefix="bar")
model_with_tools = model.bind_tools([multiply])
model_with_config = model_with_tools.with_config(
RunnableConfig(tags=["foo"]),
configurable={"bar_model": "claude-3-sonnet-20240229"},
)
prompt = ChatPromptTemplate.from_messages([("system", "foo"), ("human", "{input}")])
chain = prompt | model_with_config
output = chain.invoke({"input": "bar"})
assert isinstance(output, AIMessage)
events = []
async for event in chain.astream_events({"input": "bar"}, version="v2"):
events.append(event)
assert events
class TestStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return cast(type[BaseChatModel], init_chat_model)
@property
def chat_model_params(self) -> dict:
return {"model": "gpt-4o", "configurable_fields": "any"}
@property
def supports_image_inputs(self) -> bool:
return True
@property
def has_tool_calling(self) -> bool:
return True
@property
def has_structured_output(self) -> bool:
return True
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import itertools
import logging
from typing import Dict, Optional
from mmengine.logging import print_log
from mmengine.model import is_model_wrapper
from mmengine.registry import HOOKS, MODELS
from .hook import DATA_BATCH, Hook
@HOOKS.register_module()
class EMAHook(Hook):
"""A Hook to apply Exponential Moving Average (EMA) on the model during
training.
Note:
- EMAHook takes priority over CheckpointHook.
- The original model parameters are actually saved in ema field after
train.
Args:
ema_type (str): The type of EMA strategy to use. You can find the
supported strategies in ``mmengine.model.averaged_model``.
Defaults to 'ExponentialMovingAverage'
"""
priority = 'NORMAL'
def __init__(self, ema_type: str = 'ExponentialMovingAverage', **kwargs):
self.ema_cfg = dict(type=ema_type, **kwargs)
def before_run(self, runner) -> None:
"""Create an ema copy of the model."""
model = runner.model
if is_model_wrapper(model):
model = model.module
self.src_model = model
self.ema_model = MODELS.build(
self.ema_cfg, default_args=dict(model=self.src_model))
def after_train_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[dict] = None) -> None:
"""Update ema parameter."""
self.ema_model.update_parameters(self.src_model)
def before_val_epoch(self, runner) -> None:
"""We load parameter values from ema model to source model before
validation."""
self._swap_ema_parameters()
def after_val_epoch(self,
runner,
metrics: Optional[Dict[str, float]] = None) -> None:
"""We recover source model's parameter from ema model after
validation."""
self._swap_ema_parameters()
def before_test_epoch(self, runner) -> None:
"""We load parameter values from ema model to source model before
test."""
self._swap_ema_parameters()
def after_test_epoch(self,
runner,
metrics: Optional[Dict[str, float]] = None) -> None:
"""We recover source model's parameter from ema model after test."""
self._swap_ema_parameters()
def before_save_checkpoint(self, runner, checkpoint: dict) -> None:
"""Save ema parameters to checkpoint."""
checkpoint['ema_state_dict'] = self.ema_model.state_dict()
# Save ema parameters to the source model's state dict so that we can
# directly load the averaged model weights for deployment.
# Swapping the state_dict key-values instead of swapping model
# parameters because the state_dict is a shallow copy of model
# parameters.
self._swap_ema_state_dict(checkpoint)
def after_load_checkpoint(self, runner, checkpoint: dict) -> None:
"""Resume ema parameters from checkpoint."""
if 'ema_state_dict' in checkpoint:
# The original model parameters are actually saved in ema field.
# swap the weights back to resume ema state.
self._swap_ema_state_dict(checkpoint)
self.ema_model.load_state_dict(checkpoint['ema_state_dict'])
# Support load checkpoint without ema state dict.
else:
print_log(
'There is no `ema_state_dict` in checkpoint. '
'`EMAHook` will make a copy of `state_dict` as the '
'initial `ema_state_dict`', 'current', logging.WARNING)
self.ema_model.module.load_state_dict(
copy.deepcopy(checkpoint['state_dict']))
def _swap_ema_parameters(self) -> None:
"""Swap the parameter of model with ema_model."""
avg_param = (
itertools.chain(self.ema_model.module.parameters(),
self.ema_model.module.buffers())
if self.ema_model.update_buffers else
self.ema_model.module.parameters())
src_param = (
itertools.chain(self.src_model.parameters(),
self.src_model.buffers())
if self.ema_model.update_buffers else self.src_model.parameters())
for p_avg, p_src in zip(avg_param, src_param):
tmp = p_avg.data.clone()
p_avg.data.copy_(p_src.data)
p_src.data.copy_(tmp)
def _swap_ema_state_dict(self, checkpoint):
"""Swap the state dict values of model with ema_model."""
model_state = checkpoint['state_dict']
ema_state = checkpoint['ema_state_dict']
for k in ema_state:
if k[:7] == 'module.':
tmp = ema_state[k]
ema_state[k] = model_state[k[7:]]
model_state[k[7:]] = tmp
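# A minimal config sketch for enabling this hook with an mmengine Runner
# (``momentum`` is an assumed ExponentialMovingAverage argument, forwarded
# through **kwargs):
#
#   custom_hooks = [
#       dict(type='EMAHook', ema_type='ExponentialMovingAverage',
#            momentum=0.0002)
#   ]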
|
# Copyright (c) OpenMMLab. All rights reserved.
import itertools
from typing import Dict, Optional
from mmengine.model import is_model_wrapper
from mmengine.registry import HOOKS, MODELS
from .hook import DATA_BATCH, Hook
@HOOKS.register_module()
class EMAHook(Hook):
"""A Hook to apply Exponential Moving Average (EMA) on the model during
training.
Note:
- EMAHook takes priority over CheckpointHook.
- The original model parameters are actually saved in ema field after
train.
Args:
ema_type (str): The type of EMA strategy to use. You can find the
supported strategies in ``mmengine.model.averaged_model``.
Defaults to 'ExponentialMovingAverage'
"""
priority = 'NORMAL'
def __init__(self, ema_type: str = 'ExponentialMovingAverage', **kwargs):
self.ema_cfg = dict(type=ema_type, **kwargs)
def before_run(self, runner) -> None:
"""Create an ema copy of the model."""
model = runner.model
if is_model_wrapper(model):
model = model.module
self.src_model = model
self.ema_model = MODELS.build(
self.ema_cfg, default_args=dict(model=self.src_model))
def after_train_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[dict] = None) -> None:
"""Update ema parameter."""
self.ema_model.update_parameters(self.src_model)
def before_val_epoch(self, runner) -> None:
"""We load parameter values from ema model to source model before
validation."""
self._swap_ema_parameters()
def after_val_epoch(self,
runner,
metrics: Optional[Dict[str, float]] = None) -> None:
"""We recover source model's parameter from ema model after
validation."""
self._swap_ema_parameters()
def before_test_epoch(self, runner) -> None:
"""We load parameter values from ema model to source model before
test."""
self._swap_ema_parameters()
def after_test_epoch(self,
runner,
metrics: Optional[Dict[str, float]] = None) -> None:
"""We recover source model's parameter from ema model after test."""
self._swap_ema_parameters()
def before_save_checkpoint(self, runner, checkpoint: dict) -> None:
"""Save ema parameters to checkpoint."""
checkpoint['ema_state_dict'] = self.ema_model.state_dict()
# Save ema parameters to the source model's state dict so that we can
# directly load the averaged model weights for deployment.
# Swapping the state_dict key-values instead of swapping model
# parameters because the state_dict is a shallow copy of model
# parameters.
self._swap_ema_state_dict(checkpoint)
def after_load_checkpoint(self, runner, checkpoint: dict) -> None:
"""Resume ema parameters from checkpoint."""
# The original model parameters are actually saved in ema field.
# swap the weights back to resume ema state.
self._swap_ema_state_dict(checkpoint)
self.ema_model.load_state_dict(checkpoint['ema_state_dict'])
def _swap_ema_parameters(self) -> None:
"""Swap the parameter of model with ema_model."""
avg_param = (
itertools.chain(self.ema_model.module.parameters(),
self.ema_model.module.buffers())
if self.ema_model.update_buffers else
self.ema_model.module.parameters())
src_param = (
itertools.chain(self.src_model.parameters(),
self.src_model.buffers())
if self.ema_model.update_buffers else self.src_model.parameters())
for p_avg, p_src in zip(avg_param, src_param):
tmp = p_avg.data.clone()
p_avg.data.copy_(p_src.data)
p_src.data.copy_(tmp)
def _swap_ema_state_dict(self, checkpoint):
"""Swap the state dict values of model with ema_model."""
model_state = checkpoint['state_dict']
ema_state = checkpoint['ema_state_dict']
for k in ema_state:
if k[:7] == 'module.':
tmp = ema_state[k]
ema_state[k] = model_state[k[7:]]
model_state[k[7:]] = tmp
|
from typing import Any, Literal
from langchain_core.messages import AIMessage
from langchain_core.outputs import ChatGeneration
from pydantic import BaseModel
from langchain_anthropic.output_parsers import ToolsOutputParser
_CONTENT: list = [
{
"type": "text",
"text": "thought",
},
{"type": "tool_use", "input": {"bar": 0}, "id": "1", "name": "_Foo1"},
{
"type": "text",
"text": "thought",
},
{"type": "tool_use", "input": {"baz": "a"}, "id": "2", "name": "_Foo2"},
]
_RESULT: list = [ChatGeneration(message=AIMessage(_CONTENT))] # type: ignore[misc]
class _Foo1(BaseModel):
bar: int
class _Foo2(BaseModel):
baz: Literal["a", "b"]
def test_tools_output_parser() -> None:
output_parser = ToolsOutputParser()
expected = [
{
"name": "_Foo1",
"args": {"bar": 0},
"id": "1",
"index": 1,
"type": "tool_call",
},
{
"name": "_Foo2",
"args": {"baz": "a"},
"id": "2",
"index": 3,
"type": "tool_call",
},
]
actual = output_parser.parse_result(_RESULT)
assert expected == actual
def test_tools_output_parser_args_only() -> None:
output_parser = ToolsOutputParser(args_only=True)
expected = [
{"bar": 0},
{"baz": "a"},
]
actual = output_parser.parse_result(_RESULT)
assert expected == actual
expected = []
actual = output_parser.parse_result([ChatGeneration(message=AIMessage(""))]) # type: ignore[misc]
assert expected == actual
def test_tools_output_parser_first_tool_only() -> None:
output_parser = ToolsOutputParser(first_tool_only=True)
expected: Any = {
"name": "_Foo1",
"args": {"bar": 0},
"id": "1",
"index": 1,
"type": "tool_call",
}
actual = output_parser.parse_result(_RESULT)
assert expected == actual
expected = None
actual = output_parser.parse_result([ChatGeneration(message=AIMessage(""))]) # type: ignore[misc]
assert expected == actual
def test_tools_output_parser_pydantic() -> None:
output_parser = ToolsOutputParser(pydantic_schemas=[_Foo1, _Foo2])
expected = [_Foo1(bar=0), _Foo2(baz="a")]
actual = output_parser.parse_result(_RESULT)
assert expected == actual
def test_tools_output_parser_empty_content() -> None:
class ChartType(BaseModel):
chart_type: Literal["pie", "line", "bar"]
output_parser = ToolsOutputParser(
first_tool_only=True, pydantic_schemas=[ChartType]
)
message = AIMessage(
"",
tool_calls=[
{
"name": "ChartType",
"args": {"chart_type": "pie"},
"id": "foo",
"type": "tool_call",
}
],
)
actual = output_parser.invoke(message)
expected = ChartType(chart_type="pie")
assert expected == actual
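# Note on the ``index`` values asserted above: they are the positions of the
# tool_use blocks within the message content list (indices 1 and 3 in
# _CONTENT), not a separate tool-call counter.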
|
from typing import Any, List, Literal
from langchain_core.messages import AIMessage
from langchain_core.outputs import ChatGeneration
from pydantic import BaseModel
from langchain_anthropic.output_parsers import ToolsOutputParser
_CONTENT: List = [
{
"type": "text",
"text": "thought",
},
{"type": "tool_use", "input": {"bar": 0}, "id": "1", "name": "_Foo1"},
{
"type": "text",
"text": "thought",
},
{"type": "tool_use", "input": {"baz": "a"}, "id": "2", "name": "_Foo2"},
]
_RESULT: List = [ChatGeneration(message=AIMessage(_CONTENT))] # type: ignore[misc]
class _Foo1(BaseModel):
bar: int
class _Foo2(BaseModel):
baz: Literal["a", "b"]
def test_tools_output_parser() -> None:
output_parser = ToolsOutputParser()
expected = [
{
"name": "_Foo1",
"args": {"bar": 0},
"id": "1",
"index": 1,
"type": "tool_call",
},
{
"name": "_Foo2",
"args": {"baz": "a"},
"id": "2",
"index": 3,
"type": "tool_call",
},
]
actual = output_parser.parse_result(_RESULT)
assert expected == actual
def test_tools_output_parser_args_only() -> None:
output_parser = ToolsOutputParser(args_only=True)
expected = [
{"bar": 0},
{"baz": "a"},
]
actual = output_parser.parse_result(_RESULT)
assert expected == actual
expected = []
actual = output_parser.parse_result([ChatGeneration(message=AIMessage(""))]) # type: ignore[misc]
assert expected == actual
def test_tools_output_parser_first_tool_only() -> None:
output_parser = ToolsOutputParser(first_tool_only=True)
expected: Any = {
"name": "_Foo1",
"args": {"bar": 0},
"id": "1",
"index": 1,
"type": "tool_call",
}
actual = output_parser.parse_result(_RESULT)
assert expected == actual
expected = None
actual = output_parser.parse_result([ChatGeneration(message=AIMessage(""))]) # type: ignore[misc]
assert expected == actual
def test_tools_output_parser_pydantic() -> None:
output_parser = ToolsOutputParser(pydantic_schemas=[_Foo1, _Foo2])
expected = [_Foo1(bar=0), _Foo2(baz="a")]
actual = output_parser.parse_result(_RESULT)
assert expected == actual
def test_tools_output_parser_empty_content() -> None:
class ChartType(BaseModel):
chart_type: Literal["pie", "line", "bar"]
output_parser = ToolsOutputParser(
first_tool_only=True, pydantic_schemas=[ChartType]
)
message = AIMessage(
"",
tool_calls=[
{
"name": "ChartType",
"args": {"chart_type": "pie"},
"id": "foo",
"type": "tool_call",
}
],
)
actual = output_parser.invoke(message)
expected = ChartType(chart_type="pie")
assert expected == actual
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "3.0.2.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .utils import *
from .utils import logging
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "3.0.1"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .utils import *
from .utils import logging
|
from typing import Any, Dict, Optional, Union
import numpy as np
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2.utils import is_simple_tensor
class PILToTensor(Transform):
"""[BETA] Convert a PIL Image to a tensor of the same type - this does not scale values.
.. v2betastatus:: PILToTensor transform
This transform does not support torchscript.
Converts a PIL Image (H x W x C) to a Tensor of shape (C x H x W).
"""
_transformed_types = (PIL.Image.Image,)
def _transform(self, inpt: PIL.Image.Image, params: Dict[str, Any]) -> torch.Tensor:
return F.pil_to_tensor(inpt)
class ToImage(Transform):
"""[BETA] Convert a tensor, ndarray, or PIL Image to :class:`~torchvision.datapoints.Image`
; this does not scale values.
.. v2betastatus:: ToImageTensor transform
This transform does not support torchscript.
"""
_transformed_types = (is_simple_tensor, PIL.Image.Image, np.ndarray)
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> datapoints.Image:
return F.to_image(inpt)
class ToPILImage(Transform):
"""[BETA] Convert a tensor or an ndarray to PIL Image - this does not scale values.
.. v2betastatus:: ToImagePIL transform
This transform does not support torchscript.
Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape
H x W x C to a PIL Image while preserving the value range.
Args:
mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
If ``mode`` is ``None`` (default) there are some assumptions made about the input data:
- If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``.
- If the input has 3 channels, the ``mode`` is assumed to be ``RGB``.
- If the input has 2 channels, the ``mode`` is assumed to be ``LA``.
- If the input has 1 channel, the ``mode`` is determined by the data type (i.e ``int``, ``float``,
``short``).
.. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes
"""
_transformed_types = (is_simple_tensor, datapoints.Image, np.ndarray)
def __init__(self, mode: Optional[str] = None) -> None:
super().__init__()
self.mode = mode
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> PIL.Image.Image:
return F.to_pil_image(inpt, mode=self.mode)
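# A minimal round-trip sketch for the three transforms above (none of them
# scale pixel values):
#
#   import PIL.Image
#   img = PIL.Image.new("RGB", (4, 3))
#   tensor = PILToTensor()(img)    # uint8 tensor of shape (3, 3, 4)
#   image = ToImage()(tensor)      # datapoints.Image wrapper
#   back = ToPILImage()(image)     # PIL.Image.Image again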
|
from typing import Any, Dict, Optional, Union
import numpy as np
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2.utils import is_simple_tensor
class PILToTensor(Transform):
"""[BETA] Convert a PIL Image to a tensor of the same type - this does not scale values.
.. v2betastatus:: PILToTensor transform
This transform does not support torchscript.
Converts a PIL Image (H x W x C) to a Tensor of shape (C x H x W).
"""
_transformed_types = (PIL.Image.Image,)
def _transform(self, inpt: PIL.Image.Image, params: Dict[str, Any]) -> torch.Tensor:
return F.pil_to_tensor(inpt)
class ToImageTensor(Transform):
"""[BETA] Convert a tensor, ndarray, or PIL Image to :class:`~torchvision.datapoints.Image`
; this does not scale values.
.. v2betastatus:: ToImageTensor transform
This transform does not support torchscript.
"""
_transformed_types = (is_simple_tensor, PIL.Image.Image, np.ndarray)
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> datapoints.Image:
return F.to_image_tensor(inpt)
class ToImagePIL(Transform):
"""[BETA] Convert a tensor or an ndarray to PIL Image - this does not scale values.
.. v2betastatus:: ToImagePIL transform
This transform does not support torchscript.
Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape
H x W x C to a PIL Image while preserving the value range.
Args:
mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
If ``mode`` is ``None`` (default) there are some assumptions made about the input data:
- If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``.
- If the input has 3 channels, the ``mode`` is assumed to be ``RGB``.
- If the input has 2 channels, the ``mode`` is assumed to be ``LA``.
- If the input has 1 channel, the ``mode`` is determined by the data type (i.e ``int``, ``float``,
``short``).
.. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes
"""
_transformed_types = (is_simple_tensor, datapoints.Image, np.ndarray)
def __init__(self, mode: Optional[str] = None) -> None:
super().__init__()
self.mode = mode
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> PIL.Image.Image:
return F.to_image_pil(inpt, mode=self.mode)
# We changed the name to align them with the new naming scheme. Still, `ToPILImage` is
# prevalent and well understood. Thus, we just alias it without deprecating the old name.
ToPILImage = ToImagePIL
|
_base_ = '../mask_rcnn/mask-rcnn_r101_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False))
|
_base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False))
|
import warnings
from typing import TYPE_CHECKING, Any, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.filetypes import AUDIO_FILE_FORMATS
from docarray.utils.misc import is_notebook
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='AudioUrl')
@_register_proto(proto_type_name='audio_url')
class AudioUrl(AnyUrl):
"""
    URL to an audio file.
    Can be a remote (web) URL or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, str, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
import os
from urllib.parse import urlparse
url = super().validate(value, field, config) # basic url validation
path = urlparse(url).path
ext = os.path.splitext(path)[1][1:].lower()
# pass test if extension is valid or no extension
has_audio_extension = ext in AUDIO_FILE_FORMATS or ext == ''
if not has_audio_extension:
raise ValueError('Audio URL must have a valid extension')
return cls(str(url), scheme=None)
def load(self: T) -> Tuple[np.ndarray, int]:
"""
Load the data from the url into an AudioNdArray.
:return: AudioNdArray representing the audio file content.
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDocument
import numpy as np
            from docarray.typing import AudioNdArray, AudioUrl
            class MyDoc(BaseDocument):
audio_url: AudioUrl
audio_tensor: AudioNdArray
doc = MyDoc(audio_url="toydata/hello.wav")
doc.audio_tensor, doc.frame_rate = doc.audio_url.load()
assert isinstance(doc.audio_tensor, np.ndarray)
"""
bytes_ = AudioBytes(self.load_bytes())
return bytes_.load()
def display(self):
"""
Play the audio sound from url in notebook.
"""
if is_notebook():
from IPython.display import Audio, display
            remote_url = self.startswith('http')
if remote_url:
display(Audio(data=self))
else:
display(Audio(filename=self))
else:
            warnings.warn('Display of audio is only possible in a notebook.')
|
import warnings
from typing import TYPE_CHECKING, Any, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.filetypes import AUDIO_FILE_FORMATS
from docarray.utils.misc import is_notebook
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='AudioUrl')
@_register_proto(proto_type_name='audio_url')
class AudioUrl(AnyUrl):
"""
    URL to an audio file.
    Can be a remote (web) URL or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
import os
from urllib.parse import urlparse
url = super().validate(value, field, config) # basic url validation
path = urlparse(url).path
ext = os.path.splitext(path)[1][1:].lower()
# pass test if extension is valid or no extension
has_audio_extension = ext in AUDIO_FILE_FORMATS or ext == ''
if not has_audio_extension:
raise ValueError('Audio URL must have a valid extension')
return cls(str(url), scheme=None)
def load(self: T) -> Tuple[np.ndarray, int]:
"""
Load the data from the url into an AudioNdArray.
:return: AudioNdArray representing the audio file content.
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDocument
import numpy as np
            from docarray.typing import AudioNdArray, AudioUrl
            class MyDoc(BaseDocument):
audio_url: AudioUrl
audio_tensor: AudioNdArray
doc = MyDoc(audio_url="toydata/hello.wav")
doc.audio_tensor, doc.frame_rate = doc.audio_url.load()
assert isinstance(doc.audio_tensor, np.ndarray)
"""
bytes_ = AudioBytes(self.load_bytes())
return bytes_.load()
def display(self):
"""
Play the audio sound from url in notebook.
"""
if is_notebook():
from IPython.display import Audio, display
            remote_url = self.startswith('http')
if remote_url:
display(Audio(data=self))
else:
display(Audio(filename=self))
else:
            warnings.warn('Display of audio is only possible in a notebook.')
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import unittest
import numpy as np
from mmengine.data import BaseDataElement as PixelData
from mmengine.data import InstanceData
from mmdet.data_elements import DetDataSample
from mmdet.data_elements.mask import BitmapMasks
from mmdet.datasets.transforms import PackDetInputs
class TestPackDetInputs(unittest.TestCase):
def setUp(self):
"""Setup the model and optimizer which are used in every test method.
TestCase calls functions in this order: setUp() -> testMethod() ->
tearDown() -> cleanUp()
"""
data_prefix = osp.join(osp.dirname(__file__), '../../data')
img_path = osp.join(data_prefix, 'color.jpg')
rng = np.random.RandomState(0)
self.results1 = {
'img_id': 1,
'img_path': img_path,
'ori_shape': (300, 400),
'img_shape': (600, 800),
'scale_factor': 2.0,
'flip': False,
'img': rng.rand(300, 400),
'gt_seg_map': rng.rand(300, 400),
'gt_masks':
BitmapMasks(rng.rand(2, 300, 400), height=300, width=400),
'gt_bboxes_labels': rng.rand(3, ),
            'gt_ignore_flags': np.array([0, 0, 1], dtype=bool),
'proposals': rng.rand(2, 4)
}
self.results2 = {
'img_id': 1,
'img_path': img_path,
'ori_shape': (300, 400),
'img_shape': (600, 800),
'scale_factor': 2.0,
'flip': False,
'img': rng.rand(300, 400),
'gt_seg_map': rng.rand(300, 400),
'gt_masks':
BitmapMasks(rng.rand(3, 300, 400), height=300, width=400),
'gt_bboxes_labels': rng.rand(3, ),
'proposals': rng.rand(2, 4)
}
self.meta_keys = ('img_id', 'img_path', 'ori_shape', 'scale_factor',
'flip')
def test_transform(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
results = transform(copy.deepcopy(self.results1))
self.assertIn('data_sample', results)
self.assertIsInstance(results['data_sample'], DetDataSample)
self.assertIsInstance(results['data_sample'].gt_instances,
InstanceData)
self.assertIsInstance(results['data_sample'].ignored_instances,
InstanceData)
self.assertEqual(len(results['data_sample'].gt_instances), 2)
self.assertEqual(len(results['data_sample'].ignored_instances), 1)
self.assertIsInstance(results['data_sample'].gt_sem_seg, PixelData)
self.assertIsInstance(results['data_sample'].proposals, InstanceData)
self.assertEqual(len(results['data_sample'].proposals), 2)
self.assertIsInstance(results['data_sample'].proposals.bboxes,
np.ndarray)
def test_transform_without_ignore(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
results = transform(copy.deepcopy(self.results2))
self.assertIn('data_sample', results)
self.assertIsInstance(results['data_sample'], DetDataSample)
self.assertIsInstance(results['data_sample'].gt_instances,
InstanceData)
self.assertIsInstance(results['data_sample'].ignored_instances,
InstanceData)
self.assertEqual(len(results['data_sample'].gt_instances), 3)
self.assertEqual(len(results['data_sample'].ignored_instances), 0)
self.assertIsInstance(results['data_sample'].gt_sem_seg, PixelData)
self.assertIsInstance(results['data_sample'].proposals, InstanceData)
self.assertEqual(len(results['data_sample'].proposals), 2)
self.assertIsInstance(results['data_sample'].proposals.bboxes,
np.ndarray)
def test_repr(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
self.assertEqual(
repr(transform), f'PackDetInputs(meta_keys={self.meta_keys})')
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import unittest
import numpy as np
from mmengine.data import BaseDataElement as PixelData
from mmengine.data import InstanceData
from mmdet.data_elements import DetDataSample
from mmdet.data_elements.mask import BitmapMasks
from mmdet.datasets.transforms import PackDetInputs
class TestPackDetInputs(unittest.TestCase):
def setUp(self):
"""Setup the model and optimizer which are used in every test method.
TestCase calls functions in this order: setUp() -> testMethod() ->
tearDown() -> cleanUp()
"""
data_prefix = osp.join(osp.dirname(__file__), '../../data')
img_path = osp.join(data_prefix, 'color.jpg')
rng = np.random.RandomState(0)
self.results1 = {
'img_id': 1,
'img_path': img_path,
'ori_shape': (300, 400),
'img_shape': (600, 800),
'scale_factor': 2.0,
'flip': False,
'img': rng.rand(300, 400),
'gt_seg_map': rng.rand(300, 400),
'gt_masks':
BitmapMasks(rng.rand(3, 300, 400), height=300, width=400),
'gt_bboxes_labels': rng.rand(3, ),
            'gt_ignore_flags': np.array([0, 0, 1], dtype=bool),
'proposals': rng.rand(2, 4)
}
self.results2 = {
'img_id': 1,
'img_path': img_path,
'ori_shape': (300, 400),
'img_shape': (600, 800),
'scale_factor': 2.0,
'flip': False,
'img': rng.rand(300, 400),
'gt_seg_map': rng.rand(300, 400),
'gt_masks':
BitmapMasks(rng.rand(3, 300, 400), height=300, width=400),
'gt_bboxes_labels': rng.rand(3, ),
'proposals': rng.rand(2, 4)
}
self.meta_keys = ('img_id', 'img_path', 'ori_shape', 'scale_factor',
'flip')
def test_transform(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
results = transform(copy.deepcopy(self.results1))
self.assertIn('data_sample', results)
self.assertIsInstance(results['data_sample'], DetDataSample)
self.assertIsInstance(results['data_sample'].gt_instances,
InstanceData)
self.assertIsInstance(results['data_sample'].ignored_instances,
InstanceData)
self.assertEqual(len(results['data_sample'].gt_instances), 2)
self.assertEqual(len(results['data_sample'].ignored_instances), 1)
self.assertIsInstance(results['data_sample'].gt_sem_seg, PixelData)
self.assertIsInstance(results['data_sample'].proposals, InstanceData)
self.assertEqual(len(results['data_sample'].proposals), 2)
self.assertIsInstance(results['data_sample'].proposals.bboxes,
np.ndarray)
def test_transform_without_ignore(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
results = transform(copy.deepcopy(self.results2))
self.assertIn('data_sample', results)
self.assertIsInstance(results['data_sample'], DetDataSample)
self.assertIsInstance(results['data_sample'].gt_instances,
InstanceData)
self.assertIsInstance(results['data_sample'].ignored_instances,
InstanceData)
self.assertEqual(len(results['data_sample'].gt_instances), 3)
self.assertEqual(len(results['data_sample'].ignored_instances), 0)
self.assertIsInstance(results['data_sample'].gt_sem_seg, PixelData)
self.assertIsInstance(results['data_sample'].proposals, InstanceData)
self.assertEqual(len(results['data_sample'].proposals), 2)
self.assertIsInstance(results['data_sample'].proposals.bboxes,
np.ndarray)
def test_repr(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
self.assertEqual(
repr(transform), f'PackDetInputs(meta_keys={self.meta_keys})')
|
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.financial_datasets import FinancialDatasetsAPIWrapper
class IncomeStatementsSchema(BaseModel):
"""Input for IncomeStatements."""
ticker: str = Field(
description="The ticker symbol to fetch income statements for.",
)
period: str = Field(
description="The period of the income statement. "
"Possible values are: "
"annual, quarterly, ttm. "
"Default is 'annual'.",
)
limit: int = Field(
description="The number of income statements to return. Default is 10.",
)
class IncomeStatements(BaseTool):
"""
Tool that gets income statements for a given ticker over a given period.
"""
mode: str = "get_income_statements"
name: str = "income_statements"
description: str = (
"A wrapper around financial datasets's Income Statements API. "
"This tool is useful for fetching income statements for a given ticker."
"The tool fetches income statements for a given ticker over a given period."
"The period can be annual, quarterly, or trailing twelve months (ttm)."
"The number of income statements to return can also be "
"specified using the limit parameter."
)
args_schema: Type[IncomeStatementsSchema] = IncomeStatementsSchema
api_wrapper: FinancialDatasetsAPIWrapper = Field(..., exclude=True)
def __init__(self, api_wrapper: FinancialDatasetsAPIWrapper):
super().__init__(api_wrapper=api_wrapper)
def _run(
self,
ticker: str,
period: str,
limit: Optional[int],
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the Income Statements API tool."""
return self.api_wrapper.run(
mode=self.mode,
ticker=ticker,
period=period,
limit=limit,
)
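# A minimal usage sketch (the wrapper's constructor/credential arguments are
# assumptions; check FinancialDatasetsAPIWrapper for the exact field names):
#
#   api_wrapper = FinancialDatasetsAPIWrapper(
#       financial_datasets_api_key="...")  # assumed credential kwarg
#   tool = IncomeStatements(api_wrapper)
#   print(tool._run(ticker="AAPL", period="annual", limit=5))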
|
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.financial_datasets import FinancialDatasetsAPIWrapper
class IncomeStatementsSchema(BaseModel):
"""Input for IncomeStatements."""
ticker: str = Field(
description="The ticker symbol to fetch income statements for.",
)
period: str = Field(
description="The period of the income statement. "
"Possible values are: "
"annual, quarterly, ttm. "
"Default is 'annual'.",
)
limit: int = Field(
description="The number of income statements to return. Default is 10.",
)
class IncomeStatements(BaseTool): # type: ignore[override, override]
"""
Tool that gets income statements for a given ticker over a given period.
"""
mode: str = "get_income_statements"
name: str = "income_statements"
description: str = (
"A wrapper around financial datasets's Income Statements API. "
"This tool is useful for fetching income statements for a given ticker."
"The tool fetches income statements for a given ticker over a given period."
"The period can be annual, quarterly, or trailing twelve months (ttm)."
"The number of income statements to return can also be "
"specified using the limit parameter."
)
args_schema: Type[IncomeStatementsSchema] = IncomeStatementsSchema
api_wrapper: FinancialDatasetsAPIWrapper = Field(..., exclude=True)
def __init__(self, api_wrapper: FinancialDatasetsAPIWrapper):
super().__init__(api_wrapper=api_wrapper)
def _run(
self,
ticker: str,
period: str,
limit: Optional[int],
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the Income Statements API tool."""
return self.api_wrapper.run(
mode=self.mode,
ticker=ticker,
period=period,
limit=limit,
)
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Tuple
import torch
from mmcv.ops import batched_nms
from mmengine.data import InstanceData
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.utils import InstanceList
from .standard_roi_head import StandardRoIHead
@MODELS.register_module()
class TridentRoIHead(StandardRoIHead):
"""Trident roi head.
Args:
num_branch (int): Number of branches in TridentNet.
test_branch_idx (int): In inference, all 3 branches will be used
if `test_branch_idx==-1`, otherwise only branch with index
`test_branch_idx` will be used.
"""
def __init__(self, num_branch: int, test_branch_idx: int,
**kwargs) -> None:
self.num_branch = num_branch
self.test_branch_idx = test_branch_idx
super().__init__(**kwargs)
def merge_trident_bboxes(self,
trident_results: InstanceList) -> InstanceData:
"""Merge bbox predictions of each branch.
Args:
trident_results (List[:obj:`InstanceData`]): A list of InstanceData
predicted from every branch.
Returns:
:obj:`InstanceData`: merged InstanceData.
"""
bboxes = torch.cat([res.bboxes for res in trident_results])
scores = torch.cat([res.scores for res in trident_results])
labels = torch.cat([res.labels for res in trident_results])
nms_cfg = self.test_cfg['nms']
results = InstanceData()
if bboxes.numel() == 0:
results.bboxes = bboxes
results.scores = scores
results.labels = labels
else:
det_bboxes, keep = batched_nms(bboxes, scores, labels, nms_cfg)
results.bboxes = det_bboxes[:, :-1]
results.scores = det_bboxes[:, -1]
results.labels = labels[keep]
if self.test_cfg['max_per_img'] > 0:
results = results[:self.test_cfg['max_per_img']]
return results
def predict(self,
x: Tuple[Tensor],
rpn_results_list: InstanceList,
batch_data_samples: SampleList,
rescale: bool = False) -> InstanceList:
"""Perform forward propagation of the roi head and predict detection
results on the features of the upstream network.
- Compute prediction bbox and label per branch.
- Merge predictions of each branch according to scores of
bboxes, i.e., bboxes with higher score are kept to give
top-k prediction.
Args:
x (tuple[Tensor]): Features from upstream network. Each
has shape (N, C, H, W).
rpn_results_list (list[:obj:`InstanceData`]): list of region
proposals.
batch_data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
            rescale (bool): Whether to rescale the results to
                the original image. Defaults to False.
Returns:
list[obj:`InstanceData`]: Detection results of each image.
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
"""
results_list = super().predict(
x=x,
rpn_results_list=rpn_results_list,
batch_data_samples=batch_data_samples,
rescale=rescale)
num_branch = self.num_branch \
if self.training or self.test_branch_idx == -1 else 1
merged_results_list = []
for i in range(len(batch_data_samples) // num_branch):
merged_results_list.append(
self.merge_trident_bboxes(results_list[i * num_branch:(i + 1) *
num_branch]))
return merged_results_list
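# Note for merge_trident_bboxes above: mmcv's batched_nms returns
# (det_bboxes, keep), where det_bboxes has the NMS score appended as its last
# column; hence the [:, :-1] / [:, -1] split when filling ``results``.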
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Tuple
import torch
from mmcv.ops import batched_nms
from mmengine.data import InstanceData
from torch import Tensor
from mmdet.data_elements import SampleList
from mmdet.registry import MODELS
from mmdet.utils import InstanceList
from .standard_roi_head import StandardRoIHead
@MODELS.register_module()
class TridentRoIHead(StandardRoIHead):
"""Trident roi head.
Args:
num_branch (int): Number of branches in TridentNet.
test_branch_idx (int): In inference, all 3 branches will be used
if `test_branch_idx==-1`, otherwise only branch with index
`test_branch_idx` will be used.
"""
def __init__(self, num_branch: int, test_branch_idx: int,
**kwargs) -> None:
self.num_branch = num_branch
self.test_branch_idx = test_branch_idx
super().__init__(**kwargs)
def merge_trident_bboxes(self,
trident_results: InstanceList) -> InstanceData:
"""Merge bbox predictions of each branch.
Args:
trident_results (List[:obj:`InstanceData`]): A list of InstanceData
predicted from every branch.
Returns:
:obj:`InstanceData`: merged InstanceData.
"""
bboxes = torch.cat([res.bboxes for res in trident_results])
scores = torch.cat([res.scores for res in trident_results])
labels = torch.cat([res.labels for res in trident_results])
nms_cfg = self.test_cfg['nms']
results = InstanceData()
if bboxes.numel() == 0:
results.bboxes = bboxes
results.scores = scores
results.labels = labels
else:
det_bboxes, keep = batched_nms(bboxes, scores, labels, nms_cfg)
results.bboxes = det_bboxes[:, :-1]
results.scores = det_bboxes[:, -1]
results.labels = labels[keep]
if self.test_cfg['max_per_img'] > 0:
results = results[:self.test_cfg['max_per_img']]
return results
def predict(self,
x: Tuple[Tensor],
rpn_results_list: InstanceList,
batch_data_samples: SampleList,
rescale: bool = False) -> InstanceList:
"""Perform forward propagation of the roi head and predict detection
results on the features of the upstream network.
- Compute prediction bbox and label per branch.
- Merge predictions of each branch according to scores of
bboxes, i.e., bboxes with higher score are kept to give
top-k prediction.
Args:
x (tuple[Tensor]): Features from upstream network. Each
has shape (N, C, H, W).
rpn_results_list (list[:obj:`InstanceData`]): list of region
proposals.
batch_data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
            rescale (bool): Whether to rescale the results to
                the original image. Defaults to False.
Returns:
list[obj:`InstanceData`]: Detection results of each image.
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
"""
results_list = super().predict(
x=x,
rpn_results_list=rpn_results_list,
batch_data_samples=batch_data_samples,
rescale=rescale)
num_branch = self.num_branch \
if self.training or self.test_branch_idx == -1 else 1
merged_results_list = []
for i in range(len(batch_data_samples) // num_branch):
merged_results_list.append(
self.merge_trident_bboxes(results_list[i * num_branch:(i + 1) *
num_branch]))
return merged_results_list
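# A minimal sketch (editor's illustration, not part of mmdet): how `predict`
# regroups the flat per-branch results before merging. With 2 images and
# num_branch=3, `results_list` holds 6 entries, branch-major per image.
if __name__ == '__main__':
    num_branch = 3
    results_list = ['img0_b0', 'img0_b1', 'img0_b2',
                    'img1_b0', 'img1_b1', 'img1_b2']
    groups = [
        results_list[i * num_branch:(i + 1) * num_branch]
        for i in range(len(results_list) // num_branch)
    ]
    assert groups == [['img0_b0', 'img0_b1', 'img0_b2'],
                      ['img1_b0', 'img1_b1', 'img1_b2']]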
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
from mmengine.testing import assert_allclose
from mmdet.structures.bbox import BaseBoxes, HorizontalBoxes
from mmdet.structures.mask import BitmapMasks, PolygonMasks
def create_random_bboxes(num_bboxes, img_w, img_h):
bboxes_left_top = np.random.uniform(0, 0.5, size=(num_bboxes, 2))
bboxes_right_bottom = np.random.uniform(0.5, 1, size=(num_bboxes, 2))
bboxes = np.concatenate((bboxes_left_top, bboxes_right_bottom), 1)
bboxes = (bboxes * np.array([img_w, img_h, img_w, img_h])).astype(
np.float32)
return bboxes
def create_full_masks(gt_bboxes, img_w, img_h):
xmin, ymin = gt_bboxes[:, 0:1], gt_bboxes[:, 1:2]
xmax, ymax = gt_bboxes[:, 2:3], gt_bboxes[:, 3:4]
gt_masks = np.zeros((len(gt_bboxes), img_h, img_w), dtype=np.uint8)
for i in range(len(gt_bboxes)):
gt_masks[i, int(ymin[i]):int(ymax[i]), int(xmin[i]):int(xmax[i])] = 1
gt_masks = BitmapMasks(gt_masks, img_h, img_w)
return gt_masks
def construct_toy_data(poly2mask, with_boxlist=False):
img = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
dtype=np.uint8)
img = np.stack([img, img, img], axis=-1)
results = dict()
results['img'] = img
results['img_shape'] = img.shape[:2]
if with_boxlist:
results['gt_bboxes'] = HorizontalBoxes(
np.array([[1, 0, 2, 2]], dtype=np.float32))
else:
results['gt_bboxes'] = np.array([[1, 0, 2, 2]], dtype=np.float32)
results['gt_bboxes_labels'] = np.array([13], dtype=np.int64)
if poly2mask:
gt_masks = np.array([[0, 1, 0, 0], [0, 1, 1, 0], [0, 1, 0, 0]],
dtype=np.uint8)[None, :, :]
results['gt_masks'] = BitmapMasks(gt_masks, 3, 4)
else:
raw_masks = [[np.array([1, 2, 1, 0, 2, 1], dtype=np.float32)]]
results['gt_masks'] = PolygonMasks(raw_masks, 3, 4)
    results['gt_ignore_flags'] = np.array([1], dtype=bool)
results['gt_seg_map'] = np.array(
[[255, 13, 255, 255], [255, 13, 13, 255], [255, 13, 255, 255]],
dtype=np.uint8)
return results
def check_result_same(results, pipeline_results, check_keys):
"""Check whether the ``pipeline_results`` is the same with the predefined
``results``.
Args:
results (dict): Predefined results which should be the standard
output of the transform pipeline.
pipeline_results (dict): Results processed by the transform
pipeline.
check_keys (tuple): Keys that need to be checked between
results and pipeline_results.
"""
for key in check_keys:
if results.get(key, None) is None:
continue
if isinstance(results[key], (BitmapMasks, PolygonMasks)):
assert_allclose(pipeline_results[key].to_ndarray(),
results[key].to_ndarray())
elif isinstance(results[key], BaseBoxes):
assert_allclose(pipeline_results[key].tensor, results[key].tensor)
else:
assert_allclose(pipeline_results[key], results[key])
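# A minimal usage sketch (editor's illustration; assumes mmdet is installed):
# an identity "pipeline" trivially passes the comparison against the toy data.
if __name__ == '__main__':
    results = construct_toy_data(poly2mask=True)
    pipeline_results = dict(results)  # stand-in for a real transform's output
    check_result_same(results, pipeline_results,
                      check_keys=('gt_bboxes', 'gt_bboxes_labels', 'gt_masks'))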
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
from mmengine.testing import assert_allclose
from mmdet.structures.mask import BitmapMasks, PolygonMasks
def create_random_bboxes(num_bboxes, img_w, img_h):
bboxes_left_top = np.random.uniform(0, 0.5, size=(num_bboxes, 2))
bboxes_right_bottom = np.random.uniform(0.5, 1, size=(num_bboxes, 2))
bboxes = np.concatenate((bboxes_left_top, bboxes_right_bottom), 1)
bboxes = (bboxes * np.array([img_w, img_h, img_w, img_h])).astype(
np.float32)
return bboxes
def create_full_masks(gt_bboxes, img_w, img_h):
xmin, ymin = gt_bboxes[:, 0:1], gt_bboxes[:, 1:2]
xmax, ymax = gt_bboxes[:, 2:3], gt_bboxes[:, 3:4]
gt_masks = np.zeros((len(gt_bboxes), img_h, img_w), dtype=np.uint8)
for i in range(len(gt_bboxes)):
gt_masks[i, int(ymin[i]):int(ymax[i]), int(xmin[i]):int(xmax[i])] = 1
gt_masks = BitmapMasks(gt_masks, img_h, img_w)
return gt_masks
def construct_toy_data(poly2mask):
img = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
dtype=np.uint8)
img = np.stack([img, img, img], axis=-1)
results = dict()
results['img'] = img
results['img_shape'] = img.shape[:2]
results['gt_bboxes'] = np.array([[1, 0, 2, 2]], dtype=np.float32)
results['gt_bboxes_labels'] = np.array([13], dtype=np.int64)
if poly2mask:
gt_masks = np.array([[0, 1, 0, 0], [0, 1, 1, 0], [0, 1, 0, 0]],
dtype=np.uint8)[None, :, :]
results['gt_masks'] = BitmapMasks(gt_masks, 3, 4)
else:
raw_masks = [[np.array([1, 2, 1, 0, 2, 1], dtype=np.float32)]]
results['gt_masks'] = PolygonMasks(raw_masks, 3, 4)
    results['gt_ignore_flags'] = np.array([1], dtype=bool)
results['gt_seg_map'] = np.array(
[[255, 13, 255, 255], [255, 13, 13, 255], [255, 13, 255, 255]],
dtype=np.uint8)
return results
def check_result_same(results, pipeline_results, check_keys):
"""Check whether the ``pipeline_results`` is the same with the predefined
``results``.
Args:
results (dict): Predefined results which should be the standard
output of the transform pipeline.
pipeline_results (dict): Results processed by the transform
pipeline.
check_keys (tuple): Keys that need to be checked between
results and pipeline_results.
"""
for key in check_keys:
if results.get(key, None) is None:
continue
if isinstance(results[key], (BitmapMasks, PolygonMasks)):
assert_allclose(pipeline_results[key].to_ndarray(),
results[key].to_ndarray())
else:
assert_allclose(pipeline_results[key], results[key])
|
import json
import re
from re import Pattern
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
from langchain.agents.chat.prompt import FORMAT_INSTRUCTIONS
FINAL_ANSWER_ACTION = "Final Answer:"
class ReActJsonSingleInputOutputParser(AgentOutputParser):
"""Parses ReAct-style LLM calls that have a single tool input in json format.
Expects output to be in one of two formats.
If the output signals that an action should be taken,
should be in the below format. This will result in an AgentAction
being returned.
```
Thought: agent thought here
Action:
```
{
"action": "search",
"action_input": "what is the temperature in SF"
}
```
```
If the output signals that a final answer should be given,
should be in the below format. This will result in an AgentFinish
being returned.
```
Thought: agent thought here
Final Answer: The temperature is 100 degrees
```
"""
pattern: Pattern = re.compile(r"^.*?`{3}(?:json)?\n?(.*?)`{3}.*?$", re.DOTALL)
"""Regex pattern to parse the output."""
def get_format_instructions(self) -> str:
return FORMAT_INSTRUCTIONS
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
includes_answer = FINAL_ANSWER_ACTION in text
try:
found = self.pattern.search(text)
if not found:
# Fast fail to parse Final Answer.
msg = "action not found"
raise ValueError(msg)
action = found.group(1)
response = json.loads(action.strip())
includes_action = "action" in response
if includes_answer and includes_action:
msg = (
"Parsing LLM output produced a final answer "
f"and a parse-able action: {text}"
)
raise OutputParserException(msg)
return AgentAction(
response["action"],
response.get("action_input", {}),
text,
)
except Exception as e:
if not includes_answer:
msg = f"Could not parse LLM output: {text}"
raise OutputParserException(msg) from e
output = text.split(FINAL_ANSWER_ACTION)[-1].strip()
return AgentFinish({"output": output}, text)
@property
def _type(self) -> str:
return "react-json-single-input"
|
import json
import re
from re import Pattern
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
from langchain.agents.chat.prompt import FORMAT_INSTRUCTIONS
FINAL_ANSWER_ACTION = "Final Answer:"
class ReActJsonSingleInputOutputParser(AgentOutputParser):
"""Parses ReAct-style LLM calls that have a single tool input in json format.
Expects output to be in one of two formats.
If the output signals that an action should be taken,
should be in the below format. This will result in an AgentAction
being returned.
```
Thought: agent thought here
Action:
```
{
"action": "search",
"action_input": "what is the temperature in SF"
}
```
```
If the output signals that a final answer should be given,
should be in the below format. This will result in an AgentFinish
being returned.
```
Thought: agent thought here
Final Answer: The temperature is 100 degrees
```
"""
pattern: Pattern = re.compile(r"^.*?`{3}(?:json)?\n?(.*?)`{3}.*?$", re.DOTALL)
"""Regex pattern to parse the output."""
def get_format_instructions(self) -> str:
return FORMAT_INSTRUCTIONS
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
includes_answer = FINAL_ANSWER_ACTION in text
try:
found = self.pattern.search(text)
if not found:
# Fast fail to parse Final Answer.
msg = "action not found"
raise ValueError(msg)
action = found.group(1)
response = json.loads(action.strip())
includes_action = "action" in response
if includes_answer and includes_action:
msg = (
"Parsing LLM output produced a final answer "
f"and a parse-able action: {text}"
)
raise OutputParserException(msg)
return AgentAction(
response["action"],
response.get("action_input", {}),
text,
)
except Exception:
if not includes_answer:
msg = f"Could not parse LLM output: {text}"
raise OutputParserException(msg)
output = text.split(FINAL_ANSWER_ACTION)[-1].strip()
return AgentFinish({"output": output}, text)
@property
def _type(self) -> str:
return "react-json-single-input"
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning)
# attempt to fix the fork error on macOS; it seems to have no effect, so the
# variable must be exported manually before Jina starts
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
_set_start_method('fork')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.14.0'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.13'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096, useful for running matplotlib/seaborn
    with many plot generators executing in parallel against the Ubuntu default of
    ``ulimit -n 1024`` or the OS X El Capitan default of 256; the setting is temporary
    and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
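# Editor's illustrative sketch: `_set_nofile` returns the (possibly raised)
# soft/hard limits, or (None, None) on Windows where `resource` is missing.
if __name__ == '__main__':
    _soft_limit, _hard_limit = _set_nofile(4096)
    print(f'RLIMIT_NOFILE soft={_soft_limit}, hard={_hard_limit}')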
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.gateway import BaseGateway as Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning)
# attempt to fix the fork error on macOS; it seems to have no effect, so the
# variable must be exported manually before Jina starts
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
_set_start_method('fork')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.13.2'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.13'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096, useful for running matplotlib/seaborn
    with many plot generators executing in parallel against the Ubuntu default of
    ``ulimit -n 1024`` or the OS X El Capitan default of 256; the setting is temporary
    and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.gateway import BaseGateway as Gateway
|
"""Standard LangChain interface tests"""
from typing import Type
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_community.chat_models import ChatPerplexity
class TestPerplexityStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return ChatPerplexity
@property
def chat_model_params(self) -> dict:
return {"model": "sonar"}
@pytest.mark.xfail(reason="TODO: handle in integration.")
def test_double_messages_conversation(self, model: BaseChatModel) -> None:
super().test_double_messages_conversation(model)
@pytest.mark.xfail(reason="Raises 400: Custom stop words not supported.")
def test_stop_sequence(self, model: BaseChatModel) -> None:
super().test_stop_sequence(model)
|
"""Standard LangChain interface tests"""
from typing import Type
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_community.chat_models import ChatPerplexity
class TestPerplexityStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return ChatPerplexity
@property
def chat_model_params(self) -> dict:
return {"model": "sonar"}
@property
def returns_usage_metadata(self) -> bool:
# TODO: add usage metadata and delete this property
# https://docs.perplexity.ai/api-reference/chat-completions#response-usage
return False
@pytest.mark.xfail(reason="TODO: handle in integration.")
def test_double_messages_conversation(self, model: BaseChatModel) -> None:
super().test_double_messages_conversation(model)
@pytest.mark.xfail(reason="Raises 400: Custom stop words not supported.")
def test_stop_sequence(self, model: BaseChatModel) -> None:
super().test_stop_sequence(model)
|
"""Run smoke tests"""
import torchaudio # noqa: F401
import torchaudio.compliance.kaldi # noqa: F401
import torchaudio.datasets # noqa: F401
import torchaudio.functional # noqa: F401
import torchaudio.models # noqa: F401
import torchaudio.pipelines # noqa: F401
import torchaudio.sox_effects # noqa: F401
import torchaudio.transforms # noqa: F401
import torchaudio.utils # noqa: F401
from torchaudio.io import StreamReader # noqa: F401
|
"""Run smoke tests"""
import torchaudio # noqa: F401
import torchaudio.compliance.kaldi # noqa: F401
import torchaudio.datasets # noqa: F401
import torchaudio.functional # noqa: F401
import torchaudio.models # noqa: F401
import torchaudio.pipelines # noqa: F401
import torchaudio.sox_effects # noqa: F401
import torchaudio.transforms # noqa: F401
import torchaudio.utils # noqa: F401
from torchaudio.prototype.io import Streamer # noqa: F401
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.sql.prompt import (
SQL_FUNCTIONS_SUFFIX,
SQL_PREFIX,
SQL_SUFFIX,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"SQL_PREFIX": "langchain_community.agent_toolkits.sql.prompt",
"SQL_SUFFIX": "langchain_community.agent_toolkits.sql.prompt",
"SQL_FUNCTIONS_SUFFIX": "langchain_community.agent_toolkits.sql.prompt",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = ["SQL_FUNCTIONS_SUFFIX", "SQL_PREFIX", "SQL_SUFFIX"]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.sql.prompt import (
SQL_FUNCTIONS_SUFFIX,
SQL_PREFIX,
SQL_SUFFIX,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"SQL_PREFIX": "langchain_community.agent_toolkits.sql.prompt",
"SQL_SUFFIX": "langchain_community.agent_toolkits.sql.prompt",
"SQL_FUNCTIONS_SUFFIX": "langchain_community.agent_toolkits.sql.prompt",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = ["SQL_PREFIX", "SQL_SUFFIX", "SQL_FUNCTIONS_SUFFIX"]
|
# coding: utf-8
from pathlib import Path
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import GridSearchCV
import lightgbm as lgb
print("Loading data...")
# load or create your dataset
regression_example_dir = Path(__file__).absolute().parents[1] / "regression"
df_train = pd.read_csv(str(regression_example_dir / "regression.train"), header=None, sep="\t")
df_test = pd.read_csv(str(regression_example_dir / "regression.test"), header=None, sep="\t")
y_train = df_train[0]
y_test = df_test[0]
X_train = df_train.drop(0, axis=1)
X_test = df_test.drop(0, axis=1)
print("Starting training...")
# train
gbm = lgb.LGBMRegressor(num_leaves=31, learning_rate=0.05, n_estimators=20)
gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], eval_metric="l1", callbacks=[lgb.early_stopping(5)])
print("Starting predicting...")
# predict
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration_)
# eval
rmse_test = mean_squared_error(y_test, y_pred) ** 0.5
print(f"The RMSE of prediction is: {rmse_test}")
# feature importances
print(f"Feature importances: {list(gbm.feature_importances_)}")
# self-defined eval metric
# f(y_true: array, y_pred: array) -> name: str, eval_result: float, is_higher_better: bool
# Root Mean Squared Logarithmic Error (RMSLE)
def rmsle(y_true, y_pred):
return "RMSLE", np.sqrt(np.mean(np.power(np.log1p(y_pred) - np.log1p(y_true), 2))), False
print("Starting training with custom eval function...")
# train
gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], eval_metric=rmsle, callbacks=[lgb.early_stopping(5)])
# another self-defined eval metric
# f(y_true: array, y_pred: array) -> name: str, eval_result: float, is_higher_better: bool
# Relative Absolute Error (RAE)
def rae(y_true, y_pred):
return "RAE", np.sum(np.abs(y_pred - y_true)) / np.sum(np.abs(np.mean(y_true) - y_true)), False
print("Starting training with multiple custom eval functions...")
# train
gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], eval_metric=[rmsle, rae], callbacks=[lgb.early_stopping(5)])
print("Starting predicting...")
# predict
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration_)
# eval
rmsle_test = rmsle(y_test, y_pred)[1]
rae_test = rae(y_test, y_pred)[1]
print(f"The RMSLE of prediction is: {rmsle_test}")
print(f"The RAE of prediction is: {rae_test}")
# other scikit-learn modules
estimator = lgb.LGBMRegressor(num_leaves=31)
param_grid = {"learning_rate": [0.01, 0.1, 1], "n_estimators": [20, 40]}
gbm = GridSearchCV(estimator, param_grid, cv=3)
gbm.fit(X_train, y_train)
print(f"Best parameters found by grid search are: {gbm.best_params_}")
|
# coding: utf-8
from pathlib import Path
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import GridSearchCV
import lightgbm as lgb
print('Loading data...')
# load or create your dataset
regression_example_dir = Path(__file__).absolute().parents[1] / 'regression'
df_train = pd.read_csv(str(regression_example_dir / 'regression.train'), header=None, sep='\t')
df_test = pd.read_csv(str(regression_example_dir / 'regression.test'), header=None, sep='\t')
y_train = df_train[0]
y_test = df_test[0]
X_train = df_train.drop(0, axis=1)
X_test = df_test.drop(0, axis=1)
print('Starting training...')
# train
gbm = lgb.LGBMRegressor(num_leaves=31,
learning_rate=0.05,
n_estimators=20)
gbm.fit(X_train, y_train,
eval_set=[(X_test, y_test)],
eval_metric='l1',
callbacks=[lgb.early_stopping(5)])
print('Starting predicting...')
# predict
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration_)
# eval
rmse_test = mean_squared_error(y_test, y_pred) ** 0.5
print(f'The RMSE of prediction is: {rmse_test}')
# feature importances
print(f'Feature importances: {list(gbm.feature_importances_)}')
# self-defined eval metric
# f(y_true: array, y_pred: array) -> name: str, eval_result: float, is_higher_better: bool
# Root Mean Squared Logarithmic Error (RMSLE)
def rmsle(y_true, y_pred):
return 'RMSLE', np.sqrt(np.mean(np.power(np.log1p(y_pred) - np.log1p(y_true), 2))), False
print('Starting training with custom eval function...')
# train
gbm.fit(X_train, y_train,
eval_set=[(X_test, y_test)],
eval_metric=rmsle,
callbacks=[lgb.early_stopping(5)])
# another self-defined eval metric
# f(y_true: array, y_pred: array) -> name: str, eval_result: float, is_higher_better: bool
# Relative Absolute Error (RAE)
def rae(y_true, y_pred):
return 'RAE', np.sum(np.abs(y_pred - y_true)) / np.sum(np.abs(np.mean(y_true) - y_true)), False
print('Starting training with multiple custom eval functions...')
# train
gbm.fit(X_train, y_train,
eval_set=[(X_test, y_test)],
eval_metric=[rmsle, rae],
callbacks=[lgb.early_stopping(5)])
print('Starting predicting...')
# predict
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration_)
# eval
rmsle_test = rmsle(y_test, y_pred)[1]
rae_test = rae(y_test, y_pred)[1]
print(f'The RMSLE of prediction is: {rmsle_test}')
print(f'The RAE of prediction is: {rae_test}')
# other scikit-learn modules
estimator = lgb.LGBMRegressor(num_leaves=31)
param_grid = {
'learning_rate': [0.01, 0.1, 1],
'n_estimators': [20, 40]
}
gbm = GridSearchCV(estimator, param_grid, cv=3)
gbm.fit(X_train, y_train)
print(f'Best parameters found by grid search are: {gbm.best_params_}')
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import subprocess
import pytest
from jina import Document, Flow
from ...video_torch_encoder import VideoTorchEncoder
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture()
def kinects_videos():
from torchvision.datasets import Kinetics400
dataset = Kinetics400(
root=os.path.join(cur_dir, '../data/kinetics400'), frames_per_clip=20
)
return [dataset[0][0], dataset[0][0]]
def test_video_torch_encoder(kinects_videos):
f = Flow().add(uses=VideoTorchEncoder)
with f:
resp = f.post(
on='/test',
inputs=[Document(blob=video.detach().numpy()) for video in kinects_videos],
return_results=True,
)
assert resp[0].docs[0].embedding.shape == (512,)
assert resp[0].docs[1].embedding.shape == (512,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'pea',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:cuda',
],
timeout=30,
check=True,
)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import pytest
from jina import Document, Flow
from ...video_torch_encoder import VideoTorchEncoder
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture()
def kinects_videos():
from torchvision.datasets import Kinetics400
dataset = Kinetics400(
root=os.path.join(cur_dir, '../data/kinetics400'), frames_per_clip=20
)
return [dataset[0][0], dataset[0][0]]
def test_video_torch_encoder(kinects_videos):
f = Flow().add(uses=VideoTorchEncoder)
with f:
resp = f.post(
on='/test',
inputs=[Document(blob=video.detach().numpy()) for video in kinects_videos],
return_results=True,
)
assert resp[0].docs[0].embedding.shape == (512,)
assert resp[0].docs[1].embedding.shape == (512,)
|
from typing import (
TYPE_CHECKING,
Iterable,
)
from docarray.array.memory import DocumentArrayInMemory
if TYPE_CHECKING: # pragma: no cover
from docarray.document import Document
class MatchArray(DocumentArrayInMemory):
"""
:class:`MatchArray` inherits from :class:`DocumentArray`.
It's a subset of Documents that represents the matches
:param docs: Set of matches of the `reference_doc`
:param reference_doc: Reference :class:`Document` for the sub-documents
"""
def __init__(self, docs, reference_doc: 'Document'):
self._ref_doc = reference_doc
super().__init__(docs)
if isinstance(docs, Iterable) and self._ref_doc is not None:
for d in docs:
d.adjacency = self._ref_doc.adjacency + 1
def append(self, document: 'Document'):
"""Add a matched document to the current Document.
:param document: Sub-document to be added
"""
document.adjacency = self._ref_doc.adjacency + 1
super().append(document)
@property
def reference_doc(self) -> 'Document':
"""Get the document that this :class:`MatchArray` referring to.
:return: the document the match refers to
"""
return self._ref_doc
@property
def granularity(self) -> int:
"""Get granularity of all document in this array.
:return: the granularity of the documents of which these are match
"""
return self._ref_doc.granularity
@property
def adjacency(self) -> int:
"""Get the adjacency of all document in this array.
:return: the adjacency of the array of matches
"""
return self._ref_doc.adjacency + 1
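# A minimal sketch (editor's illustration; assumes the legacy docarray (<0.30)
# Document API): a match's adjacency is always the reference doc's plus one.
if __name__ == '__main__':
    from docarray import Document
    ref = Document(text='query')
    ref.matches.append(Document(text='hit'))
    assert ref.matches[0].adjacency == ref.adjacency + 1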
|
from typing import (
TYPE_CHECKING,
Iterable,
)
from docarray.array.memory import DocumentArrayInMemory
if TYPE_CHECKING:
from docarray.document import Document
class MatchArray(DocumentArrayInMemory):
"""
:class:`MatchArray` inherits from :class:`DocumentArray`.
It's a subset of Documents that represents the matches
:param docs: Set of matches of the `reference_doc`
:param reference_doc: Reference :class:`Document` for the sub-documents
"""
def __init__(self, docs, reference_doc: 'Document'):
self._ref_doc = reference_doc
super().__init__(docs)
if isinstance(docs, Iterable) and self._ref_doc is not None:
for d in docs:
d.adjacency = self._ref_doc.adjacency + 1
def append(self, document: 'Document'):
"""Add a matched document to the current Document.
:param document: Sub-document to be added
"""
document.adjacency = self._ref_doc.adjacency + 1
super().append(document)
@property
def reference_doc(self) -> 'Document':
"""Get the document that this :class:`MatchArray` referring to.
:return: the document the match refers to
"""
return self._ref_doc
@property
def granularity(self) -> int:
"""Get granularity of all document in this array.
:return: the granularity of the documents of which these are match
"""
return self._ref_doc.granularity
@property
def adjacency(self) -> int:
"""Get the adjacency of all document in this array.
:return: the adjacency of the array of matches
"""
return self._ref_doc.adjacency + 1
|
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union, cast
import numpy as np
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.utils._internal.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
import torch
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor
from docarray.typing.tensor.torch_tensor import TorchTensor
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
from docarray.typing.tensor.audio.audio_tensorflow_tensor import (
AudioTensorFlowTensor,
)
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar("T", bound="AudioTensor")
class AudioTensor(AnyTensor, AbstractAudioTensor):
"""
    Represents an audio tensor object that is compatible with TensorFlow, PyTorch, and NumPy.
---
'''python
from docarray import BaseDoc
from docarray.typing import AudioTensor
class MyAudioDoc(BaseDoc):
tensor: AudioTensor
# Example usage with TensorFlow:
import tensorflow as tf
    doc = MyAudioDoc(tensor=tf.zeros((1000, 2)))
type(doc.tensor) # AudioTensorFlowTensor
# Example usage with PyTorch:
import torch
doc = MyAudioDoc(tensor=torch.zeros(1000, 2))
type(doc.tensor) # AudioTorchTensor
# Example usage with NumPy:
import numpy as np
doc = MyAudioDoc(tensor=np.zeros((1000, 2)))
type(doc.tensor) # AudioNdArray
'''
---
Raises:
TypeError: If the input value is not a compatible type (torch.Tensor, tensorflow.Tensor, numpy.ndarray).
"""
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, Any],
field: "ModelField",
config: "BaseConfig",
):
if torch_available:
if isinstance(value, TorchTensor):
return cast(AudioTorchTensor, value)
elif isinstance(value, torch.Tensor):
return AudioTorchTensor._docarray_from_native(value) # noqa
if tf_available:
if isinstance(value, TensorFlowTensor):
return cast(AudioTensorFlowTensor, value)
elif isinstance(value, tf.Tensor):
return AudioTensorFlowTensor._docarray_from_native(value) # noqa
try:
return AudioNdArray.validate(value, field, config)
except Exception: # noqa
pass
raise TypeError(
f"Expected one of [torch.Tensor, tensorflow.Tensor, numpy.ndarray] "
f"compatible type, got {type(value)}"
)
|
from typing import Union
from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray
from docarray.utils._internal.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor
tf_available = is_tf_available()
if tf_available:
from docarray.typing.tensor.audio.audio_tensorflow_tensor import (
AudioTensorFlowTensor as AudioTFTensor,
)
AudioTensor = AudioNdArray
if tf_available and torch_available:
AudioTensor = Union[AudioNdArray, AudioTorchTensor, AudioTFTensor] # type: ignore
elif tf_available:
AudioTensor = Union[AudioNdArray, AudioTFTensor] # type: ignore
elif torch_available:
AudioTensor = Union[AudioNdArray, AudioTorchTensor] # type: ignore
|
import random
import numpy as np
import torch
from torchvision import transforms as T
from torchvision.transforms import functional as F
def pad_if_smaller(img, size, fill=0):
min_size = min(img.size)
if min_size < size:
ow, oh = img.size
padh = size - oh if oh < size else 0
padw = size - ow if ow < size else 0
img = F.pad(img, (0, 0, padw, padh), fill=fill)
return img
class Compose:
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, image, target):
for t in self.transforms:
image, target = t(image, target)
return image, target
class RandomResize:
def __init__(self, min_size, max_size=None):
self.min_size = min_size
if max_size is None:
max_size = min_size
self.max_size = max_size
def __call__(self, image, target):
size = random.randint(self.min_size, self.max_size)
image = F.resize(image, size, antialias=True)
target = F.resize(target, size, interpolation=T.InterpolationMode.NEAREST)
return image, target
class RandomHorizontalFlip:
def __init__(self, flip_prob):
self.flip_prob = flip_prob
def __call__(self, image, target):
if random.random() < self.flip_prob:
image = F.hflip(image)
target = F.hflip(target)
return image, target
class RandomCrop:
def __init__(self, size):
self.size = size
def __call__(self, image, target):
image = pad_if_smaller(image, self.size)
target = pad_if_smaller(target, self.size, fill=255)
crop_params = T.RandomCrop.get_params(image, (self.size, self.size))
image = F.crop(image, *crop_params)
target = F.crop(target, *crop_params)
return image, target
class CenterCrop:
def __init__(self, size):
self.size = size
def __call__(self, image, target):
image = F.center_crop(image, self.size)
target = F.center_crop(target, self.size)
return image, target
class PILToTensor:
def __call__(self, image, target):
image = F.pil_to_tensor(image)
target = torch.as_tensor(np.array(target), dtype=torch.int64)
return image, target
class ToDtype:
def __init__(self, dtype, scale=False):
self.dtype = dtype
self.scale = scale
def __call__(self, image, target):
if not self.scale:
return image.to(dtype=self.dtype), target
image = F.convert_image_dtype(image, self.dtype)
return image, target
class Normalize:
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, image, target):
image = F.normalize(image, mean=self.mean, std=self.std)
return image, target
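# A minimal usage sketch (editor's illustration; assumes torchvision and
# Pillow are installed): a typical training pipeline built from the
# transforms above, applied to a PIL image/mask pair.
if __name__ == '__main__':
    from PIL import Image
    img = Image.new('RGB', (64, 48))
    mask = Image.new('L', (64, 48))
    pipeline = Compose([
        RandomResize(32, 64),
        RandomHorizontalFlip(0.5),
        RandomCrop(32),
        PILToTensor(),
        ToDtype(torch.float, scale=True),
        Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
    ])
    image_t, target_t = pipeline(img, mask)
    print(image_t.shape, target_t.shape)  # [3, 32, 32] and [32, 32]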
|
import random
import numpy as np
import torch
from torchvision import transforms as T
from torchvision.transforms import functional as F
def pad_if_smaller(img, size, fill=0):
min_size = min(img.size)
if min_size < size:
ow, oh = img.size
padh = size - oh if oh < size else 0
padw = size - ow if ow < size else 0
img = F.pad(img, (0, 0, padw, padh), fill=fill)
return img
class Compose:
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, image, target):
for t in self.transforms:
image, target = t(image, target)
return image, target
class RandomResize:
def __init__(self, min_size, max_size=None):
self.min_size = min_size
if max_size is None:
max_size = min_size
self.max_size = max_size
def __call__(self, image, target):
size = random.randint(self.min_size, self.max_size)
image = F.resize(image, size, antialias=True)
target = F.resize(target, size, interpolation=T.InterpolationMode.NEAREST)
return image, target
class RandomHorizontalFlip:
def __init__(self, flip_prob):
self.flip_prob = flip_prob
def __call__(self, image, target):
if random.random() < self.flip_prob:
image = F.hflip(image)
target = F.hflip(target)
return image, target
class RandomCrop:
def __init__(self, size):
self.size = size
def __call__(self, image, target):
image = pad_if_smaller(image, self.size)
target = pad_if_smaller(target, self.size, fill=255)
crop_params = T.RandomCrop.get_params(image, (self.size, self.size))
image = F.crop(image, *crop_params)
target = F.crop(target, *crop_params)
return image, target
class CenterCrop:
def __init__(self, size):
self.size = size
def __call__(self, image, target):
image = F.center_crop(image, self.size)
target = F.center_crop(target, self.size)
return image, target
class PILToTensor:
def __call__(self, image, target):
image = F.pil_to_tensor(image)
target = torch.as_tensor(np.array(target), dtype=torch.int64)
return image, target
class ConvertImageDtype:
def __init__(self, dtype):
self.dtype = dtype
def __call__(self, image, target):
image = F.convert_image_dtype(image, self.dtype)
return image, target
class Normalize:
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, image, target):
image = F.normalize(image, mean=self.mean, std=self.std)
return image, target
|
# flake8: noqa
from typing import Any
from typing_extensions import assert_type
from torch import randn, Tensor
# See ../pass/arithmetic_ops.py for more information
TENSOR, FLOAT = randn(3), 1.5
FLOAT & TENSOR # E: Unsupported operand types for & ("float" and "Tensor")
FLOAT | TENSOR # E: Unsupported operand types for | ("float" and "Tensor")
FLOAT ^ TENSOR # E: Unsupported operand types for ^ ("float" and "Tensor")
# FIXME: false negatives (https://github.com/pytorch/pytorch/issues/155701)
#
# FLOAT << TENSOR  # E: Unsupported operand types for << ("float" and "Tensor")
# FLOAT >> TENSOR  # E: Unsupported operand types for >> ("float" and "Tensor")
#
# TENSOR & FLOAT  # E: Unsupported operand types for & ("Tensor" and "float")
# TENSOR | FLOAT  # E: Unsupported operand types for | ("Tensor" and "float")
# TENSOR ^ FLOAT  # E: Unsupported operand types for ^ ("Tensor" and "float")
# TENSOR << FLOAT  # E: Unsupported operand types for << ("Tensor" and "float")
# TENSOR >> FLOAT  # E: Unsupported operand types for >> ("Tensor" and "float")
|
# flake8: noqa
from typing import Any
from typing_extensions import assert_type
from torch import randn, Tensor
# See ../pass/arithmetic_ops.py for more information
TENSOR, INT, FLOAT = randn(3), 2, 1.5
FLOAT & TENSOR # E: Unsupported operand types for & ("float" and "Tensor")
FLOAT | TENSOR # E: Unsupported operand types for | ("float" and "Tensor")
FLOAT ^ TENSOR # E: Unsupported operand types for ^ ("float" and "Tensor")
# FIXME: false negatives (https://github.com/pytorch/pytorch/issues/155701)
# TENSOR & FLOAT  # E: Unsupported operand types for & ("Tensor" and "float")
# TENSOR | FLOAT  # E: Unsupported operand types for | ("Tensor" and "float")
# TENSOR ^ FLOAT  # E: Unsupported operand types for ^ ("Tensor" and "float")
|
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
input_size = 300
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean=[123.675, 116.28, 103.53],
to_rgb=True,
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(type='RandomFlip', prob=0.5),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=8,
num_workers=2,
batch_sampler=None,
dataset=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=8,
num_workers=2,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
# optimizer
optimizer = dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4)
custom_hooks = [
dict(type='NumClassCheckHook'),
dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW')
]
# TODO support auto_scale_lr
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
# auto_scale_lr = dict(base_batch_size=64)
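# How this config is consumed (editor's note): mmengine resolves the `_base_`
# list first, then applies the overrides above, e.g.
#   from mmengine.config import Config
#   cfg = Config.fromfile(<path to this file>)  # path is deployment-specific
#   cfg.train_dataloader.batch_size  # -> 8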
|
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(300, 300), keep_ratio=False),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(300, 300),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=3,
train=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4)
optimizer_config = dict(_delete_=True)
custom_hooks = [
dict(type='NumClassCheckHook'),
dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW')
]
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.imdb import get_word_index as get_word_index
from keras.src.datasets.imdb import load_data as load_data
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.imdb import get_word_index
from keras.src.datasets.imdb import load_data
|
from docarray.array.document import DocumentArray
from docarray.array.storage.qdrant import StorageMixins, QdrantConfig
__all__ = ['DocumentArrayQdrant', 'QdrantConfig']
class DocumentArrayQdrant(StorageMixins, DocumentArray):
"""
    DocumentArray that stores Documents in a `Qdrant <https://qdrant.tech/>`_ vector search engine.
.. note::
This DocumentArray requires `qdrant-client`. You can install it via `pip install "docarray[qdrant]"`.
To use Qdrant as storage backend, a Qdrant service needs to be running on your machine.
With this implementation, :meth:`match` and :meth:`find` perform fast (approximate) vector search.
Additionally, search with filters is supported.
Example usage:
.. code-block:: python
from docarray import DocumentArray
# connect to running Qdrant service with default configuration (address: http://localhost:6333)
da = DocumentArray(storage='qdrant', config={'n_dim': 10})
    # connect to a previously persisted DocumentArrayQdrant by specifying collection_name, host, and port
da = DocumentArray(
storage='qdrant',
config={
'collection_name': 'persisted',
'host': 'localhost',
'port': '6333',
'n_dim': 10,
},
)
.. seealso::
For further details, see our :ref:`user guide <qdrant>`.
"""
def __new__(cls, *args, **kwargs):
"""``__new__`` method for :class:`DocumentArrayQdrant`
:param *args: list of args to instantiate the object
:param **kwargs: dict of args to instantiate the object
:return: the instantiated :class:`DocumentArrayQdrant` object
"""
return super().__new__(cls)
|
from .document import DocumentArray
from .storage.qdrant import StorageMixins, QdrantConfig
__all__ = ['DocumentArrayQdrant', 'QdrantConfig']
class DocumentArrayQdrant(StorageMixins, DocumentArray):
"""
    DocumentArray that stores Documents in a `Qdrant <https://qdrant.tech/>`_ vector search engine.
.. note::
This DocumentArray requires `qdrant-client`. You can install it via `pip install "docarray[qdrant]"`.
To use Qdrant as storage backend, a Qdrant service needs to be running on your machine.
With this implementation, :meth:`match` and :meth:`find` perform fast (approximate) vector search.
Additionally, search with filters is supported.
Example usage:
.. code-block:: python
from docarray import DocumentArray
# connect to running Qdrant service with default configuration (address: http://localhost:6333)
da = DocumentArray(storage='qdrant', config={'n_dim': 10})
    # connect to a previously persisted DocumentArrayQdrant by specifying collection_name, host, and port
da = DocumentArray(
storage='qdrant',
config={
'collection_name': 'persisted',
'host': 'localhost',
'port': '6333',
'n_dim': 10,
},
)
.. seealso::
For further details, see our :ref:`user guide <qdrant>`.
"""
def __new__(cls, *args, **kwargs):
"""``__new__`` method for :class:`DocumentArrayQdrant`
:param *args: list of args to instantiate the object
:param **kwargs: dict of args to instantiate the object
:return: the instantiated :class:`DocumentArrayQdrant` object
"""
return super().__new__(cls)
|
from datetime import datetime, timedelta
from backend.blocks.hubspot._auth import (
HubSpotCredentials,
HubSpotCredentialsField,
HubSpotCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
class HubSpotEngagementBlock(Block):
class Input(BlockSchema):
credentials: HubSpotCredentialsInput = HubSpotCredentialsField()
operation: str = SchemaField(
description="Operation to perform (send_email, track_engagement)",
default="send_email",
)
email_data: dict = SchemaField(
description="Email data including recipient, subject, content",
default={},
)
contact_id: str = SchemaField(
description="Contact ID for engagement tracking", default=""
)
timeframe_days: int = SchemaField(
description="Number of days to look back for engagement",
default=30,
)
class Output(BlockSchema):
result: dict = SchemaField(description="Operation result")
status: str = SchemaField(description="Operation status")
def __init__(self):
super().__init__(
id="c6524385-7d87-49d6-a470-248bd29ca765",
description="Manages HubSpot engagements - sends emails and tracks engagement metrics",
categories={BlockCategory.CRM, BlockCategory.COMMUNICATION},
input_schema=HubSpotEngagementBlock.Input,
output_schema=HubSpotEngagementBlock.Output,
)
def run(
self, input_data: Input, *, credentials: HubSpotCredentials, **kwargs
) -> BlockOutput:
base_url = "https://api.hubapi.com"
headers = {
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
"Content-Type": "application/json",
}
if input_data.operation == "send_email":
# Using the email send API
email_url = f"{base_url}/crm/v3/objects/emails"
email_data = {
"properties": {
"hs_timestamp": datetime.now().isoformat(),
"hubspot_owner_id": "1", # This should be configurable
"hs_email_direction": "OUTBOUND",
"hs_email_status": "SEND",
"hs_email_subject": input_data.email_data.get("subject"),
"hs_email_text": input_data.email_data.get("content"),
"hs_email_to_email": input_data.email_data.get("recipient"),
}
}
response = requests.post(email_url, headers=headers, json=email_data)
result = response.json()
yield "result", result
yield "status", "email_sent"
elif input_data.operation == "track_engagement":
# Get engagement events for the contact
from_date = datetime.now() - timedelta(days=input_data.timeframe_days)
engagement_url = (
f"{base_url}/crm/v3/objects/contacts/{input_data.contact_id}/engagement"
)
params = {"limit": 100, "after": from_date.isoformat()}
response = requests.get(engagement_url, headers=headers, params=params)
engagements = response.json()
# Process engagement metrics
metrics = {
"email_opens": 0,
"email_clicks": 0,
"email_replies": 0,
"last_engagement": None,
"engagement_score": 0,
}
for engagement in engagements.get("results", []):
eng_type = engagement.get("properties", {}).get("hs_engagement_type")
if eng_type == "EMAIL":
metrics["email_opens"] += 1
elif eng_type == "EMAIL_CLICK":
metrics["email_clicks"] += 1
elif eng_type == "EMAIL_REPLY":
metrics["email_replies"] += 1
# Update last engagement time
eng_time = engagement.get("properties", {}).get("hs_timestamp")
if eng_time and (
not metrics["last_engagement"]
or eng_time > metrics["last_engagement"]
):
metrics["last_engagement"] = eng_time
# Calculate simple engagement score
metrics["engagement_score"] = (
metrics["email_opens"]
+ metrics["email_clicks"] * 2
+ metrics["email_replies"] * 3
)
yield "result", metrics
yield "status", "engagement_tracked"
|
from datetime import datetime, timedelta
from backend.blocks.hubspot._auth import (
HubSpotCredentials,
HubSpotCredentialsField,
HubSpotCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
class HubSpotEngagementBlock(Block):
class Input(BlockSchema):
credentials: HubSpotCredentialsInput = HubSpotCredentialsField()
operation: str = SchemaField(
description="Operation to perform (send_email, track_engagement)",
default="send_email",
)
email_data: dict = SchemaField(
description="Email data including recipient, subject, content",
default={},
)
contact_id: str = SchemaField(
description="Contact ID for engagement tracking", default=""
)
timeframe_days: int = SchemaField(
description="Number of days to look back for engagement",
default=30,
optional=True,
)
class Output(BlockSchema):
result: dict = SchemaField(description="Operation result")
status: str = SchemaField(description="Operation status")
def __init__(self):
super().__init__(
id="c6524385-7d87-49d6-a470-248bd29ca765",
description="Manages HubSpot engagements - sends emails and tracks engagement metrics",
categories={BlockCategory.CRM, BlockCategory.COMMUNICATION},
input_schema=HubSpotEngagementBlock.Input,
output_schema=HubSpotEngagementBlock.Output,
)
def run(
self, input_data: Input, *, credentials: HubSpotCredentials, **kwargs
) -> BlockOutput:
base_url = "https://api.hubapi.com"
headers = {
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
"Content-Type": "application/json",
}
if input_data.operation == "send_email":
# Using the email send API
email_url = f"{base_url}/crm/v3/objects/emails"
email_data = {
"properties": {
"hs_timestamp": datetime.now().isoformat(),
"hubspot_owner_id": "1", # This should be configurable
"hs_email_direction": "OUTBOUND",
"hs_email_status": "SEND",
"hs_email_subject": input_data.email_data.get("subject"),
"hs_email_text": input_data.email_data.get("content"),
"hs_email_to_email": input_data.email_data.get("recipient"),
}
}
response = requests.post(email_url, headers=headers, json=email_data)
result = response.json()
yield "result", result
yield "status", "email_sent"
elif input_data.operation == "track_engagement":
# Get engagement events for the contact
from_date = datetime.now() - timedelta(days=input_data.timeframe_days)
engagement_url = (
f"{base_url}/crm/v3/objects/contacts/{input_data.contact_id}/engagement"
)
params = {"limit": 100, "after": from_date.isoformat()}
response = requests.get(engagement_url, headers=headers, params=params)
engagements = response.json()
# Process engagement metrics
metrics = {
"email_opens": 0,
"email_clicks": 0,
"email_replies": 0,
"last_engagement": None,
"engagement_score": 0,
}
for engagement in engagements.get("results", []):
eng_type = engagement.get("properties", {}).get("hs_engagement_type")
if eng_type == "EMAIL":
metrics["email_opens"] += 1
elif eng_type == "EMAIL_CLICK":
metrics["email_clicks"] += 1
elif eng_type == "EMAIL_REPLY":
metrics["email_replies"] += 1
# Update last engagement time
eng_time = engagement.get("properties", {}).get("hs_timestamp")
if eng_time and (
not metrics["last_engagement"]
or eng_time > metrics["last_engagement"]
):
metrics["last_engagement"] = eng_time
# Calculate simple engagement score
metrics["engagement_score"] = (
metrics["email_opens"]
+ metrics["email_clicks"] * 2
+ metrics["email_replies"] * 3
)
yield "result", metrics
yield "status", "engagement_tracked"
|
from typing import TYPE_CHECKING, Type, List
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import T
from docarray.document.strawberry_type import StrawberryDocument
class StrawberryMixin:
def to_strawberry_type(self) -> List['StrawberryDocument']:
"""Convert a DocumentArray object into a Pydantic model."""
return [d.to_strawberry_type() for d in self]
@classmethod
def from_strawberry_type(cls: Type['T'], model: List['StrawberryDocument']) -> 'T':
"""Convert a list of Strawberry into DocumentArray
:param model: the list of strawberry type objects that represents a DocumentArray
:return: a DocumentArray
"""
from docarray import Document
return cls(Document.from_strawberry_type(m) for m in model)
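# A round-trip sketch, assuming this mixin is composed into docarray's
# DocumentArray (as it is in docarray itself):
#
#     from docarray import Document, DocumentArray
#
#     da = DocumentArray([Document(text='hello'), Document(text='world')])
#     strawberry_docs = da.to_strawberry_type()
#     restored = DocumentArray.from_strawberry_type(strawberry_docs)
#     assert len(restored) == len(da)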
|
from typing import TYPE_CHECKING, Type, List
if TYPE_CHECKING:
from docarray.typing import T
from docarray.document.strawberry_type import StrawberryDocument
class StrawberryMixin:
def to_strawberry_type(self) -> List['StrawberryDocument']:
"""Convert a DocumentArray object into a Pydantic model."""
return [d.to_strawberry_type() for d in self]
@classmethod
def from_strawberry_type(cls: Type['T'], model: List['StrawberryDocument']) -> 'T':
"""Convert a list of Strawberry into DocumentArray
:param model: the list of strawberry type objects that represents a DocumentArray
:return: a DocumentArray
"""
from docarray import Document
return cls(Document.from_strawberry_type(m) for m in model)
|
import re
from io import BytesIO
from pathlib import Path
from typing import Any, Type
import numpy as np
import pytest
from langchain_core.documents.base import Blob
from langchain_core.language_models import FakeMessagesListChatModel
from langchain_core.messages import ChatMessage
from langchain_community.document_loaders.parsers.images import (
LLMImageBlobParser,
RapidOCRBlobParser,
TesseractBlobParser,
)
path_base = Path(__file__).parent.parent.parent
building_image = Blob.from_path(path_base / "examples/building.jpg")
text_image = Blob.from_path(path_base / "examples/text.png")
page_image = Blob.from_path(path_base / "examples/page.png")
_re_in_image = r"(?ms).*MAKE.*TEXT.*STAND.*OUT.*FROM.*"
@pytest.mark.parametrize(
"blob,body",
[
(Blob.from_path(path_base / "examples/text-gray.png"), _re_in_image),
],
)
@pytest.mark.parametrize(
"blob_loader,kw",
[
(RapidOCRBlobParser, {}),
(TesseractBlobParser, {}),
(
LLMImageBlobParser,
{
"model": FakeMessagesListChatModel(
responses=[
ChatMessage(
id="ai1",
role="system",
content="A building. MAKE TEXT STAND OUT FROM BACKGROUNDS",
),
]
)
},
),
],
)
def test_image_parser_with_differents_files(
blob_loader: Type,
kw: dict[str, Any],
blob: Blob,
body: str,
) -> None:
if blob_loader == LLMImageBlobParser and "building" in str(blob.path):
body = ".*building.*"
documents = list(blob_loader(**kw).lazy_parse(blob))
assert len(documents) == 1
assert re.compile(body).match(documents[0].page_content)
@pytest.mark.parametrize(
"blob_loader,kw",
[
(RapidOCRBlobParser, {}),
(TesseractBlobParser, {}),
(
LLMImageBlobParser,
{
"model": FakeMessagesListChatModel(
responses=[
ChatMessage(
id="ai1",
role="system",
content="A building. MAKE TEXT STAND OUT FROM BACKGROUNDS",
),
]
)
},
),
],
)
def test_image_parser_with_numpy(
blob_loader: Type,
kw: dict[str, Any],
) -> None:
gray_image = np.empty(shape=(412, 1652, 1))
with BytesIO() as buffer:
np.save(buffer, gray_image)
buffer.seek(0)
npy_bytes = buffer.getvalue()
blob = Blob.from_data(npy_bytes, mime_type="application/x-npy")
documents = list(blob_loader(**kw).lazy_parse(blob))
assert len(documents) == 1
|
import re
from pathlib import Path
from typing import Any, Type
import pytest
from langchain_core.documents.base import Blob
from langchain_core.language_models import FakeMessagesListChatModel
from langchain_core.messages import ChatMessage
from langchain_community.document_loaders.parsers.images import (
LLMImageBlobParser,
RapidOCRBlobParser,
TesseractBlobParser,
)
path_base = Path(__file__).parent.parent.parent
building_image = Blob.from_path(path_base / "examples/building.jpg")
text_image = Blob.from_path(path_base / "examples/text.png")
page_image = Blob.from_path(path_base / "examples/page.png")
@pytest.mark.parametrize(
"blob,body",
[
(building_image, ""),
(text_image, r"(?ms).*MAKE.*TEXT.*STAND.*OUT.*FROM.*BACKGROUNDS.*"),
],
)
@pytest.mark.parametrize(
"blob_loader,kw",
[
(RapidOCRBlobParser, {}),
(TesseractBlobParser, {}),
(
LLMImageBlobParser,
{
"model": FakeMessagesListChatModel(
responses=[
ChatMessage(
id="ai1",
role="system",
content="A building. MAKE TEXT STAND OUT FROM BACKGROUNDS",
),
]
)
},
),
],
)
def test_image_parser_with_differents_files(
blob_loader: Type,
kw: dict[str, Any],
blob: Blob,
body: str,
) -> None:
if blob_loader == LLMImageBlobParser and "building" in str(blob.path):
body = ".*building.*"
documents = list(blob_loader(**kw).lazy_parse(blob))
assert len(documents) == 1
assert re.compile(body).match(documents[0].page_content)
|
from __future__ import annotations
import json
from typing import Any, Optional
from langchain_core._api import deprecated
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from langchain_text_splitters import RecursiveCharacterTextSplitter, TextSplitter
from pydantic import Field
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.qa_generation.prompt import PROMPT_SELECTOR
@deprecated(
since="0.2.7",
alternative=(
"example in API reference with more detail: "
"https://api.python.langchain.com/en/latest/chains/langchain.chains.qa_generation.base.QAGenerationChain.html" # noqa: E501
),
removal="1.0",
)
class QAGenerationChain(Chain):
"""Base class for question-answer generation chains.
This class is deprecated. See below for an alternative implementation.
Advantages of this implementation include:
- Supports async and streaming;
- Surfaces prompt and text splitter for easier customization;
- Use of JsonOutputParser supports JSONPatch operations in streaming mode,
as well as robustness to markdown.
.. code-block:: python
from langchain.chains.qa_generation.prompt import CHAT_PROMPT as prompt
# Note: import PROMPT if using a legacy non-chat model.
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.runnables import (
RunnableLambda,
RunnableParallel,
RunnablePassthrough,
)
from langchain_core.runnables.base import RunnableEach
from langchain_openai import ChatOpenAI
from langchain_text_splitters import RecursiveCharacterTextSplitter
llm = ChatOpenAI()
text_splitter = RecursiveCharacterTextSplitter(chunk_overlap=500)
split_text = RunnableLambda(
lambda x: text_splitter.create_documents([x])
)
chain = RunnableParallel(
text=RunnablePassthrough(),
questions=(
split_text | RunnableEach(bound=prompt | llm | JsonOutputParser())
)
)
"""
llm_chain: LLMChain
"""LLM Chain that generates responses from user input and context."""
text_splitter: TextSplitter = Field(
default=RecursiveCharacterTextSplitter(chunk_overlap=500)
)
"""Text splitter that splits the input into chunks."""
input_key: str = "text"
"""Key of the input to the chain."""
output_key: str = "questions"
"""Key of the output of the chain."""
k: Optional[int] = None
"""Number of questions to generate."""
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: Optional[BasePromptTemplate] = None,
**kwargs: Any,
) -> QAGenerationChain:
"""
Create a QAGenerationChain from a language model.
Args:
llm: a language model
prompt: a prompt template
**kwargs: additional arguments
Returns:
a QAGenerationChain class
"""
_prompt = prompt or PROMPT_SELECTOR.get_prompt(llm)
chain = LLMChain(llm=llm, prompt=_prompt)
return cls(llm_chain=chain, **kwargs)
@property
def _chain_type(self) -> str:
raise NotImplementedError
@property
def input_keys(self) -> list[str]:
return [self.input_key]
@property
def output_keys(self) -> list[str]:
return [self.output_key]
def _call(
self,
inputs: dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> dict[str, list]:
docs = self.text_splitter.create_documents([inputs[self.input_key]])
results = self.llm_chain.generate(
[{"text": d.page_content} for d in docs], run_manager=run_manager
)
qa = [json.loads(res[0].text) for res in results.generations]
return {self.output_key: qa}
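# A minimal usage sketch, assuming an OpenAI API key is configured (the model
# choice and input text are placeholders; the exact QA dict shape depends on
# the prompt used):
#
#     from langchain_openai import ChatOpenAI
#
#     chain = QAGenerationChain.from_llm(ChatOpenAI())
#     result = chain.invoke({"text": "Some long document text ..."})
#     # -> {"questions": [{"question": "...", "answer": "..."}, ...]}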
|
from __future__ import annotations
import json
from typing import Any, Dict, List, Optional
from langchain_core._api import deprecated
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from langchain_text_splitters import RecursiveCharacterTextSplitter, TextSplitter
from pydantic import Field
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.qa_generation.prompt import PROMPT_SELECTOR
@deprecated(
since="0.2.7",
alternative=(
"example in API reference with more detail: "
"https://api.python.langchain.com/en/latest/chains/langchain.chains.qa_generation.base.QAGenerationChain.html" # noqa: E501
),
removal="1.0",
)
class QAGenerationChain(Chain):
"""Base class for question-answer generation chains.
This class is deprecated. See below for an alternative implementation.
Advantages of this implementation include:
- Supports async and streaming;
- Surfaces prompt and text splitter for easier customization;
- Use of JsonOutputParser supports JSONPatch operations in streaming mode,
as well as robustness to markdown.
.. code-block:: python
from langchain.chains.qa_generation.prompt import CHAT_PROMPT as prompt
# Note: import PROMPT if using a legacy non-chat model.
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.runnables import (
RunnableLambda,
RunnableParallel,
RunnablePassthrough,
)
from langchain_core.runnables.base import RunnableEach
from langchain_openai import ChatOpenAI
from langchain_text_splitters import RecursiveCharacterTextSplitter
llm = ChatOpenAI()
text_splitter = RecursiveCharacterTextSplitter(chunk_overlap=500)
split_text = RunnableLambda(
lambda x: text_splitter.create_documents([x])
)
chain = RunnableParallel(
text=RunnablePassthrough(),
questions=(
split_text | RunnableEach(bound=prompt | llm | JsonOutputParser())
)
)
"""
llm_chain: LLMChain
"""LLM Chain that generates responses from user input and context."""
text_splitter: TextSplitter = Field(
default=RecursiveCharacterTextSplitter(chunk_overlap=500)
)
"""Text splitter that splits the input into chunks."""
input_key: str = "text"
"""Key of the input to the chain."""
output_key: str = "questions"
"""Key of the output of the chain."""
k: Optional[int] = None
"""Number of questions to generate."""
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: Optional[BasePromptTemplate] = None,
**kwargs: Any,
) -> QAGenerationChain:
"""
Create a QAGenerationChain from a language model.
Args:
llm: a language model
prompt: a prompt template
**kwargs: additional arguments
Returns:
a QAGenerationChain class
"""
_prompt = prompt or PROMPT_SELECTOR.get_prompt(llm)
chain = LLMChain(llm=llm, prompt=_prompt)
return cls(llm_chain=chain, **kwargs)
@property
def _chain_type(self) -> str:
raise NotImplementedError
@property
def input_keys(self) -> List[str]:
return [self.input_key]
@property
def output_keys(self) -> List[str]:
return [self.output_key]
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, List]:
docs = self.text_splitter.create_documents([inputs[self.input_key]])
results = self.llm_chain.generate(
[{"text": d.page_content} for d in docs], run_manager=run_manager
)
qa = [json.loads(res[0].text) for res in results.generations]
return {self.output_key: qa}
|
import importlib
import pytest
from fastapi.testclient import TestClient
from ...utils import needs_py39
@pytest.fixture(
name="client",
params=[
"tutorial012",
pytest.param("tutorial012_py39", marks=needs_py39),
"tutorial012_an",
pytest.param("tutorial012_an_py39", marks=needs_py39),
],
)
def get_client(request: pytest.FixtureRequest):
mod = importlib.import_module(
f"docs_src.query_params_str_validations.{request.param}"
)
client = TestClient(mod.app)
return client
def test_default_query_values(client: TestClient):
url = "/items/"
response = client.get(url)
assert response.status_code == 200, response.text
assert response.json() == {"q": ["foo", "bar"]}
def test_multi_query_values(client: TestClient):
url = "/items/?q=baz&q=foobar"
response = client.get(url)
assert response.status_code == 200, response.text
assert response.json() == {"q": ["baz", "foobar"]}
def test_openapi_schema(client: TestClient):
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/items/": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Read Items",
"operationId": "read_items_items__get",
"parameters": [
{
"required": False,
"schema": {
"title": "Q",
"type": "array",
"items": {"type": "string"},
"default": ["foo", "bar"],
},
"name": "q",
"in": "query",
}
],
}
}
},
"components": {
"schemas": {
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
}
},
}
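# For reference, the app under test declares a list-valued query parameter
# with a default, roughly like this (a sketch; the docs_src tutorial module
# is the source of truth):
#
#     from fastapi import FastAPI, Query
#
#     app = FastAPI()
#
#     @app.get("/items/")
#     async def read_items(q: list[str] = Query(default=["foo", "bar"])):
#         return {"q": q}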
|
from fastapi.testclient import TestClient
from docs_src.query_params_str_validations.tutorial012 import app
client = TestClient(app)
def test_default_query_values():
url = "/items/"
response = client.get(url)
assert response.status_code == 200, response.text
assert response.json() == {"q": ["foo", "bar"]}
def test_multi_query_values():
url = "/items/?q=baz&q=foobar"
response = client.get(url)
assert response.status_code == 200, response.text
assert response.json() == {"q": ["baz", "foobar"]}
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/items/": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Read Items",
"operationId": "read_items_items__get",
"parameters": [
{
"required": False,
"schema": {
"title": "Q",
"type": "array",
"items": {"type": "string"},
"default": ["foo", "bar"],
},
"name": "q",
"in": "query",
}
],
}
}
},
"components": {
"schemas": {
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
}
},
}
|
"""
This example trains a CrossEncoder for the NLI task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it learns to predict the labels: "contradiction": 0, "entailment": 1, "neutral": 2.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_nli.py
"""
import csv
import gzip
import logging
import math
import os
from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import LoggingHandler, util
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CEF1Evaluator, CESoftmaxAccuracyEvaluator
from sentence_transformers.evaluation import SequentialEvaluator
from sentence_transformers.readers import InputExample
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
logger = logging.getLogger(__name__)
#### /print debug information to stdout
# As dataset, we use SNLI + MultiNLI
# Check if dataset exists. If not, download and extract it
nli_dataset_path = "datasets/AllNLI.tsv.gz"
if not os.path.exists(nli_dataset_path):
util.http_get("https://sbert.net/datasets/AllNLI.tsv.gz", nli_dataset_path)
# Read the AllNLI.tsv.gz file and create the training dataset
logger.info("Read AllNLI train dataset")
label2int = {"contradiction": 0, "entailment": 1, "neutral": 2}
train_samples = []
dev_samples = []
with gzip.open(nli_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
label_id = label2int[row["label"]]
if row["split"] == "train":
train_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=label_id))
else:
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=label_id))
train_batch_size = 16
num_epochs = 4
model_save_path = "output/training_allnli-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# Define our CrossEncoder model. We use distilroberta-base as the base model and set it up to predict 3 labels
model = CrossEncoder("distilroberta-base", num_labels=len(label2int))
# We wrap train_samples, which is a list of InputExample, in a pytorch DataLoader
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
# During training, we use CESoftmaxAccuracyEvaluator and CEF1Evaluator to measure the performance on the dev set
accuracy_evaluator = CESoftmaxAccuracyEvaluator.from_input_examples(dev_samples, name="AllNLI-dev")
f1_evaluator = CEF1Evaluator.from_input_examples(dev_samples, name="AllNLI-dev")
evaluator = SequentialEvaluator([accuracy_evaluator, f1_evaluator])
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logger.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_dataloader=train_dataloader,
evaluator=evaluator,
epochs=num_epochs,
evaluation_steps=10000,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
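# After training, the saved CrossEncoder can score new sentence pairs
# (a sketch; the sentences are placeholders):
#
#     trained = CrossEncoder(model_save_path)
#     probs = trained.predict(
#         [["A man is eating food.", "A man is eating a meal."]],
#         apply_softmax=True,
#     )
#     predicted_label = probs.argmax(axis=1)  # 0=contradiction, 1=entailment, 2=neutral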
|
"""
This example trains a CrossEncoder for the NLI task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it learns to predict the labels: "contradiction": 0, "entailment": 1, "neutral": 2.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_nli.py
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import LoggingHandler, util
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CEF1Evaluator, CESoftmaxAccuracyEvaluator
from sentence_transformers.evaluation import SequentialEvaluator
from sentence_transformers.readers import InputExample
import logging
from datetime import datetime
import os
import gzip
import csv
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
logger = logging.getLogger(__name__)
#### /print debug information to stdout
# As dataset, we use SNLI + MultiNLI
# Check if dataset exists. If not, download and extract it
nli_dataset_path = "datasets/AllNLI.tsv.gz"
if not os.path.exists(nli_dataset_path):
util.http_get("https://sbert.net/datasets/AllNLI.tsv.gz", nli_dataset_path)
# Read the AllNLI.tsv.gz file and create the training dataset
logger.info("Read AllNLI train dataset")
label2int = {"contradiction": 0, "entailment": 1, "neutral": 2}
train_samples = []
dev_samples = []
with gzip.open(nli_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
label_id = label2int[row["label"]]
if row["split"] == "train":
train_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=label_id))
else:
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=label_id))
train_batch_size = 16
num_epochs = 4
model_save_path = "output/training_allnli-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# Define our CrossEncoder model. We use distilroberta-base as the base model and set it up to predict 3 labels
model = CrossEncoder("distilroberta-base", num_labels=len(label2int))
# We wrap train_samples, which is a list of InputExample, in a pytorch DataLoader
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
# During training, we use CESoftmaxAccuracyEvaluator and CEF1Evaluator to measure the performance on the dev set
accuracy_evaluator = CESoftmaxAccuracyEvaluator.from_input_examples(dev_samples, name="AllNLI-dev")
f1_evaluator = CEF1Evaluator.from_input_examples(dev_samples, name="AllNLI-dev")
evaluator = SequentialEvaluator([accuracy_evaluator, f1_evaluator])
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logger.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_dataloader=train_dataloader,
evaluator=evaluator,
epochs=num_epochs,
evaluation_steps=10000,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import tempfile
from collections import OrderedDict
import torch
from mmengine import Config
from mmengine.utils import digit_version
def parse_config(config_strings):
temp_file = tempfile.NamedTemporaryFile()
config_path = f'{temp_file.name}.py'
with open(config_path, 'w') as f:
f.write(config_strings)
config = Config.fromfile(config_path)
# check whether it is SSD
if config.model.bbox_head.type != 'SSDHead':
        raise AssertionError('This is not an SSD model.')
def convert(in_file, out_file):
checkpoint = torch.load(in_file)
in_state_dict = checkpoint.pop('state_dict')
out_state_dict = OrderedDict()
meta_info = checkpoint['meta']
parse_config('#' + meta_info['config'])
for key, value in in_state_dict.items():
if 'extra' in key:
layer_idx = int(key.split('.')[2])
new_key = 'neck.extra_layers.{}.{}.conv.'.format(
layer_idx // 2, layer_idx % 2) + key.split('.')[-1]
elif 'l2_norm' in key:
new_key = 'neck.l2_norm.weight'
elif 'bbox_head' in key:
new_key = key[:21] + '.0' + key[21:]
else:
new_key = key
out_state_dict[new_key] = value
checkpoint['state_dict'] = out_state_dict
if digit_version(torch.__version__) >= digit_version('1.6'):
torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
else:
torch.save(checkpoint, out_file)
def main():
parser = argparse.ArgumentParser(description='Upgrade SSD version')
parser.add_argument('in_file', help='input checkpoint file')
parser.add_argument('out_file', help='output checkpoint file')
args = parser.parse_args()
convert(args.in_file, args.out_file)
if __name__ == '__main__':
main()
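# Example invocation (the script and checkpoint names are placeholders):
#
#     python upgrade_ssd_version.py old_ssd300.pth upgraded_ssd300.pth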
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import tempfile
from collections import OrderedDict
import torch
from mmengine import Config
from mmengine.utils import digit_version
def parse_config(config_strings):
temp_file = tempfile.NamedTemporaryFile()
config_path = f'{temp_file.name}.py'
with open(config_path, 'w') as f:
f.write(config_strings)
config = Config.fromfile(config_path)
# check whether it is SSD
if config.model.bbox_head.type != 'SSDHead':
        raise AssertionError('This is not an SSD model.')
def convert(in_file, out_file):
checkpoint = torch.load(in_file)
in_state_dict = checkpoint.pop('state_dict')
out_state_dict = OrderedDict()
meta_info = checkpoint['meta']
parse_config('#' + meta_info['config'])
for key, value in in_state_dict.items():
if 'extra' in key:
layer_idx = int(key.split('.')[2])
new_key = 'neck.extra_layers.{}.{}.conv.'.format(
layer_idx // 2, layer_idx % 2) + key.split('.')[-1]
elif 'l2_norm' in key:
new_key = 'neck.l2_norm.weight'
elif 'bbox_head' in key:
new_key = key[:21] + '.0' + key[21:]
else:
new_key = key
out_state_dict[new_key] = value
checkpoint['state_dict'] = out_state_dict
    if digit_version(torch.__version__) >= digit_version('1.6'):
torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
else:
torch.save(checkpoint, out_file)
def main():
parser = argparse.ArgumentParser(description='Upgrade SSD version')
parser.add_argument('in_file', help='input checkpoint file')
parser.add_argument('out_file', help='output checkpoint file')
args = parser.parse_args()
convert(args.in_file, args.out_file)
if __name__ == '__main__':
main()
|
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Conversion script for stable diffusion checkpoints which _only_ contain a controlnet."""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument(
"--original_config_file",
type=str,
required=True,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--image_size",
default=512,
type=int,
help=(
"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2"
" Base. Use 768 for Stable Diffusion v2."
),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
# small workaround to get argparser to parse a boolean input as either true _or_ false
def parse_bool(string):
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(f"could not parse string as bool {string}")
parser.add_argument(
"--use_linear_projection", help="Override for use linear projection", required=False, type=parse_bool
)
parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int)
args = parser.parse_args()
controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
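# Example invocation (the script name and paths are placeholders):
#
#     python convert_controlnet_to_diffusers.py \
#         --checkpoint_path control_sd15_canny.pth \
#         --original_config_file cldm_v15.yaml \
#         --dump_path ./controlnet-canny \
#         --to_safetensors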
|
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Conversion script for stable diffusion checkpoints which _only_ contain a controlnet."""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument(
"--original_config_file",
type=str,
required=True,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--image_size",
default=512,
type=int,
help=(
"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2"
" Base. Use 768 for Stable Diffusion v2."
),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
# small workaround to get argparser to parse a boolean input as either true _or_ false
def parse_bool(string):
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(f"could not parse string as bool {string}")
parser.add_argument(
"--use_linear_projection", help="Override for use linear projection", required=False, type=parse_bool
)
parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int)
args = parser.parse_args()
controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
|
import pytest
@pytest.mark.compile
def test_placeholder() -> None:
"""Used for compiling integration tests without running any real tests."""
|
import pytest
@pytest.mark.compile
def test_placeholder() -> None:
"""Used for compiling integration tests without running any real tests."""
pass
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# please install mmcls>=1.0
# import mmcls.models to trigger register_module in mmcls
custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-tiny_3rdparty_32xb128-noema_in1k_20220301-795e9634.pth' # noqa
model = dict(
backbone=dict(
_delete_=True,
type='mmcls.ConvNeXt',
arch='tiny',
out_indices=[0, 1, 2, 3],
drop_path_rate=0.4,
layer_scale_init_value=1.0,
gap_before_final_norm=False,
init_cfg=dict(
type='Pretrained', checkpoint=checkpoint_file,
prefix='backbone.')),
neck=dict(in_channels=[96, 192, 384, 768]))
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[[
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
scales=[(400, 1333), (500, 1333), (600, 1333)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
keep_ratio=True)
]]),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(
type='AmpOptimWrapper',
constructor='LearningRateDecayOptimizerConstructor',
paramwise_cfg={
'decay_rate': 0.95,
'decay_type': 'layer_wise',
'num_layers': 6
},
optimizer=dict(
_delete_=True,
type='AdamW',
lr=0.0001,
betas=(0.9, 0.999),
weight_decay=0.05,
))
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# please install mmcls>=0.22.0
# import mmcls.models to trigger register_module in mmcls
custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-tiny_3rdparty_32xb128-noema_in1k_20220301-795e9634.pth' # noqa
model = dict(
backbone=dict(
_delete_=True,
type='mmcls.ConvNeXt',
arch='tiny',
out_indices=[0, 1, 2, 3],
drop_path_rate=0.4,
layer_scale_init_value=1.0,
gap_before_final_norm=False,
init_cfg=dict(
type='Pretrained', checkpoint=checkpoint_file,
prefix='backbone.')),
neck=dict(in_channels=[96, 192, 384, 768]))
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[[
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
scales=[(400, 1333), (500, 1333), (600, 1333)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
keep_ratio=True)
]]),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(
type='AmpOptimWrapper',
constructor='LearningRateDecayOptimizerConstructor',
paramwise_cfg={
'decay_rate': 0.95,
'decay_type': 'layer_wise',
'num_layers': 6
},
optimizer=dict(
_delete_=True,
type='AdamW',
lr=0.0001,
betas=(0.9, 0.999),
weight_decay=0.05,
))
|
"""Tests for the Google Cloud DocAI parser."""
from unittest.mock import MagicMock, patch
import pytest
from langchain_community.document_loaders.parsers import (
AzureAIDocumentIntelligenceParser,
)
@pytest.mark.requires("azure", "azure.ai", "azure.ai.documentintelligence")
@patch("azure.ai.documentintelligence.DocumentIntelligenceClient")
@patch("azure.core.credentials.AzureKeyCredential")
def test_doc_intelligence(mock_credential: MagicMock, mock_client: MagicMock) -> None:
endpoint = "endpoint"
key = "key"
parser = AzureAIDocumentIntelligenceParser(api_endpoint=endpoint, api_key=key)
mock_credential.assert_called_once_with(key)
mock_client.assert_called_once_with(
endpoint=endpoint,
credential=mock_credential(),
headers={
"x-ms-useragent": "langchain-parser/1.0.0",
},
)
assert parser.client == mock_client()
assert parser.api_model == "prebuilt-layout"
assert parser.mode == "markdown"
@pytest.mark.requires("azure", "azure.ai", "azure.ai.documentintelligence")
@patch("azure.ai.documentintelligence.DocumentIntelligenceClient")
@patch("azure.core.credentials.AzureKeyCredential")
def test_doc_intelligence_with_analysis_features(
mock_credential: MagicMock, mock_client: MagicMock
) -> None:
endpoint = "endpoint"
key = "key"
analysis_features = ["ocrHighResolution", "barcodes"]
parser = AzureAIDocumentIntelligenceParser(
api_endpoint=endpoint, api_key=key, analysis_features=analysis_features
)
mock_credential.assert_called_once_with(key)
mock_client.assert_called_once_with(
endpoint=endpoint,
credential=mock_credential(),
headers={
"x-ms-useragent": "langchain-parser/1.0.0",
},
)
assert parser.client == mock_client()
assert parser.api_model == "prebuilt-layout"
assert parser.mode == "markdown"
with pytest.raises(ValueError):
analysis_features = ["invalid"]
parser = AzureAIDocumentIntelligenceParser(
api_endpoint=endpoint, api_key=key, analysis_features=analysis_features
)
|
"""Tests for the Google Cloud DocAI parser."""
from unittest.mock import MagicMock, patch
import pytest
from langchain_community.document_loaders.parsers import (
AzureAIDocumentIntelligenceParser,
)
@pytest.mark.requires("azure", "azure.ai", "azure.ai.documentintelligence")
@patch("azure.ai.documentintelligence.DocumentIntelligenceClient")
@patch("azure.core.credentials.AzureKeyCredential")
def test_doc_intelligence(mock_credential: MagicMock, mock_client: MagicMock) -> None:
endpoint = "endpoint"
key = "key"
parser = AzureAIDocumentIntelligenceParser(api_endpoint=endpoint, api_key=key)
mock_credential.assert_called_once_with(key)
mock_client.assert_called_once_with(
endpoint=endpoint,
credential=mock_credential(),
headers={
"x-ms-useragent": "langchain-parser/1.0.0",
},
features=None,
)
assert parser.client == mock_client()
assert parser.api_model == "prebuilt-layout"
assert parser.mode == "markdown"
@pytest.mark.requires("azure", "azure.ai", "azure.ai.documentintelligence")
@patch("azure.ai.documentintelligence.DocumentIntelligenceClient")
@patch("azure.core.credentials.AzureKeyCredential")
def test_doc_intelligence_with_analysis_features(
mock_credential: MagicMock, mock_client: MagicMock
) -> None:
endpoint = "endpoint"
key = "key"
analysis_features = ["ocrHighResolution", "barcodes"]
parser = AzureAIDocumentIntelligenceParser(
api_endpoint=endpoint, api_key=key, analysis_features=analysis_features
)
mock_credential.assert_called_once_with(key)
mock_client.assert_called_once_with(
endpoint=endpoint,
credential=mock_credential(),
headers={
"x-ms-useragent": "langchain-parser/1.0.0",
},
features=analysis_features,
)
assert parser.client == mock_client()
assert parser.api_model == "prebuilt-layout"
assert parser.mode == "markdown"
with pytest.raises(ValueError):
analysis_features = ["invalid"]
parser = AzureAIDocumentIntelligenceParser(
api_endpoint=endpoint, api_key=key, analysis_features=analysis_features
)
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from mmdet.registry import TASK_UTILS
PRIOR_GENERATORS = TASK_UTILS
ANCHOR_GENERATORS = TASK_UTILS
def build_prior_generator(cfg, default_args=None):
warnings.warn(
        '``build_prior_generator`` will be deprecated soon, please use '
'``mmdet.registry.TASK_UTILS.build()`` ')
return TASK_UTILS.build(cfg, default_args=default_args)
def build_anchor_generator(cfg, default_args=None):
warnings.warn(
        '``build_anchor_generator`` will be deprecated soon, please use '
'``mmdet.registry.TASK_UTILS.build()`` ')
return TASK_UTILS.build(cfg, default_args=default_args)
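# A config-driven build sketch (the generator type and arguments follow
# mmdet's stock AnchorGenerator; the values are illustrative):
#
#     anchor_cfg = dict(
#         type='AnchorGenerator',
#         scales=[8],
#         ratios=[0.5, 1.0, 2.0],
#         strides=[4, 8, 16, 32, 64])
#     anchor_generator = build_anchor_generator(anchor_cfg)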
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from mmcv.utils import Registry, build_from_cfg
PRIOR_GENERATORS = Registry('Generator for anchors and points')
ANCHOR_GENERATORS = PRIOR_GENERATORS
def build_prior_generator(cfg, default_args=None):
return build_from_cfg(cfg, PRIOR_GENERATORS, default_args)
def build_anchor_generator(cfg, default_args=None):
warnings.warn(
        '``build_anchor_generator`` will be deprecated soon, please use '
'``build_prior_generator`` ')
return build_prior_generator(cfg, default_args=default_args)
|
from typing import Any, Optional, Union, cast
from langchain_core.messages import AIMessage, ToolCall
from langchain_core.messages.tool import tool_call
from langchain_core.output_parsers import BaseGenerationOutputParser
from langchain_core.outputs import ChatGeneration, Generation
from pydantic import BaseModel, ConfigDict
class ToolsOutputParser(BaseGenerationOutputParser):
"""Output parser for tool calls."""
first_tool_only: bool = False
"""Whether to return only the first tool call."""
args_only: bool = False
"""Whether to return only the arguments of the tool calls."""
pydantic_schemas: Optional[list[type[BaseModel]]] = None
"""Pydantic schemas to parse tool calls into."""
model_config = ConfigDict(
extra="forbid",
)
def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
"""Parse a list of candidate model Generations into a specific format.
Args:
result: A list of Generations to be parsed. The Generations are assumed
to be different candidate outputs for a single model input.
Returns:
Structured output.
"""
if not result or not isinstance(result[0], ChatGeneration):
return None if self.first_tool_only else []
message = cast(AIMessage, result[0].message)
tool_calls: list = [
dict(tc) for tc in _extract_tool_calls_from_message(message)
]
if isinstance(message.content, list):
# Map tool call id to index
id_to_index = {
block["id"]: i
for i, block in enumerate(message.content)
if isinstance(block, dict) and block["type"] == "tool_use"
}
tool_calls = [{**tc, "index": id_to_index[tc["id"]]} for tc in tool_calls]
if self.pydantic_schemas:
tool_calls = [self._pydantic_parse(tc) for tc in tool_calls]
elif self.args_only:
tool_calls = [tc["args"] for tc in tool_calls]
else:
pass
if self.first_tool_only:
return tool_calls[0] if tool_calls else None
else:
            return tool_calls
def _pydantic_parse(self, tool_call: dict) -> BaseModel:
cls_ = {schema.__name__: schema for schema in self.pydantic_schemas or []}[
tool_call["name"]
]
return cls_(**tool_call["args"])
def _extract_tool_calls_from_message(message: AIMessage) -> list[ToolCall]:
"""Extract tool calls from a list of content blocks."""
if message.tool_calls:
return message.tool_calls
return extract_tool_calls(message.content)
def extract_tool_calls(content: Union[str, list[Union[str, dict]]]) -> list[ToolCall]:
"""Extract tool calls from a list of content blocks."""
if isinstance(content, list):
tool_calls = []
for block in content:
if isinstance(block, str):
continue
if block["type"] != "tool_use":
continue
tool_calls.append(
tool_call(name=block["name"], args=block["input"], id=block["id"])
)
return tool_calls
else:
return []
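# A sketch of the Anthropic-style "tool_use" content blocks this parser
# expects (values are illustrative):
#
#     blocks = [
#         {"type": "text", "text": "Calling a tool..."},
#         {"type": "tool_use", "id": "abc", "name": "add", "input": {"a": 1, "b": 2}},
#     ]
#     extract_tool_calls(blocks)
#     # -> [{"name": "add", "args": {"a": 1, "b": 2}, "id": "abc", ...}]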
|
from typing import Any, List, Optional, Type, Union, cast
from langchain_core.messages import AIMessage, ToolCall
from langchain_core.messages.tool import tool_call
from langchain_core.output_parsers import BaseGenerationOutputParser
from langchain_core.outputs import ChatGeneration, Generation
from pydantic import BaseModel, ConfigDict
class ToolsOutputParser(BaseGenerationOutputParser):
"""Output parser for tool calls."""
first_tool_only: bool = False
"""Whether to return only the first tool call."""
args_only: bool = False
"""Whether to return only the arguments of the tool calls."""
pydantic_schemas: Optional[List[Type[BaseModel]]] = None
"""Pydantic schemas to parse tool calls into."""
model_config = ConfigDict(
extra="forbid",
)
def parse_result(self, result: List[Generation], *, partial: bool = False) -> Any:
"""Parse a list of candidate model Generations into a specific format.
Args:
result: A list of Generations to be parsed. The Generations are assumed
to be different candidate outputs for a single model input.
Returns:
Structured output.
"""
if not result or not isinstance(result[0], ChatGeneration):
return None if self.first_tool_only else []
message = cast(AIMessage, result[0].message)
tool_calls: List = [
dict(tc) for tc in _extract_tool_calls_from_message(message)
]
if isinstance(message.content, list):
# Map tool call id to index
id_to_index = {
block["id"]: i
for i, block in enumerate(message.content)
if isinstance(block, dict) and block["type"] == "tool_use"
}
tool_calls = [{**tc, "index": id_to_index[tc["id"]]} for tc in tool_calls]
if self.pydantic_schemas:
tool_calls = [self._pydantic_parse(tc) for tc in tool_calls]
elif self.args_only:
tool_calls = [tc["args"] for tc in tool_calls]
else:
pass
if self.first_tool_only:
return tool_calls[0] if tool_calls else None
else:
            return tool_calls
def _pydantic_parse(self, tool_call: dict) -> BaseModel:
cls_ = {schema.__name__: schema for schema in self.pydantic_schemas or []}[
tool_call["name"]
]
return cls_(**tool_call["args"])
def _extract_tool_calls_from_message(message: AIMessage) -> List[ToolCall]:
"""Extract tool calls from a list of content blocks."""
if message.tool_calls:
return message.tool_calls
return extract_tool_calls(message.content)
def extract_tool_calls(content: Union[str, List[Union[str, dict]]]) -> List[ToolCall]:
"""Extract tool calls from a list of content blocks."""
if isinstance(content, list):
tool_calls = []
for block in content:
if isinstance(block, str):
continue
if block["type"] != "tool_use":
continue
tool_calls.append(
tool_call(name=block["name"], args=block["input"], id=block["id"])
)
return tool_calls
else:
return []
|
from datetime import datetime
import pytest
from jina import Document, DocumentArray, Flow
class MyOwnException(Exception):
pass
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_invalid_input_raise(protocol):
f = Flow(protocol=protocol).add()
try:
with f:
da = DocumentArray([Document(text='hello', tags={'date': datetime.now()})])
try:
f.post(
on='/', inputs=da
) # process should stop here and raise an exception
except Exception:
raise MyOwnException
assert False
except MyOwnException:
pass
|
import pytest
from datetime import datetime
from jina import Flow, DocumentArray, Document
class MyOwnException(Exception):
pass
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_invalid_input_raise(protocol):
f = Flow(protocol=protocol).add()
try:
with f:
da = DocumentArray([Document(text='hello', tags={'date': datetime.now()})])
try:
f.post(on='/', inputs=da) # process should stop here and raise an exception
except Exception:
raise MyOwnException
assert False
except MyOwnException:
pass
|
from langchain_core.prompts.prompt import PromptTemplate
KG_TRIPLE_DELIMITER = "<|>"
_DEFAULT_KNOWLEDGE_TRIPLE_EXTRACTION_TEMPLATE = (
"You are a networked intelligence helping a human track knowledge triples"
" about all relevant people, things, concepts, etc. and integrating"
" them with your knowledge stored within your weights"
" as well as that stored in a knowledge graph."
" Extract all of the knowledge triples from the text."
" A knowledge triple is a clause that contains a subject, a predicate,"
" and an object. The subject is the entity being described,"
" the predicate is the property of the subject that is being"
" described, and the object is the value of the property.\n\n"
"EXAMPLE\n"
"It's a state in the US. It's also the number 1 producer of gold in the US.\n\n"
f"Output: (Nevada, is a, state){KG_TRIPLE_DELIMITER}(Nevada, is in, US)"
f"{KG_TRIPLE_DELIMITER}(Nevada, is the number 1 producer of, gold)\n"
"END OF EXAMPLE\n\n"
"EXAMPLE\n"
"I'm going to the store.\n\n"
"Output: NONE\n"
"END OF EXAMPLE\n\n"
"EXAMPLE\n"
"Oh huh. I know Descartes likes to drive antique scooters and play the mandolin.\n"
f"Output: (Descartes, likes to drive, antique scooters){KG_TRIPLE_DELIMITER}(Descartes, plays, mandolin)\n" # noqa: E501
"END OF EXAMPLE\n\n"
"EXAMPLE\n"
"{text}"
"Output:"
)
KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT = PromptTemplate(
input_variables=["text"],
template=_DEFAULT_KNOWLEDGE_TRIPLE_EXTRACTION_TEMPLATE,
)
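# A formatting sketch (the input text is a placeholder):
#
#     prompt_text = KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT.format(
#         text="Nevada is a state in the US."
#     )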
|
# flake8: noqa
from langchain_core.prompts.prompt import PromptTemplate
KG_TRIPLE_DELIMITER = "<|>"
_DEFAULT_KNOWLEDGE_TRIPLE_EXTRACTION_TEMPLATE = (
"You are a networked intelligence helping a human track knowledge triples"
" about all relevant people, things, concepts, etc. and integrating"
" them with your knowledge stored within your weights"
" as well as that stored in a knowledge graph."
" Extract all of the knowledge triples from the text."
" A knowledge triple is a clause that contains a subject, a predicate,"
" and an object. The subject is the entity being described,"
" the predicate is the property of the subject that is being"
" described, and the object is the value of the property.\n\n"
"EXAMPLE\n"
"It's a state in the US. It's also the number 1 producer of gold in the US.\n\n"
f"Output: (Nevada, is a, state){KG_TRIPLE_DELIMITER}(Nevada, is in, US)"
f"{KG_TRIPLE_DELIMITER}(Nevada, is the number 1 producer of, gold)\n"
"END OF EXAMPLE\n\n"
"EXAMPLE\n"
"I'm going to the store.\n\n"
"Output: NONE\n"
"END OF EXAMPLE\n\n"
"EXAMPLE\n"
"Oh huh. I know Descartes likes to drive antique scooters and play the mandolin.\n"
f"Output: (Descartes, likes to drive, antique scooters){KG_TRIPLE_DELIMITER}(Descartes, plays, mandolin)\n"
"END OF EXAMPLE\n\n"
"EXAMPLE\n"
"{text}"
"Output:"
)
KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT = PromptTemplate(
input_variables=["text"],
template=_DEFAULT_KNOWLEDGE_TRIPLE_EXTRACTION_TEMPLATE,
)
|
"""Argparser module for container runtimes"""
from jina.parsers.helper import KVAppendAction, add_arg_group
def mixin_container_runtime_parser(parser, pod_type: str = 'executor'):
"""Mixing in arguments required by :class:`ContainerRuntime` into the given parser.
:param parser: the parser instance to which we add arguments
:param pod_type: the pod_type configured by the parser. Can be either 'executor' for an Executor pod or 'gateway' for a Gateway pod
"""
gp = add_arg_group(parser, title='ContainerRuntime')
gp.add_argument(
'--entrypoint',
type=str,
        help='The entrypoint command overrides the ENTRYPOINT in the Docker image. '
        'When not set, the Docker image ENTRYPOINT takes effect.',
)
gp.add_argument(
'--docker-kwargs',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of kwargs arguments that will be passed to the Docker SDK when starting the docker
container.
More details can be found in the Docker SDK docs: https://docker-py.readthedocs.io/en/stable/
''',
)
if pod_type == 'executor':
gp.add_argument(
'--volumes',
type=str,
nargs='*',
metavar='DIR',
help='''
The path on the host to be mounted inside the container.
Note,
- If separated by `:`, then the first part will be considered as the local host path and the second part is the path in the container system.
- If no split provided, then the basename of that directory will be mounted into container's root path, e.g. `--volumes="/user/test/my-workspace"` will be mounted into `/my-workspace` inside the container.
- All volumes are mounted with read-write mode.
''',
)
gp.add_argument(
'--gpus',
type=str,
help=f'''
This argument allows dockerized Jina Executors to discover local gpu devices.
Note,
- To access all gpus, use `--gpus all`.
- To access multiple gpus, e.g. make use of 2 gpus, use `--gpus 2`.
- To access specified gpus based on device id, use `--gpus device=[YOUR-GPU-DEVICE-ID]`
- To access specified gpus based on multiple device id, use `--gpus device=[YOUR-GPU-DEVICE-ID1],device=[YOUR-GPU-DEVICE-ID2]`
    - To specify more parameters, use `--gpus device=[YOUR-GPU-DEVICE-ID],runtime=nvidia,capabilities=display`
''',
)
gp.add_argument(
'--disable-auto-volume',
action='store_true',
default=False,
help=f'Do not automatically mount a volume for dockerized Executors.',
)
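# A minimal usage sketch (flag values are placeholders):
#
#     import argparse
#
#     parser = argparse.ArgumentParser()
#     mixin_container_runtime_parser(parser, pod_type='executor')
#     args = parser.parse_args(['--gpus', 'all', '--volumes', '/data:/data'])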
|
"""Argparser module for container runtimes"""
from jina.parsers.helper import KVAppendAction, add_arg_group
def mixin_container_runtime_parser(parser):
"""Mixing in arguments required by :class:`ContainerRuntime` into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='ContainerRuntime')
gp.add_argument(
'--entrypoint',
type=str,
        help='The entrypoint command overrides the ENTRYPOINT in the Docker image. '
        'When not set, the Docker image ENTRYPOINT takes effect.',
)
gp.add_argument(
'--docker-kwargs',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of kwargs arguments that will be passed to the Docker SDK when starting the docker
container.
More details can be found in the Docker SDK docs: https://docker-py.readthedocs.io/en/stable/
''',
)
gp.add_argument(
'--volumes',
type=str,
nargs='*',
metavar='DIR',
help='''
The path on the host to be mounted inside the container.
Note,
- If separated by `:`, then the first part will be considered as the local host path and the second part is the path in the container system.
- If no split provided, then the basename of that directory will be mounted into container's root path, e.g. `--volumes="/user/test/my-workspace"` will be mounted into `/my-workspace` inside the container.
- All volumes are mounted with read-write mode.
''',
)
gp.add_argument(
'--gpus',
type=str,
help='''
    This argument allows dockerized Jina Executors to discover local gpu devices.
Note,
- To access all gpus, use `--gpus all`.
- To access multiple gpus, e.g. make use of 2 gpus, use `--gpus 2`.
- To access specified gpus based on device id, use `--gpus device=[YOUR-GPU-DEVICE-ID]`
- To access specified gpus based on multiple device id, use `--gpus device=[YOUR-GPU-DEVICE-ID1],device=[YOUR-GPU-DEVICE-ID2]`
    - To specify more parameters, use `--gpus device=[YOUR-GPU-DEVICE-ID],runtime=nvidia,capabilities=display`
''',
)
gp.add_argument(
'--disable-auto-volume',
action='store_true',
default=False,
help='Do not automatically mount a volume for dockerized Executors.',
)
|
import os
from pathlib import Path
import pytest
from jina.hubble import HubExecutor, hubapi
from jina.hubble.hubapi import list_local
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture
def executor_zip_file():
return Path(__file__).parent / 'dummy_executor.zip'
@pytest.fixture
def test_executor():
return HubExecutor(uuid='hello', name=None, commit_id='test_commit', tag='v0')
@pytest.mark.parametrize('install_deps', [True, False])
def test_install_local(executor_zip_file, test_executor, install_deps):
assert not hubapi.exist_local(test_executor.uuid, test_executor.tag)
hubapi.install_local(executor_zip_file, test_executor, install_deps=install_deps)
assert hubapi.exist_local(test_executor.uuid, test_executor.tag)
assert any(
str(path).endswith(
f'{os.path.join(test_executor.uuid, test_executor.tag)}.dist-info'
)
for path in list_local()
)
hubapi.uninstall_local(test_executor.uuid)
assert not hubapi.exist_local(test_executor.uuid, test_executor.tag)
def test_load_dump_secret():
import tempfile
uuid8 = 'hello'
secret = 'world'
with tempfile.TemporaryDirectory() as tmpdirname:
hubapi.dump_secret(Path(tmpdirname), uuid8, secret)
new_uuid8, new_secret = hubapi.load_secret(Path(tmpdirname))
assert new_uuid8 == uuid8
assert new_secret == secret
def test_load_dump_secret_existing_encryption_key():
import tempfile
uuid8 = 'hello'
secret = 'world'
with tempfile.TemporaryDirectory() as tmpdirname:
# creates an encryption key
hubapi.dump_secret(Path(tmpdirname), 'dummy', 'dummy')
# dump secret using existing encryption key
hubapi.dump_secret(Path(tmpdirname), uuid8, secret)
new_uuid8, new_secret = hubapi.load_secret(Path(tmpdirname))
assert new_uuid8 == uuid8
assert new_secret == secret
|
import os
from pathlib import Path
import pytest
from jina.hubble import HubExecutor, hubapi
from jina.hubble.hubapi import list_local
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture
def executor_zip_file():
return Path(__file__).parent / 'dummy_executor.zip'
@pytest.fixture
def test_executor():
return HubExecutor(uuid='hello', name=None, commit_id='test_commit', tag='v0')
@pytest.mark.parametrize('install_deps', [True, False])
def test_install_local(test_envs, executor_zip_file, test_executor, install_deps):
assert not hubapi.exist_local(test_executor.uuid, test_executor.tag)
hubapi.install_local(executor_zip_file, test_executor, install_deps=install_deps)
assert hubapi.exist_local(test_executor.uuid, test_executor.tag)
assert any(
str(path).endswith(
f'{os.path.join(test_executor.uuid, test_executor.tag)}.dist-info'
)
for path in list_local()
)
hubapi.uninstall_local(test_executor.uuid)
assert not hubapi.exist_local(test_executor.uuid, test_executor.tag)
def test_load_dump_secret(test_envs):
import tempfile
uuid8 = 'hello'
secret = 'world'
with tempfile.TemporaryDirectory() as tmpdirname:
hubapi.dump_secret(Path(tmpdirname), uuid8, secret)
new_uuid8, new_secret = hubapi.load_secret(Path(tmpdirname))
assert new_uuid8 == uuid8
assert new_secret == secret
def test_load_dump_secret_existing_encryption_key(test_envs):
import tempfile
uuid8 = 'hello'
secret = 'world'
with tempfile.TemporaryDirectory() as tmpdirname:
# creates an encryption key
hubapi.dump_secret(Path(tmpdirname), 'dummy', 'dummy')
# dump secret using existing encryption key
hubapi.dump_secret(Path(tmpdirname), uuid8, secret)
new_uuid8, new_secret = hubapi.load_secret(Path(tmpdirname))
assert new_uuid8 == uuid8
assert new_secret == secret
|
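The second copy of these tests takes a `test_envs` fixture that is not shown here. A plausible conftest sketch (hypothetical; the real fixture may set different variables) would isolate the hub directories in a temporary path:
# Hedged sketch of a hypothetical `test_envs` fixture; the actual Jina
# conftest may set different environment variables.
import pytest

@pytest.fixture
def test_envs(tmpdir, monkeypatch):
    monkeypatch.setenv('JINA_HUB_ROOT', str(tmpdir))
    monkeypatch.setenv('JINA_HUB_CACHE_DIR', str(tmpdir))
    yield str(tmpdir)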
"""Parser for JSON output."""
from __future__ import annotations
import json
from json import JSONDecodeError
from typing import Annotated, Any, Optional, TypeVar, Union
import jsonpatch # type: ignore[import]
import pydantic
from pydantic import SkipValidation
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers.format_instructions import JSON_FORMAT_INSTRUCTIONS
from langchain_core.output_parsers.transform import BaseCumulativeTransformOutputParser
from langchain_core.outputs import Generation
from langchain_core.utils.json import (
parse_and_check_json_markdown,
parse_json_markdown,
parse_partial_json,
)
from langchain_core.utils.pydantic import PYDANTIC_MAJOR_VERSION
if PYDANTIC_MAJOR_VERSION < 2:
PydanticBaseModel = pydantic.BaseModel
else:
from pydantic.v1 import BaseModel
# Union type needs to be last assignment to PydanticBaseModel to make mypy happy.
PydanticBaseModel = Union[BaseModel, pydantic.BaseModel] # type: ignore
TBaseModel = TypeVar("TBaseModel", bound=PydanticBaseModel)
class JsonOutputParser(BaseCumulativeTransformOutputParser[Any]):
"""Parse the output of an LLM call to a JSON object.
When used in streaming mode, it will yield partial JSON objects containing
all the keys that have been returned so far.
In streaming, if `diff` is set to `True`, yields JSONPatch operations
describing the difference between the previous and the current object.
"""
pydantic_object: Annotated[Optional[type[TBaseModel]], SkipValidation()] = None # type: ignore
"""The Pydantic object to use for validation.
If None, no validation is performed."""
def _diff(self, prev: Optional[Any], next: Any) -> Any:
return jsonpatch.make_patch(prev, next).patch
def _get_schema(self, pydantic_object: type[TBaseModel]) -> dict[str, Any]:
if issubclass(pydantic_object, pydantic.BaseModel):
return pydantic_object.model_json_schema()
elif issubclass(pydantic_object, pydantic.v1.BaseModel):
return pydantic_object.schema()
def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
"""Parse the result of an LLM call to a JSON object.
Args:
result: The result of the LLM call.
partial: Whether to parse partial JSON objects.
If True, the output will be a JSON object containing
all the keys that have been returned so far.
If False, the output will be the full JSON object.
Default is False.
Returns:
The parsed JSON object.
Raises:
OutputParserException: If the output is not valid JSON.
"""
text = result[0].text
text = text.strip()
if partial:
try:
return parse_json_markdown(text)
except JSONDecodeError:
return None
else:
try:
return parse_json_markdown(text)
except JSONDecodeError as e:
msg = f"Invalid json output: {text}"
raise OutputParserException(msg, llm_output=text) from e
def parse(self, text: str) -> Any:
"""Parse the output of an LLM call to a JSON object.
Args:
text: The output of the LLM call.
Returns:
The parsed JSON object.
"""
return self.parse_result([Generation(text=text)])
def get_format_instructions(self) -> str:
"""Return the format instructions for the JSON output.
Returns:
The format instructions for the JSON output.
"""
if self.pydantic_object is None:
return "Return a JSON object."
else:
# Copy schema to avoid altering original Pydantic schema.
schema = dict(self._get_schema(self.pydantic_object).items())
# Remove extraneous fields.
reduced_schema = schema
if "title" in reduced_schema:
del reduced_schema["title"]
if "type" in reduced_schema:
del reduced_schema["type"]
# Ensure json in context is well-formed with double quotes.
schema_str = json.dumps(reduced_schema, ensure_ascii=False)
return JSON_FORMAT_INSTRUCTIONS.format(schema=schema_str)
@property
def _type(self) -> str:
return "simple_json_output_parser"
# For backwards compatibility
SimpleJsonOutputParser = JsonOutputParser
__all__ = [
"JsonOutputParser",
"SimpleJsonOutputParser", # For backwards compatibility
"parse_partial_json", # For backwards compatibility
"parse_and_check_json_markdown", # For backwards compatibility
]
|
from __future__ import annotations
import json
from json import JSONDecodeError
from typing import Annotated, Any, Optional, TypeVar, Union
import jsonpatch # type: ignore[import]
import pydantic
from pydantic import SkipValidation
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers.format_instructions import JSON_FORMAT_INSTRUCTIONS
from langchain_core.output_parsers.transform import BaseCumulativeTransformOutputParser
from langchain_core.outputs import Generation
from langchain_core.utils.json import (
parse_and_check_json_markdown,
parse_json_markdown,
parse_partial_json,
)
from langchain_core.utils.pydantic import PYDANTIC_MAJOR_VERSION
if PYDANTIC_MAJOR_VERSION < 2:
PydanticBaseModel = pydantic.BaseModel
else:
from pydantic.v1 import BaseModel
# Union type needs to be last assignment to PydanticBaseModel to make mypy happy.
PydanticBaseModel = Union[BaseModel, pydantic.BaseModel] # type: ignore
TBaseModel = TypeVar("TBaseModel", bound=PydanticBaseModel)
class JsonOutputParser(BaseCumulativeTransformOutputParser[Any]):
"""Parse the output of an LLM call to a JSON object.
When used in streaming mode, it will yield partial JSON objects containing
all the keys that have been returned so far.
In streaming, if `diff` is set to `True`, yields JSONPatch operations
describing the difference between the previous and the current object.
"""
pydantic_object: Annotated[Optional[type[TBaseModel]], SkipValidation()] = None # type: ignore
"""The Pydantic object to use for validation.
If None, no validation is performed."""
def _diff(self, prev: Optional[Any], next: Any) -> Any:
return jsonpatch.make_patch(prev, next).patch
def _get_schema(self, pydantic_object: type[TBaseModel]) -> dict[str, Any]:
if issubclass(pydantic_object, pydantic.BaseModel):
return pydantic_object.model_json_schema()
elif issubclass(pydantic_object, pydantic.v1.BaseModel):
return pydantic_object.schema()
def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
"""Parse the result of an LLM call to a JSON object.
Args:
result: The result of the LLM call.
partial: Whether to parse partial JSON objects.
If True, the output will be a JSON object containing
all the keys that have been returned so far.
If False, the output will be the full JSON object.
Default is False.
Returns:
The parsed JSON object.
Raises:
OutputParserException: If the output is not valid JSON.
"""
text = result[0].text
text = text.strip()
if partial:
try:
return parse_json_markdown(text)
except JSONDecodeError:
return None
else:
try:
return parse_json_markdown(text)
except JSONDecodeError as e:
msg = f"Invalid json output: {text}"
raise OutputParserException(msg, llm_output=text) from e
def parse(self, text: str) -> Any:
"""Parse the output of an LLM call to a JSON object.
Args:
text: The output of the LLM call.
Returns:
The parsed JSON object.
"""
return self.parse_result([Generation(text=text)])
def get_format_instructions(self) -> str:
"""Return the format instructions for the JSON output.
Returns:
The format instructions for the JSON output.
"""
if self.pydantic_object is None:
return "Return a JSON object."
else:
# Copy schema to avoid altering original Pydantic schema.
schema = dict(self._get_schema(self.pydantic_object).items())
# Remove extraneous fields.
reduced_schema = schema
if "title" in reduced_schema:
del reduced_schema["title"]
if "type" in reduced_schema:
del reduced_schema["type"]
# Ensure json in context is well-formed with double quotes.
schema_str = json.dumps(reduced_schema, ensure_ascii=False)
return JSON_FORMAT_INSTRUCTIONS.format(schema=schema_str)
@property
def _type(self) -> str:
return "simple_json_output_parser"
# For backwards compatibility
SimpleJsonOutputParser = JsonOutputParser
__all__ = [
"JsonOutputParser",
"SimpleJsonOutputParser", # For backwards compatibility
"parse_partial_json", # For backwards compatibility
"parse_and_check_json_markdown", # For backwards compatibility
]
|
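A short usage sketch for the parser defined above; the LLM reply is hard-coded, so only `langchain_core` is assumed to be installed:
# Hedged sketch: JsonOutputParser also handles fenced-JSON markdown replies.
from langchain_core.output_parsers import JsonOutputParser

parser = JsonOutputParser()
reply = '```json\n{"answer": 42, "unit": "kg"}\n```'
print(parser.parse(reply))  # {'answer': 42, 'unit': 'kg'}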
# training schedule for 20e
train_cfg = dict(by_epoch=True, max_epochs=20)
val_cfg = dict(interval=1)
test_cfg = dict()
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=20,
by_epoch=True,
milestones=[16, 19],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
|
# training schedule for 20e
train_cfg = dict(by_epoch=True, max_epochs=20)
val_cfg = dict(interval=1)
test_cfg = dict()
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=20,
by_epoch=True,
milestones=[16, 19],
gamma=0.1)
]
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
|
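The two schedule snippets above differ only in how the optimizer is declared; a sketch of the equivalence (not taken from the source) is that the bare dict simply moves inside an `OptimWrapper`:
# Hedged sketch: bare optimizer dict vs. its OptimWrapper form.
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer)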
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import numpy as np
import pytest
import torch
from executor.models import EmbeddingModelWrapper, _ModelCatalogue
@pytest.mark.parametrize(
['model_name', 'is_supported'],
[
('ResNet', False),
('resnet18', True),
('resnet50', True),
('alexnet', True),
('xResNet', False),
('Alexnet', False),
],
)
def test_is_model_supported(model_name: str, is_supported: bool):
assert _ModelCatalogue.is_model_supported(model_name) == is_supported
@pytest.mark.parametrize(
['model_name', 'layer'],
[
('alexnet', 'features'),
('vgg11', 'features'),
('squeezenet1_0', 'features'),
('densenet121', 'features'),
('mnasnet0_5', 'layers'),
('mobilenet_v2', 'features'),
],
)
def test_is_correct_layer(model_name: str, layer: str):
assert _ModelCatalogue.get_layer_name(model_name) == layer
@pytest.mark.parametrize(
['model_name', 'dim'], [('mobilenet_v2', 1280), ('resnet18', 512)]
)
def test_get_features(model_name: str, dim: int):
model_wrapper = EmbeddingModelWrapper(model_name)
embeddings = model_wrapper.compute_embeddings(
np.ones((10, 3, 224, 224), dtype=np.float32)
)
assert embeddings.shape == (10, dim)
@pytest.mark.parametrize(
'feature_map',
[
np.ones((1, 10, 10, 3)),
np.random.rand(1, 224, 224, 3),
np.zeros((1, 100, 100, 3)),
],
)
def test_get_pooling(
feature_map: np.ndarray,
):
wrapper = EmbeddingModelWrapper('mobilenet_v2')
feature_map_after_pooling = wrapper._pooling_function(torch.from_numpy(feature_map))
np.testing.assert_array_almost_equal(
feature_map_after_pooling, np.mean(feature_map, axis=(2, 3))
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires GPU and CUDA')
def test_get_features_gpu():
wrapper = EmbeddingModelWrapper('mobilenet_v2')
arr_in = np.ones((2, 3, 10, 10), dtype=np.float32)
encodings = (
wrapper.get_features(torch.from_numpy(arr_in).to(wrapper.device))
.detach()
.cpu()
.numpy()
)
assert encodings.shape == (2, 1280, 1, 1)
def test_get_features_cpu():
wrapper = EmbeddingModelWrapper('mobilenet_v2', device='cpu')
arr_in = np.ones((2, 3, 224, 224), dtype=np.float32)
encodings = wrapper.get_features(torch.from_numpy(arr_in)).detach().numpy()
assert encodings.shape[1] == 1280
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import numpy as np
import pytest
import torch
from ...models import EmbeddingModelWrapper, _ModelCatalogue
@pytest.mark.parametrize(
['model_name', 'is_supported'],
[
('ResNet', False),
('resnet18', True),
('resnet50', True),
('alexnet', True),
('xResNet', False),
('Alexnet', False),
],
)
def test_is_model_supported(model_name: str, is_supported: bool):
assert _ModelCatalogue.is_model_supported(model_name) == is_supported
@pytest.mark.parametrize(
['model_name', 'layer'],
[
('alexnet', 'features'),
('vgg11', 'features'),
('squeezenet1_0', 'features'),
('densenet121', 'features'),
('mnasnet0_5', 'layers'),
('mobilenet_v2', 'features'),
],
)
def test_is_correct_layer(model_name: str, layer: str):
assert _ModelCatalogue.get_layer_name(model_name) == layer
@pytest.mark.parametrize(
['model_name', 'dim'], [('mobilenet_v2', 1280), ('resnet18', 512)]
)
def test_get_features(model_name: str, dim: int):
model_wrapper = EmbeddingModelWrapper(model_name)
embeddings = model_wrapper.compute_embeddings(
np.ones((10, 3, 224, 224), dtype=np.float32)
)
assert embeddings.shape == (10, dim)
@pytest.mark.parametrize(
'feature_map',
[
np.ones((1, 10, 10, 3)),
np.random.rand(1, 224, 224, 3),
np.zeros((1, 100, 100, 3)),
],
)
def test_get_pooling(
feature_map: np.ndarray,
):
wrapper = EmbeddingModelWrapper('mobilenet_v2')
feature_map_after_pooling = wrapper._pooling_function(torch.from_numpy(feature_map))
np.testing.assert_array_almost_equal(
feature_map_after_pooling, np.mean(feature_map, axis=(2, 3))
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires GPU and CUDA')
def test_get_features_gpu():
wrapper = EmbeddingModelWrapper('mobilenet_v2')
arr_in = np.ones((2, 3, 10, 10), dtype=np.float32)
encodings = (
wrapper.get_features(torch.from_numpy(arr_in).to(wrapper.device))
.detach()
.cpu()
.numpy()
)
assert encodings.shape == (2, 1280, 1, 1)
def test_get_features_cpu():
wrapper = EmbeddingModelWrapper('mobilenet_v2', device='cpu')
arr_in = np.ones((2, 3, 224, 224), dtype=np.float32)
encodings = wrapper.get_features(torch.from_numpy(arr_in)).detach().numpy()
assert encodings.shape[1] == 1280
|
from jina import DocumentArray, Flow
from ...clip_text import CLIPTextEncoder
def test_no_documents():
test_docs = DocumentArray()
f = Flow().add(uses=CLIPTextEncoder)
with f:
f.search(test_docs, {})
assert len(test_docs) == 0 # SUCCESS
|
from jina import DocumentArray, Flow
from ...clip_text import CLIPTextEncoder
def test_no_documents():
test_docs = DocumentArray()
f = Flow().add(uses=CLIPTextEncoder)
with f:
f.search(test_docs, {})
assert len(test_docs) == 0 # SUCCESS
|
from .BinaryClassificationEvaluator import BinaryClassificationEvaluator
from .EmbeddingSimilarityEvaluator import EmbeddingSimilarityEvaluator
from .InformationRetrievalEvaluator import InformationRetrievalEvaluator
from .LabelAccuracyEvaluator import LabelAccuracyEvaluator
from .MSEEvaluator import MSEEvaluator
from .MSEEvaluatorFromDataFrame import MSEEvaluatorFromDataFrame
from .ParaphraseMiningEvaluator import ParaphraseMiningEvaluator
from .RerankingEvaluator import RerankingEvaluator
from .SentenceEvaluator import SentenceEvaluator
from .SequentialEvaluator import SequentialEvaluator
from .SimilarityFunction import SimilarityFunction
from .TranslationEvaluator import TranslationEvaluator
from .TripletEvaluator import TripletEvaluator
__all__ = [
"SentenceEvaluator",
"SimilarityFunction",
"BinaryClassificationEvaluator",
"EmbeddingSimilarityEvaluator",
"InformationRetrievalEvaluator",
"LabelAccuracyEvaluator",
"MSEEvaluator",
"MSEEvaluatorFromDataFrame",
"ParaphraseMiningEvaluator",
"SequentialEvaluator",
"TranslationEvaluator",
"TripletEvaluator",
"RerankingEvaluator",
]
|
from .SentenceEvaluator import SentenceEvaluator
from .SimilarityFunction import SimilarityFunction
from .BinaryClassificationEvaluator import BinaryClassificationEvaluator
from .EmbeddingSimilarityEvaluator import EmbeddingSimilarityEvaluator
from .InformationRetrievalEvaluator import InformationRetrievalEvaluator
from .LabelAccuracyEvaluator import LabelAccuracyEvaluator
from .MSEEvaluator import MSEEvaluator
from .MSEEvaluatorFromDataFrame import MSEEvaluatorFromDataFrame
from .ParaphraseMiningEvaluator import ParaphraseMiningEvaluator
from .SequentialEvaluator import SequentialEvaluator
from .TranslationEvaluator import TranslationEvaluator
from .TripletEvaluator import TripletEvaluator
from .RerankingEvaluator import RerankingEvaluator
__all__ = [
"SentenceEvaluator",
"SimilarityFunction",
"BinaryClassificationEvaluator",
"EmbeddingSimilarityEvaluator",
"InformationRetrievalEvaluator",
"LabelAccuracyEvaluator",
"MSEEvaluator",
"MSEEvaluatorFromDataFrame",
"ParaphraseMiningEvaluator",
"SequentialEvaluator",
"TranslationEvaluator",
"TripletEvaluator",
"RerankingEvaluator",
]
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa
model = dict(
backbone=dict(
_delete_=True,
type='SwinTransformer',
embed_dims=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.2,
patch_norm=True,
out_indices=(1, 2, 3),
        # Please only add indices that would be used
        # in the FPN, otherwise some parameters will not be used
with_cp=False,
convert_weights=True,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
neck=dict(in_channels=[192, 384, 768], start_level=0, num_outs=5))
# optimizer
optim_wrapper = dict(optimizer=dict(lr=0.01))
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa
model = dict(
backbone=dict(
_delete_=True,
type='SwinTransformer',
embed_dims=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.2,
patch_norm=True,
out_indices=(1, 2, 3),
        # Please only add indices that would be used
        # in the FPN, otherwise some parameters will not be used
with_cp=False,
convert_weights=True,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
neck=dict(in_channels=[192, 384, 768], start_level=0, num_outs=5))
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
|
from __future__ import annotations
import functools
import logging
logger = logging.getLogger(__name__)
def cross_encoder_init_args_decorator(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
kwargs_renamed_mapping = {
"model_name": "model_name_or_path",
"automodel_args": "model_kwargs",
"tokenizer_args": "tokenizer_kwargs",
"config_args": "config_kwargs",
"cache_dir": "cache_folder",
"default_activation_function": "activation_fn",
}
for old_name, new_name in kwargs_renamed_mapping.items():
if old_name in kwargs:
kwarg_value = kwargs.pop(old_name)
logger.warning(
f"The CrossEncoder `{old_name}` argument was renamed and is now deprecated, please use `{new_name}` instead."
)
if new_name not in kwargs:
kwargs[new_name] = kwarg_value
if "classifier_dropout" in kwargs:
classifier_dropout = kwargs.pop("classifier_dropout")
logger.warning(
f"The CrossEncoder `classifier_dropout` argument is deprecated. Please use `config_kwargs={{'classifier_dropout': {classifier_dropout}}}` instead."
)
if "config_kwargs" not in kwargs:
kwargs["config_kwargs"] = {"classifier_dropout": classifier_dropout}
else:
kwargs["config_kwargs"]["classifier_dropout"] = classifier_dropout
return func(self, *args, **kwargs)
return wrapper
def cross_encoder_predict_rank_args_decorator(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
kwargs_renamed_mapping = {
"activation_fct": "activation_fn",
}
for old_name, new_name in kwargs_renamed_mapping.items():
if old_name in kwargs:
kwarg_value = kwargs.pop(old_name)
logger.warning(
f"The CrossEncoder.predict `{old_name}` argument was renamed and is now deprecated, please use `{new_name}` instead."
)
if new_name not in kwargs:
kwargs[new_name] = kwarg_value
deprecated_args = ["num_workers"]
for deprecated_arg in deprecated_args:
if deprecated_arg in kwargs:
kwargs.pop(deprecated_arg)
logger.warning(
f"The CrossEncoder.predict `{deprecated_arg}` argument is deprecated and has no effect. It will be removed in a future version."
)
return func(self, *args, **kwargs)
return wrapper
|
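A small sketch of what the init decorator above does to renamed kwargs; `DummyCrossEncoder` is a hypothetical stand-in, not the real class:
# Hedged sketch: deprecated kwargs are remapped (with a warning) before
# reaching the wrapped __init__.
class DummyCrossEncoder:
    @cross_encoder_init_args_decorator
    def __init__(self, **kwargs):
        self.kwargs = kwargs

enc = DummyCrossEncoder(model_name='bert-base-uncased', classifier_dropout=0.1)
print(enc.kwargs)
# {'model_name_or_path': 'bert-base-uncased',
#  'config_kwargs': {'classifier_dropout': 0.1}}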
from __future__ import annotations
import functools
import logging
logger = logging.getLogger(__name__)
def cross_encoder_init_args_decorator(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
kwargs_renamed_mapping = {
"model_name": "model_name_or_path",
"automodel_args": "model_kwargs",
"tokenizer_args": "tokenizer_kwargs",
"config_args": "config_kwargs",
"cache_dir": "cache_folder",
}
for old_name, new_name in kwargs_renamed_mapping.items():
if old_name in kwargs:
kwarg_value = kwargs.pop(old_name)
logger.warning(
f"The CrossEncoder `{old_name}` argument was renamed and is now deprecated, please use `{new_name}` instead."
)
if new_name not in kwargs:
kwargs[new_name] = kwarg_value
if "classifier_dropout" in kwargs:
classifier_dropout = kwargs.pop("classifier_dropout")
logger.warning(
f"The CrossEncoder `classifier_dropout` argument is deprecated. Please use `config_kwargs={{'classifier_dropout': {classifier_dropout}}}` instead."
)
if "config_kwargs" not in kwargs:
kwargs["config_kwargs"] = {"classifier_dropout": classifier_dropout}
else:
kwargs["config_kwargs"]["classifier_dropout"] = classifier_dropout
return func(self, *args, **kwargs)
return wrapper
|
_base_ = ['faster-rcnn_r50_fpn_32xb2-1x_openimages-challenge.py']
# Use ClassAwareSampler
train_dataloader = dict(
sampler=dict(_delete_=True, type='ClassAwareSampler', num_sample_class=1))
|
_base_ = ['faster_rcnn_r50_fpn_32x2_1x_openimages_challenge.py']
# Use ClassAwareSampler
train_dataloader = dict(
sampler=dict(_delete_=True, type='ClassAwareSampler', num_sample_class=1))
|
# ruff: noqa: F401
# This is the module that test_patching.py uses to test patch_submodule()
import os
import os as renamed_os
from os import path
from os import path as renamed_path
from os.path import join
from os.path import join as renamed_join
open = open # we just need to have a builtin inside this module to test it properly
|
# isort: skip_file
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: F401 - this is just for tests
import os as renamed_os # noqa: F401 - this is just for tests
from os import path # noqa: F401 - this is just for tests
from os import path as renamed_path # noqa: F401 - this is just for tests
from os.path import join # noqa: F401 - this is just for tests
from os.path import join as renamed_join # noqa: F401 - this is just for tests
open = open # noqa we just need to have a builtin inside this module to test it properly
|
"""
This example trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) on the STS benchmark from scratch.
It uses MatryoshkaLoss with the powerful CoSENTLoss to train models that perform well at output dimensions [768, 512, 256, 128, 64].
It generates sentence embeddings that can be compared using cosine-similarity to measure the similarity.
Usage:
python 2d_matryoshka_sts.py
OR
python 2d_matryoshka_sts.py pretrained_transformer_model_name
"""
import traceback
from datasets import load_dataset
from sentence_transformers import losses
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, SentenceTransformerTrainingArguments
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator, SimilarityFunction
import logging
from datetime import datetime
import sys
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
model_name = sys.argv[1] if len(sys.argv) > 1 else "distilbert-base-uncased"
batch_size = 16
num_train_epochs = 4
# Save path of the model
output_dir = f"output/2d_matryoshka_sts_{model_name.replace('/', '-')}-{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}"
# 1. Here we define our SentenceTransformer model. If not already a Sentence Transformer model, it will automatically
# create one with "mean" pooling.
model = SentenceTransformer(model_name)
# If we want, we can limit the maximum sequence length for the model
# model.max_seq_length = 75
logging.info(model)
# 2. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 3. Define our training loss
# CoSENTLoss (https://sbert.net/docs/package_reference/losses.html#cosentloss) needs two text columns and one
# similarity score column (between 0 and 1)
inner_train_loss = losses.CoSENTLoss(model=model)
train_loss = losses.Matryoshka2dLoss(model, inner_train_loss, [768, 512, 256, 128, 64])
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_train_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="2d-matryoshka-sts", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-sts-2d-matryoshka")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-sts-2d-matryoshka')`."
)
|
"""
This example trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) on the STS benchmark from scratch.
It uses MatryoshkaLoss with the powerful CoSENTLoss to train models that perform well at output dimensions [768, 512, 256, 128, 64].
It generates sentence embeddings that can be compared using cosine-similarity to measure the similarity.
Usage:
python 2d_matryoshka_sts.py
OR
python 2d_matryoshka_sts.py pretrained_transformer_model_name
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import SentenceTransformer, LoggingHandler, losses, models, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.readers import InputExample
import logging
from datetime import datetime
import sys
import os
import gzip
import csv
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# You can specify any huggingface/transformers pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = sys.argv[1] if len(sys.argv) > 1 else "distilbert-base-uncased"
# Read the dataset
train_batch_size = 16
num_epochs = 4
model_save_path = (
"output/2d_matryoshka_sts_" + model_name.replace("/", "-") + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
# Use Huggingface/transformers model (like BERT, RoBERTa, XLNet, XLM-R) for mapping tokens to embeddings
word_embedding_model = models.Transformer(model_name)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False,
)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# Convert the dataset to a DataLoader ready for training
logging.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "dev":
dev_samples.append(inp_example)
elif row["split"] == "test":
test_samples.append(inp_example)
else:
train_samples.append(inp_example)
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
train_loss = losses.CoSENTLoss(model=model)
train_loss = losses.Matryoshka2dLoss(model, train_loss, [768, 512, 256, 128, 64])
logging.info("Read STSbenchmark dev dataset")
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name="sts-dev")
# Configure the training
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=evaluator,
epochs=num_epochs,
evaluation_steps=1000,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
model = SentenceTransformer(model_save_path)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
test_evaluator(model, output_path=model_save_path)
# Optionally, save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-sts-2d-matryoshka")
except Exception:
logging.error(
"Error uploading model to the Hugging Face Hub. To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({model_save_path!r})` "
f"and saving it using `model.push_to_hub('{model_name}-sts-2d-matryoshka')`."
)
|
import numpy as np
from docarray import BaseDoc
from docarray.array import DocVec
from docarray.array.doc_vec.column_storage import ColumnStorageView
from docarray.typing import AnyTensor
def test_column_storage_init():
class InnerDoc(BaseDoc):
price: int
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
doc: InnerDoc
docs = [
MyDoc(tensor=np.zeros(10), name='hello', doc=InnerDoc(price=i))
for i in range(4)
]
storage = DocVec[MyDoc](docs)._storage
assert (storage.tensor_columns['tensor'] == np.zeros((4, 10))).all()
for name in storage.any_columns['name']:
assert name == 'hello'
inner_docs = storage.doc_columns['doc']
assert isinstance(inner_docs, DocVec)
for i, doc in enumerate(inner_docs):
assert isinstance(doc, InnerDoc)
assert doc.price == i
def test_column_storage_view():
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
docs = [MyDoc(tensor=np.zeros((10, 10)), name='hello', id=str(i)) for i in range(4)]
storage = DocVec[MyDoc](docs)._storage
view = ColumnStorageView(0, storage)
assert view['id'] == '0'
assert (view['tensor'] == np.zeros(10)).all()
assert view['name'] == 'hello'
view['id'] = '1'
view['tensor'] = np.ones(10)
view['name'] = 'byebye'
assert storage.any_columns['id'][0] == '1'
assert (storage.tensor_columns['tensor'][0] == np.ones(10)).all()
assert storage.any_columns['name'][0] == 'byebye'
def test_column_storage_to_dict():
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
docs = [MyDoc(tensor=np.zeros((10, 10)), name='hello', id=str(i)) for i in range(4)]
storage = DocVec[MyDoc](docs)._storage
view = ColumnStorageView(0, storage)
dict_view = view.to_dict()
assert dict_view['id'] == '0'
assert (dict_view['tensor'] == np.zeros(10)).all()
assert np.may_share_memory(dict_view['tensor'], view['tensor'])
assert dict_view['name'] == 'hello'
def test_storage_view_dict_like():
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
docs = [MyDoc(tensor=np.zeros((10, 10)), name='hello', id=str(i)) for i in range(4)]
storage = DocVec[MyDoc](docs)._storage
view = ColumnStorageView(0, storage)
assert list(view.keys()) == ['id', 'name', 'tensor']
# since boolean value of np array is ambiguous, we iterate manually
for val_view, val_reference in zip(view.values(), ['0', 'hello', np.zeros(10)]):
if isinstance(val_view, np.ndarray):
assert (val_view == val_reference).all()
else:
assert val_view == val_reference
for item_view, item_reference in zip(
view.items(), [('id', '0'), ('name', 'hello'), ('tensor', np.zeros(10))]
):
if isinstance(item_view[1], np.ndarray):
assert item_view[0] == item_reference[0]
assert (item_view[1] == item_reference[1]).all()
else:
assert item_view == item_reference
|
import numpy as np
from docarray import BaseDoc
from docarray.array import DocVec
from docarray.array.doc_vec.column_storage import ColumnStorageView
from docarray.typing import AnyTensor
def test_column_storage_init():
class InnerDoc(BaseDoc):
price: int
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
doc: InnerDoc
docs = [
MyDoc(tensor=np.zeros(10), name='hello', doc=InnerDoc(price=i))
for i in range(4)
]
storage = DocVec[MyDoc](docs)._storage
assert (storage.tensor_columns['tensor'] == np.zeros((4, 10))).all()
for name in storage.any_columns['name']:
assert name == 'hello'
inner_docs = storage.doc_columns['doc']
assert isinstance(inner_docs, DocVec[InnerDoc])
for i, doc in enumerate(inner_docs):
assert doc.price == i
def test_column_storage_view():
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
docs = [MyDoc(tensor=np.zeros((10, 10)), name='hello', id=str(i)) for i in range(4)]
storage = DocVec[MyDoc](docs)._storage
view = ColumnStorageView(0, storage)
assert view['id'] == '0'
assert (view['tensor'] == np.zeros(10)).all()
assert view['name'] == 'hello'
view['id'] = '1'
view['tensor'] = np.ones(10)
view['name'] = 'byebye'
assert storage.any_columns['id'][0] == '1'
assert (storage.tensor_columns['tensor'][0] == np.ones(10)).all()
assert storage.any_columns['name'][0] == 'byebye'
def test_column_storage_to_dict():
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
docs = [MyDoc(tensor=np.zeros((10, 10)), name='hello', id=str(i)) for i in range(4)]
storage = DocVec[MyDoc](docs)._storage
view = ColumnStorageView(0, storage)
dict_view = view.to_dict()
assert dict_view['id'] == '0'
assert (dict_view['tensor'] == np.zeros(10)).all()
assert np.may_share_memory(dict_view['tensor'], view['tensor'])
assert dict_view['name'] == 'hello'
def test_storage_view_dict_like():
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
docs = [MyDoc(tensor=np.zeros((10, 10)), name='hello', id=str(i)) for i in range(4)]
storage = DocVec[MyDoc](docs)._storage
view = ColumnStorageView(0, storage)
assert list(view.keys()) == ['id', 'name', 'tensor']
# since boolean value of np array is ambiguous, we iterate manually
for val_view, val_reference in zip(view.values(), ['0', 'hello', np.zeros(10)]):
if isinstance(val_view, np.ndarray):
assert (val_view == val_reference).all()
else:
assert val_view == val_reference
for item_view, item_reference in zip(
view.items(), [('id', '0'), ('name', 'hello'), ('tensor', np.zeros(10))]
):
if isinstance(item_view[1], np.ndarray):
assert item_view[0] == item_reference[0]
assert (item_view[1] == item_reference[1]).all()
else:
assert item_view == item_reference
|
"""Faiss reader."""
from typing import Any, Dict, List
import numpy as np
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class FaissReader(BaseReader):
"""
Faiss reader.
Retrieves documents through an existing in-memory Faiss index.
These documents can then be used in a downstream LlamaIndex data structure.
    If you wish to use Faiss itself as an index to organize documents,
insert documents, and perform queries on them, please use VectorStoreIndex
with FaissVectorStore.
Args:
faiss_index (faiss.Index): A Faiss Index object (required)
"""
def __init__(self, index: Any):
"""Initialize with parameters."""
import_err_msg = """
`faiss` package not found. For instructions on
how to install `faiss` please visit
https://github.com/facebookresearch/faiss/wiki/Installing-Faiss
"""
try:
import faiss # noqa
except ImportError:
raise ImportError(import_err_msg)
self._index = index
def load_data(
self,
query: np.ndarray,
id_to_text_map: Dict[str, str],
k: int = 4,
separate_documents: bool = True,
) -> List[Document]:
"""
Load data from Faiss.
Args:
query (np.ndarray): A 2D numpy array of query vectors.
id_to_text_map (Dict[str, str]): A map from ID's to text.
k (int): Number of nearest neighbors to retrieve. Defaults to 4.
separate_documents (Optional[bool]): Whether to return separate
documents. Defaults to True.
Returns:
List[Document]: A list of documents.
"""
dists, indices = self._index.search(query, k)
documents = []
for qidx in range(indices.shape[0]):
for didx in range(indices.shape[1]):
doc_id = indices[qidx, didx]
if doc_id not in id_to_text_map:
raise ValueError(
f"Document ID {doc_id} not found in id_to_text_map."
)
text = id_to_text_map[doc_id]
documents.append(Document(text=text))
if not separate_documents:
# join all documents into one
text_list = [doc.get_content() for doc in documents]
text = "\n\n".join(text_list)
documents = [Document(text=text)]
return documents
|
"""Faiss reader."""
from typing import Any, Dict, List
import numpy as np
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class FaissReader(BaseReader):
"""Faiss reader.
Retrieves documents through an existing in-memory Faiss index.
These documents can then be used in a downstream LlamaIndex data structure.
    If you wish to use Faiss itself as an index to organize documents,
insert documents, and perform queries on them, please use VectorStoreIndex
with FaissVectorStore.
Args:
faiss_index (faiss.Index): A Faiss Index object (required)
"""
def __init__(self, index: Any):
"""Initialize with parameters."""
import_err_msg = """
`faiss` package not found. For instructions on
how to install `faiss` please visit
https://github.com/facebookresearch/faiss/wiki/Installing-Faiss
"""
try:
import faiss # noqa
except ImportError:
raise ImportError(import_err_msg)
self._index = index
def load_data(
self,
query: np.ndarray,
id_to_text_map: Dict[str, str],
k: int = 4,
separate_documents: bool = True,
) -> List[Document]:
"""Load data from Faiss.
Args:
query (np.ndarray): A 2D numpy array of query vectors.
id_to_text_map (Dict[str, str]): A map from ID's to text.
k (int): Number of nearest neighbors to retrieve. Defaults to 4.
separate_documents (Optional[bool]): Whether to return separate
documents. Defaults to True.
Returns:
List[Document]: A list of documents.
"""
dists, indices = self._index.search(query, k)
documents = []
for qidx in range(indices.shape[0]):
for didx in range(indices.shape[1]):
doc_id = indices[qidx, didx]
if doc_id not in id_to_text_map:
raise ValueError(
f"Document ID {doc_id} not found in id_to_text_map."
)
text = id_to_text_map[doc_id]
documents.append(Document(text=text))
if not separate_documents:
# join all documents into one
text_list = [doc.get_content() for doc in documents]
text = "\n\n".join(text_list)
documents = [Document(text=text)]
return documents
|
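A hedged usage sketch for the reader above, building a tiny flat index in memory; `faiss` (e.g. the `faiss-cpu` package) is assumed to be installed:
# Hedged sketch: querying a small in-memory Faiss index through FaissReader.
import faiss
import numpy as np

dim = 8
index = faiss.IndexFlatL2(dim)
vectors = np.random.rand(3, dim).astype('float32')
index.add(vectors)

reader = FaissReader(index)
docs = reader.load_data(
    query=vectors[:1],                        # 2D array of query vectors
    id_to_text_map={0: 'a', 1: 'b', 2: 'c'},  # Faiss row ids -> text
    k=2,
)
print([d.get_content() for d in docs])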
import types
from typing import TYPE_CHECKING
from docarray.index.backends.in_memory import InMemoryExactNNIndex
from docarray.utils._internal.misc import (
_get_path_from_docarray_root_level,
import_library,
)
if TYPE_CHECKING:
from docarray.index.backends.elastic import ElasticDocIndex # noqa: F401
from docarray.index.backends.elasticv7 import ElasticV7DocIndex # noqa: F401
from docarray.index.backends.hnswlib import HnswDocumentIndex # noqa: F401
from docarray.index.backends.qdrant import QdrantDocumentIndex # noqa: F401
from docarray.index.backends.weaviate import WeaviateDocumentIndex # noqa: F401
__all__ = ['InMemoryExactNNIndex']
def __getattr__(name: str):
lib: types.ModuleType
if name == 'HnswDocumentIndex':
import_library('hnswlib', raise_error=True)
import docarray.index.backends.hnswlib as lib
elif name == 'ElasticDocIndex':
import_library('elasticsearch', raise_error=True)
import docarray.index.backends.elastic as lib
elif name == 'ElasticV7DocIndex':
import_library('elasticsearch', raise_error=True)
import docarray.index.backends.elasticv7 as lib
elif name == 'QdrantDocumentIndex':
import_library('qdrant_client', raise_error=True)
import docarray.index.backends.qdrant as lib
elif name == 'WeaviateDocumentIndex':
import_library('weaviate', raise_error=True)
import docarray.index.backends.weaviate as lib
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
index_cls = getattr(lib, name)
if name not in __all__:
__all__.append(name)
return index_cls
|
import types
from typing import TYPE_CHECKING
from docarray.index.backends.in_memory import InMemoryDocIndex
from docarray.utils._internal.misc import (
_get_path_from_docarray_root_level,
import_library,
)
if TYPE_CHECKING:
from docarray.index.backends.elastic import ElasticDocIndex # noqa: F401
from docarray.index.backends.elasticv7 import ElasticV7DocIndex # noqa: F401
from docarray.index.backends.hnswlib import HnswDocumentIndex # noqa: F401
from docarray.index.backends.qdrant import QdrantDocumentIndex # noqa: F401
from docarray.index.backends.weaviate import WeaviateDocumentIndex # noqa: F401
__all__ = ['InMemoryDocIndex']
def __getattr__(name: str):
lib: types.ModuleType
if name == 'HnswDocumentIndex':
import_library('hnswlib', raise_error=True)
import docarray.index.backends.hnswlib as lib
elif name == 'ElasticDocIndex':
import_library('elasticsearch', raise_error=True)
import docarray.index.backends.elastic as lib
elif name == 'ElasticV7DocIndex':
import_library('elasticsearch', raise_error=True)
import docarray.index.backends.elasticv7 as lib
elif name == 'QdrantDocumentIndex':
import_library('qdrant_client', raise_error=True)
import docarray.index.backends.qdrant as lib
elif name == 'WeaviateDocumentIndex':
import_library('weaviate', raise_error=True)
import docarray.index.backends.weaviate as lib
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
index_cls = getattr(lib, name)
if name not in __all__:
__all__.append(name)
return index_cls
|
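A brief sketch of what the module-level `__getattr__` above enables: the optional backend is imported only on first attribute access (assuming `hnswlib` is installed):
# Hedged sketch: lazy import via PEP 562 module-level __getattr__.
from docarray import index

idx_cls = index.HnswDocumentIndex  # triggers the hnswlib import on first use
print(idx_cls.__name__)            # 'HnswDocumentIndex'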
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union, cast
import numpy as np
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.utils._internal.misc import (
is_jax_available,
is_tf_available,
is_torch_available,
)
torch_available = is_torch_available()
if torch_available:
import torch
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor
from docarray.typing.tensor.torch_tensor import TorchTensor
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
from docarray.typing.tensor.audio.audio_tensorflow_tensor import (
AudioTensorFlowTensor,
)
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor
jax_available = is_jax_available()
if jax_available:
import jax.numpy as jnp # type: ignore
from docarray.typing.tensor.audio.audio_jax_array import AudioJaxArray
from docarray.typing.tensor.jaxarray import JaxArray
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar("T", bound="AudioTensor")
class AudioTensor(AnyTensor, AbstractAudioTensor):
"""
Represents an audio tensor object that can be used with TensorFlow, PyTorch, and NumPy type.
---
'''python
from docarray import BaseDoc
from docarray.typing import AudioTensor
class MyAudioDoc(BaseDoc):
tensor: AudioTensor
# Example usage with TensorFlow:
import tensorflow as tf
doc = MyAudioDoc(tensor=tf.zeros(1000, 2))
type(doc.tensor) # AudioTensorFlowTensor
# Example usage with PyTorch:
import torch
doc = MyAudioDoc(tensor=torch.zeros(1000, 2))
type(doc.tensor) # AudioTorchTensor
# Example usage with NumPy:
import numpy as np
doc = MyAudioDoc(tensor=np.zeros((1000, 2)))
type(doc.tensor) # AudioNdArray
'''
---
Raises:
TypeError: If the input value is not a compatible type (torch.Tensor, tensorflow.Tensor, numpy.ndarray).
"""
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, Any],
field: "ModelField",
config: "BaseConfig",
):
if torch_available:
if isinstance(value, TorchTensor):
return cast(AudioTorchTensor, value)
elif isinstance(value, torch.Tensor):
return AudioTorchTensor._docarray_from_native(value) # noqa
if tf_available:
if isinstance(value, TensorFlowTensor):
return cast(AudioTensorFlowTensor, value)
elif isinstance(value, tf.Tensor):
return AudioTensorFlowTensor._docarray_from_native(value) # noqa
if jax_available:
if isinstance(value, JaxArray):
return cast(AudioJaxArray, value)
elif isinstance(value, jnp.ndarray):
return AudioJaxArray._docarray_from_native(value) # noqa
try:
return AudioNdArray.validate(value, field, config)
except Exception: # noqa
pass
raise TypeError(
f"Expected one of [torch.Tensor, tensorflow.Tensor, numpy.ndarray] "
f"compatible type, got {type(value)}"
)
|
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union, cast
import numpy as np
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.utils._internal.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
import torch
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor
from docarray.typing.tensor.torch_tensor import TorchTensor
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
from docarray.typing.tensor.audio.audio_tensorflow_tensor import (
AudioTensorFlowTensor,
)
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar("T", bound="AudioTensor")
class AudioTensor(AnyTensor, AbstractAudioTensor):
"""
Represents an audio tensor object that can be used with TensorFlow, PyTorch, and NumPy type.
---
'''python
from docarray import BaseDoc
from docarray.typing import AudioTensor
class MyAudioDoc(BaseDoc):
tensor: AudioTensor
# Example usage with TensorFlow:
import tensorflow as tf
doc = MyAudioDoc(tensor=tf.zeros(1000, 2))
type(doc.tensor) # AudioTensorFlowTensor
# Example usage with PyTorch:
import torch
doc = MyAudioDoc(tensor=torch.zeros(1000, 2))
type(doc.tensor) # AudioTorchTensor
# Example usage with NumPy:
import numpy as np
doc = MyAudioDoc(tensor=np.zeros((1000, 2)))
type(doc.tensor) # AudioNdArray
'''
---
Raises:
TypeError: If the input value is not a compatible type (torch.Tensor, tensorflow.Tensor, numpy.ndarray).
"""
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, Any],
field: "ModelField",
config: "BaseConfig",
):
if torch_available:
if isinstance(value, TorchTensor):
return cast(AudioTorchTensor, value)
elif isinstance(value, torch.Tensor):
return AudioTorchTensor._docarray_from_native(value) # noqa
if tf_available:
if isinstance(value, TensorFlowTensor):
return cast(AudioTensorFlowTensor, value)
elif isinstance(value, tf.Tensor):
return AudioTensorFlowTensor._docarray_from_native(value) # noqa
try:
return AudioNdArray.validate(value, field, config)
except Exception: # noqa
pass
raise TypeError(
f"Expected one of [torch.Tensor, tensorflow.Tensor, numpy.ndarray] "
f"compatible type, got {type(value)}"
)
|
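A small sketch of the dispatch behaviour implemented by `validate` above, assuming only NumPy is installed; with torch or tensorflow present the corresponding branch would fire first:
# Hedged sketch: with no framework tensors available, validate falls back
# to AudioNdArray.
import numpy as np
from docarray import BaseDoc
from docarray.typing import AudioTensor

class MyAudioDoc(BaseDoc):
    tensor: AudioTensor

doc = MyAudioDoc(tensor=np.zeros((1000, 2)))
print(type(doc.tensor).__name__)  # 'AudioNdArray'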
import pytest
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"
DATASET_LOADING_SCRIPT_CODE = """
import json
import os
import datasets
REPO_URL = "https://huggingface.co/datasets/hf-internal-testing/raw_jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
]
)
),
"langs": datasets.Sequence(datasets.Value("string")),
"spans": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
]
def _generate_examples(self, filepath):
with open(filepath, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
"""
@pytest.fixture
def dataset_loading_script_name():
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code():
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
script_name = dataset_loading_script_name
script_dir = tmp_path / "datasets" / script_name
script_dir.mkdir(parents=True)
script_path = script_dir / f"{script_name}.py"
with open(script_path, "w") as f:
f.write(dataset_loading_script_code)
return str(script_dir)
|
import pytest
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"
DATASET_LOADING_SCRIPT_CODE = """
import json
import os
import datasets
REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
]
)
),
"langs": datasets.Sequence(datasets.Value("string")),
"spans": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
]
def _generate_examples(self, filepath):
with open(filepath, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
"""
@pytest.fixture
def dataset_loading_script_name():
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code():
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
script_name = dataset_loading_script_name
script_dir = tmp_path / "datasets" / script_name
script_dir.mkdir(parents=True)
script_path = script_dir / f"{script_name}.py"
with open(script_path, "w") as f:
f.write(dataset_loading_script_code)
return str(script_dir)
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='ATSS',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=[
dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
dict(type='DyHead', in_channels=256, out_channels=256, num_blocks=6)
],
bbox_head=dict(
type='ATSSHead',
num_classes=80,
in_channels=256,
stacked_convs=0,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
optim_wrapper = dict(optimizer=dict(lr=0.01))
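# Hedged sketch of how a config like the one above is typically consumed with
# MMDetection 3.x: build the ATSS model from the parsed config via the model
# registry. The file name below is hypothetical, and `init_default_scope` is
# assumed to be available in the installed `mmengine` version.
from mmengine.config import Config
from mmengine.registry import init_default_scope
from mmdet.registry import MODELS

init_default_scope('mmdet')  # make mmdet's registries the default lookup scope
cfg = Config.fromfile('atss_r50_fpn_dyhead_1x_coco.py')  # hypothetical filename
model = MODELS.build(cfg.model)
print(type(model).__name__)  # expected: ATSS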
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='ATSS',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=[
dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
dict(type='DyHead', in_channels=256, out_channels=256, num_blocks=6)
],
bbox_head=dict(
type='ATSSHead',
num_classes=80,
in_channels=256,
stacked_convs=0,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
|
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Pixtral model."""
import unittest
from transformers import (
PixtralVisionConfig,
PixtralVisionModel,
is_torch_available,
)
from transformers.testing_utils import (
require_torch,
torch_device,
)
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
class PixtralVisionModelTester:
def __init__(
self,
parent,
batch_size=12,
image_size=30,
patch_size=2,
num_channels=3,
is_training=True,
hidden_size=32,
projection_dim=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
dropout=0.1,
attention_dropout=0.1,
initializer_range=0.02,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.hidden_size = hidden_size
self.projection_dim = projection_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.initializer_range = initializer_range
self.scope = scope
# in Pixtral, the seq length equals the number of patches * batch_size because the patches are flattened
self.seq_length = (image_size // patch_size) ** 2 * batch_size
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
image_sizes = torch.tensor(
[[self.image_size, self.image_size]] * self.batch_size, dtype=torch.long, device=torch_device
)
config = self.get_config()
return config, pixel_values, image_sizes
def get_config(self):
return PixtralVisionConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
hidden_size=self.hidden_size,
projection_dim=self.projection_dim,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
initializer_range=self.initializer_range,
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, image_sizes = config_and_inputs
inputs_dict = {"pixel_values": pixel_values, "image_sizes": image_sizes}
return config, inputs_dict
@require_torch
class PixtralVisionModelModelTest(ModelTesterMixin, unittest.TestCase):
"""
Model tester for `PixtralVisionModel`.
"""
all_model_classes = (PixtralVisionModel,) if is_torch_available() else ()
additional_model_inputs = ["image_sizes"]
test_pruning = False
test_head_masking = False
test_torchscript = False
test_resize_embeddings = False
def setUp(self):
self.model_tester = PixtralVisionModelTester(self)
self.config_tester = ConfigTester(self, config_class=PixtralVisionConfig, has_text_modality=False)
def test_model_get_set_embeddings(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (torch.nn.Module))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, torch.nn.Linear))
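# Worked check (illustrative) of the seq_length comment in the tester above:
# with image_size=30 and patch_size=2, each image contributes (30 // 2) ** 2
# = 225 flattened patches, so batch_size=12 yields 225 * 12 = 2700 tokens.
assert (30 // 2) ** 2 * 12 == 2700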
|
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Pixtral model."""
import unittest
from transformers import (
PixtralVisionConfig,
PixtralVisionModel,
is_torch_available,
)
from transformers.testing_utils import (
require_torch,
torch_device,
)
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
class PixtralVisionModelTester:
def __init__(
self,
parent,
batch_size=12,
image_size=30,
patch_size=2,
num_channels=3,
is_training=True,
hidden_size=32,
projection_dim=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
dropout=0.1,
attention_dropout=0.1,
initializer_range=0.02,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.hidden_size = hidden_size
self.projection_dim = projection_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.initializer_range = initializer_range
self.scope = scope
# in Pixtral, the seq length equals the number of patches * batch_size because the patches are flattened
self.seq_length = (image_size // patch_size) ** 2 * batch_size
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
image_sizes = torch.tensor(
[[self.image_size, self.image_size]] * self.batch_size, dtype=torch.long, device=torch_device
)
config = self.get_config()
return config, pixel_values, image_sizes
def get_config(self):
return PixtralVisionConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
hidden_size=self.hidden_size,
projection_dim=self.projection_dim,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
initializer_range=self.initializer_range,
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, image_sizes = config_and_inputs
inputs_dict = {"pixel_values": pixel_values, "image_sizes": image_sizes}
return config, inputs_dict
@require_torch
class PixtralVisionModelModelTest(ModelTesterMixin, unittest.TestCase):
"""
Model tester for `PixtralVisionModel`.
"""
all_model_classes = (PixtralVisionModel,) if is_torch_available() else ()
test_pruning = False
test_head_masking = False
test_torchscript = False
test_resize_embeddings = False
def setUp(self):
self.model_tester = PixtralVisionModelTester(self)
self.config_tester = ConfigTester(self, config_class=PixtralVisionConfig, has_text_modality=False)
def test_model_get_set_embeddings(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (torch.nn.Module))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, torch.nn.Linear))
|
import csv
import gzip
import logging
import os
from datetime import datetime
import torch
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, losses, models, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
## Training parameters
model_name = "distilbert-base-uncased"
batch_size = 16
pos_neg_ratio = 8 # batch_size must be divisible by pos_neg_ratio
epochs = 1
max_seq_length = 75
# Save path to store our model
model_save_path = "output/train_stsb_ct-{}-{}".format(model_name, datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
################# Train sentences #################
# We use 1 Million sentences from Wikipedia to train our model
wikipedia_dataset_path = "data/wiki1m_for_simcse.txt"
if not os.path.exists(wikipedia_dataset_path):
util.http_get(
"https://huggingface.co/datasets/princeton-nlp/datasets-for-simcse/resolve/main/wiki1m_for_simcse.txt",
wikipedia_dataset_path,
)
# train_sentences are simply your list of sentences
train_sentences = []
with open(wikipedia_dataset_path, "r", encoding="utf8") as fIn:
for line in fIn:
line = line.strip()
if len(line) >= 10:
train_sentences.append(line)
################# Download and load STSb #################
data_folder = "data/stsbenchmark"
sts_dataset_path = f"{data_folder}/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "dev":
dev_samples.append(inp_example)
elif row["split"] == "test":
test_samples.append(inp_example)
dev_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name="sts-dev")
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
################# Initialize an SBERT model #################
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# For ContrastiveTension we need a special data loader to construct batches with the desired properties
train_dataloader = losses.ContrastiveTensionDataLoader(
train_sentences, batch_size=batch_size, pos_neg_ratio=pos_neg_ratio
)
# As loss, we use losses.ContrastiveTensionLoss
train_loss = losses.ContrastiveTensionLoss(model)
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
epochs=1,
evaluation_steps=1000,
weight_decay=0,
warmup_steps=0,
optimizer_class=torch.optim.RMSprop,
optimizer_params={"lr": 1e-5},
output_path=model_save_path,
use_amp=False, # Set to True, if your GPU has optimized FP16 cores
)
########### Load the model and evaluate on test set
model = SentenceTransformer(model_save_path)
test_evaluator(model)
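# Optional follow-up sketch (not in the original script): embed two sentences
# with the trained model and inspect their cosine similarity. `util.cos_sim`
# is assumed to be available in the installed sentence-transformers release.
embeddings = model.encode(["A man is eating food.", "A man is eating pasta."])
print(util.cos_sim(embeddings[0], embeddings[1]))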
|
import csv
import gzip
import logging
import os
from datetime import datetime
import torch
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, losses, models, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
## Training parameters
model_name = "distilbert-base-uncased"
batch_size = 16
pos_neg_ratio = 8 # batch_size must be divisible by pos_neg_ratio
epochs = 1
max_seq_length = 75
# Save path to store our model
model_save_path = "output/train_stsb_ct-{}-{}".format(model_name, datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
################# Train sentences #################
# We use 1 Million sentences from Wikipedia to train our model
wikipedia_dataset_path = "data/wiki1m_for_simcse.txt"
if not os.path.exists(wikipedia_dataset_path):
util.http_get(
"https://huggingface.co/datasets/princeton-nlp/datasets-for-simcse/resolve/main/wiki1m_for_simcse.txt",
wikipedia_dataset_path,
)
# train_sentences are simply your list of sentences
train_sentences = []
with open(wikipedia_dataset_path, "r", encoding="utf8") as fIn:
for line in fIn:
line = line.strip()
if len(line) >= 10:
train_sentences.append(line)
################# Download and load STSb #################
data_folder = "data/stsbenchmark"
sts_dataset_path = f"{data_folder}/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "dev":
dev_samples.append(inp_example)
elif row["split"] == "test":
test_samples.append(inp_example)
dev_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name="sts-dev")
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
################# Initialize an SBERT model #################
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# For ContrastiveTension we need a special data loader to construct batches with the desired properties
train_dataloader = losses.ContrastiveTensionDataLoader(
train_sentences, batch_size=batch_size, pos_neg_ratio=pos_neg_ratio
)
# As loss, we use losses.ContrastiveTensionLoss
train_loss = losses.ContrastiveTensionLoss(model)
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
epochs=1,
evaluation_steps=1000,
weight_decay=0,
warmup_steps=0,
optimizer_class=torch.optim.RMSprop,
optimizer_params={"lr": 1e-5},
output_path=model_save_path,
use_amp=False, # Set to True, if your GPU has optimized FP16 cores
)
########### Load the model and evaluate on test set
model = SentenceTransformer(model_save_path)
test_evaluator(model)
|
"""Test in memory docstore."""
from typing import Any
from langchain.output_parsers.combining import CombiningOutputParser
from langchain.output_parsers.regex import RegexParser
from langchain.output_parsers.structured import ResponseSchema, StructuredOutputParser
DEF_EXPECTED_RESULT = {
"answer": "Paris",
"source": "https://en.wikipedia.org/wiki/France",
"confidence": "A",
"explanation": "Paris is the capital of France according to Wikipedia.",
}
DEF_README = """```json
{
"answer": "Paris",
"source": "https://en.wikipedia.org/wiki/France"
}
```
//Confidence: A, Explanation: Paris is the capital of France according to Wikipedia."""
def test_combining_dict_result() -> None:
"""Test combining result."""
parsers = [
StructuredOutputParser(
response_schemas=[
ResponseSchema(
name="answer", description="answer to the user's question"
),
ResponseSchema(
name="source",
description="source used to answer the user's question",
),
]
),
RegexParser(
regex=r"Confidence: (A|B|C), Explanation: (.*)",
output_keys=["confidence", "explanation"],
default_output_key="noConfidence",
),
]
combining_parser = CombiningOutputParser(parsers=parsers)
result_dict = combining_parser.parse(DEF_README)
assert result_dict == DEF_EXPECTED_RESULT
def test_combining_output_parser_output_type() -> None:
"""Test combining output parser output type is Dict[str, Any]."""
parsers = [
StructuredOutputParser(
response_schemas=[
ResponseSchema(
name="answer", description="answer to the user's question"
),
ResponseSchema(
name="source",
description="source used to answer the user's question",
),
]
),
RegexParser(
regex=r"Confidence: (A|B|C), Explanation: (.*)",
output_keys=["confidence", "explanation"],
default_output_key="noConfidence",
),
]
combining_parser = CombiningOutputParser(parsers=parsers)
assert combining_parser.OutputType == dict[str, Any]
|
"""Test in memory docstore."""
from typing import Any
from langchain.output_parsers.combining import CombiningOutputParser
from langchain.output_parsers.regex import RegexParser
from langchain.output_parsers.structured import ResponseSchema, StructuredOutputParser
DEF_EXPECTED_RESULT = {
"answer": "Paris",
"source": "https://en.wikipedia.org/wiki/France",
"confidence": "A",
"explanation": "Paris is the capital of France according to Wikipedia.",
}
DEF_README = """```json
{
"answer": "Paris",
"source": "https://en.wikipedia.org/wiki/France"
}
```
//Confidence: A, Explanation: Paris is the capital of France according to Wikipedia."""
def test_combining_dict_result() -> None:
"""Test combining result."""
parsers = [
StructuredOutputParser(
response_schemas=[
ResponseSchema(
name="answer", description="answer to the user's question"
),
ResponseSchema(
name="source",
description="source used to answer the user's question",
),
]
),
RegexParser(
regex=r"Confidence: (A|B|C), Explanation: (.*)",
output_keys=["confidence", "explanation"],
default_output_key="noConfidence",
),
]
combining_parser = CombiningOutputParser(parsers=parsers)
result_dict = combining_parser.parse(DEF_README)
assert DEF_EXPECTED_RESULT == result_dict
def test_combining_output_parser_output_type() -> None:
"""Test combining output parser output type is Dict[str, Any]."""
parsers = [
StructuredOutputParser(
response_schemas=[
ResponseSchema(
name="answer", description="answer to the user's question"
),
ResponseSchema(
name="source",
description="source used to answer the user's question",
),
]
),
RegexParser(
regex=r"Confidence: (A|B|C), Explanation: (.*)",
output_keys=["confidence", "explanation"],
default_output_key="noConfidence",
),
]
combining_parser = CombiningOutputParser(parsers=parsers)
assert combining_parser.OutputType == dict[str, Any]
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""
Hashing function for dataset keys using `hashlib.md5`
Requirements for the hash function:
- Provides a uniformly distributed hash from random space
- Adequately fast
- Working with multiple input types (in this case, `str`, `int` or `bytes`)
- Should be platform independent (generates same hash on different OS and systems)
The hashing function provides a unique 128-bit integer hash of the key provided.
The split name is used as the hash salt so that identical keys in different
splits do not produce identical hashes.
"""
from typing import Union
from huggingface_hub.utils import insecure_hashlib
def _as_bytes(hash_data: Union[str, int, bytes]) -> bytes:
"""
Returns the input hash_data in its bytes form
Args:
hash_data: the hash salt/key to be converted to bytes
"""
if isinstance(hash_data, bytes):
        # Data already in bytes, return it as is
return hash_data
elif isinstance(hash_data, str):
        # Keep the data as is, to be encoded to UTF-8 later;
        # however, replace `\\` with `/` for Windows compatibility
hash_data = hash_data.replace("\\", "/")
elif isinstance(hash_data, int):
hash_data = str(hash_data)
else:
# If data is not of the required type, raise error
raise InvalidKeyError(hash_data)
return hash_data.encode("utf-8")
class InvalidKeyError(Exception):
"""Raises an error when given key is of invalid datatype."""
def __init__(self, hash_data):
self.prefix = "\nFAILURE TO GENERATE DATASET: Invalid key type detected"
self.err_msg = f"\nFound Key {hash_data} of type {type(hash_data)}"
self.suffix = "\nKeys should be either str, int or bytes type"
super().__init__(f"{self.prefix}{self.err_msg}{self.suffix}")
class DuplicatedKeysError(Exception):
"""Raise an error when duplicate key found."""
def __init__(self, key, duplicate_key_indices, fix_msg=""):
self.key = key
self.duplicate_key_indices = duplicate_key_indices
self.fix_msg = fix_msg
self.prefix = "Found multiple examples generated with the same key"
if len(duplicate_key_indices) <= 20:
self.err_msg = f"\nThe examples at index {', '.join(duplicate_key_indices)} have the key {key}"
else:
self.err_msg = f"\nThe examples at index {', '.join(duplicate_key_indices[:20])}... ({len(duplicate_key_indices) - 20} more) have the key {key}"
self.suffix = "\n" + fix_msg if fix_msg else ""
super().__init__(f"{self.prefix}{self.err_msg}{self.suffix}")
class KeyHasher:
"""KeyHasher class for providing hash using md5"""
def __init__(self, hash_salt: str):
self._split_md5 = insecure_hashlib.md5(_as_bytes(hash_salt))
def hash(self, key: Union[str, int, bytes]) -> int:
"""Returns 128-bits unique hash of input key
Args:
key: the input key to be hashed (should be str, int or bytes)
Returns: 128-bit int hash key"""
md5 = self._split_md5.copy()
byte_key = _as_bytes(key)
md5.update(byte_key)
# Convert to integer with hexadecimal conversion
return int(md5.hexdigest(), 16)
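# Usage sketch (illustrative): the split name acts as the salt, so the same key
# hashed under different splits yields different 128-bit integers (with
# overwhelming probability).
train_hasher = KeyHasher(hash_salt="train")
validation_hasher = KeyHasher(hash_salt="validation")
assert train_hasher.hash("example-0") != validation_hasher.hash("example-0")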
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""
Hashing function for dataset keys using `hashlib.md5`
Requirements for the hash function:
- Provides a uniformly distributed hash from random space
- Adequately fast
- Working with multiple input types (in this case, `str`, `int` or `bytes`)
- Should be platform independent (generates same hash on different OS and systems)
The hashing function provides a unique 128-bit integer hash of the key provided.
The split name is used as the hash salt so that identical keys in different
splits do not produce identical hashes.
"""
import hashlib
from typing import Union
def _as_bytes(hash_data: Union[str, int, bytes]) -> bytes:
"""
Returns the input hash_data in its bytes form
Args:
hash_data: the hash salt/key to be converted to bytes
"""
if isinstance(hash_data, bytes):
        # Data already in bytes, return it as is
return hash_data
elif isinstance(hash_data, str):
        # Keep the data as is, to be encoded to UTF-8 later;
        # however, replace `\\` with `/` for Windows compatibility
hash_data = hash_data.replace("\\", "/")
elif isinstance(hash_data, int):
hash_data = str(hash_data)
else:
# If data is not of the required type, raise error
raise InvalidKeyError(hash_data)
return hash_data.encode("utf-8")
class InvalidKeyError(Exception):
"""Raises an error when given key is of invalid datatype."""
def __init__(self, hash_data):
self.prefix = "\nFAILURE TO GENERATE DATASET: Invalid key type detected"
self.err_msg = f"\nFound Key {hash_data} of type {type(hash_data)}"
self.suffix = "\nKeys should be either str, int or bytes type"
super().__init__(f"{self.prefix}{self.err_msg}{self.suffix}")
class DuplicatedKeysError(Exception):
"""Raise an error when duplicate key found."""
def __init__(self, key, duplicate_key_indices, fix_msg=""):
self.key = key
self.duplicate_key_indices = duplicate_key_indices
self.fix_msg = fix_msg
self.prefix = "Found multiple examples generated with the same key"
if len(duplicate_key_indices) <= 20:
self.err_msg = f"\nThe examples at index {', '.join(duplicate_key_indices)} have the key {key}"
else:
self.err_msg = f"\nThe examples at index {', '.join(duplicate_key_indices[:20])}... ({len(duplicate_key_indices) - 20} more) have the key {key}"
self.suffix = "\n" + fix_msg if fix_msg else ""
super().__init__(f"{self.prefix}{self.err_msg}{self.suffix}")
class KeyHasher:
"""KeyHasher class for providing hash using md5"""
def __init__(self, hash_salt: str):
self._split_md5 = hashlib.md5(_as_bytes(hash_salt))
def hash(self, key: Union[str, int, bytes]) -> int:
"""Returns 128-bits unique hash of input key
Args:
key: the input key to be hashed (should be str, int or bytes)
Returns: 128-bit int hash key"""
md5 = self._split_md5.copy()
byte_key = _as_bytes(key)
md5.update(byte_key)
# Convert to integer with hexadecimal conversion
return int(md5.hexdigest(), 16)
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from uuid import UUID
import pytest
from pydantic import schema_json_of
from pydantic.tools import parse_obj_as
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import ID
@pytest.mark.parametrize(
'id', ['1234', 1234, UUID('cf57432e-809e-4353-adbd-9d5c0d733868')]
)
def test_id_validation(id):
parsed_id = parse_obj_as(ID, id)
assert parsed_id == str(id)
def test_json_schema():
schema_json_of(ID)
def test_dump_json():
id = parse_obj_as(ID, 1234)
orjson_dumps(id)
@pytest.mark.parametrize(
'id', ['1234', 1234, UUID('cf57432e-809e-4353-adbd-9d5c0d733868')]
)
def test_operators(id):
parsed_id = parse_obj_as(ID, id)
assert parsed_id == str(id)
assert parsed_id != 'aljdñjd'
assert str(id)[0:1] in parsed_id
assert 'docarray' not in parsed_id
|
from uuid import UUID
import pytest
from pydantic import schema_json_of
from pydantic.tools import parse_obj_as
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import ID
@pytest.mark.parametrize(
'id', ['1234', 1234, UUID('cf57432e-809e-4353-adbd-9d5c0d733868')]
)
def test_id_validation(id):
parsed_id = parse_obj_as(ID, id)
assert parsed_id == str(id)
def test_json_schema():
schema_json_of(ID)
def test_dump_json():
id = parse_obj_as(ID, 1234)
orjson_dumps(id)
@pytest.mark.parametrize(
'id', ['1234', 1234, UUID('cf57432e-809e-4353-adbd-9d5c0d733868')]
)
def test_operators(id):
parsed_id = parse_obj_as(ID, id)
assert parsed_id == str(id)
assert parsed_id != 'aljdñjd'
assert str(id)[0:1] in parsed_id
assert 'docarray' not in parsed_id
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# training schedule for 2x
train_cfg = dict(max_epochs=24)
# learning rate policy
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=24,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
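# Worked check (illustrative) of the step decay above: the base lr of 0.01 is
# multiplied by gamma=0.1 at epoch 16 and again at epoch 22.
base_lr, gamma = 0.01, 0.1
assert round(base_lr * gamma, 6) == 0.001
assert round(base_lr * gamma**2, 6) == 0.0001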
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# training schedule for 2x
train_cfg = dict(max_epochs=24)
# learning rate policy
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=24,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
data_preprocessor=dict(
# The mean and std are used in PyCls when training RegNets
mean=[103.53, 116.28, 123.675],
std=[57.375, 57.12, 58.395],
bgr_to_rgb=False),
backbone=dict(
_delete_=True,
type='RegNet',
arch='regnetx_3.2gf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')),
neck=dict(
type='FPN',
in_channels=[96, 192, 432, 1008],
out_channels=256,
num_outs=5))
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005))
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
_delete_=True,
type='RegNet',
arch='regnetx_3.2gf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')),
neck=dict(
type='FPN',
in_channels=[96, 192, 432, 1008],
out_channels=256,
num_outs=5))
img_norm_cfg = dict(
# The mean and std are used in PyCls when training RegNets
mean=[103.53, 116.28, 123.675],
std=[57.375, 57.12, 58.395],
to_rgb=False)
train_pipeline = [
# Images are converted to float32 directly after loading in PyCls
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005)
|
from ._dsp import adsr_envelope, extend_pitch, oscillator_bank, sinc_impulse_response
from .functional import add_noise, barkscale_fbanks, convolve, fftconvolve, speed
__all__ = [
"add_noise",
"adsr_envelope",
"barkscale_fbanks",
"convolve",
"extend_pitch",
"fftconvolve",
"oscillator_bank",
"sinc_impulse_response",
"speed",
]
|
from ._dsp import adsr_envelope, extend_pitch, oscillator_bank, sinc_impulse_response
from .functional import add_noise, barkscale_fbanks, convolve, fftconvolve
__all__ = [
"add_noise",
"adsr_envelope",
"barkscale_fbanks",
"convolve",
"extend_pitch",
"fftconvolve",
"oscillator_bank",
"sinc_impulse_response",
]
|
"""
==================
Two-class AdaBoost
==================
This example fits an AdaBoosted decision stump on a non-linearly separable
classification dataset composed of two "Gaussian quantiles" clusters
(see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision
boundary and decision scores. The distributions of decision scores are shown
separately for samples of class A and B. The predicted class label for each
sample is determined by the sign of the decision score. Samples with decision
scores greater than zero are classified as B, and are otherwise classified
as A. The magnitude of a decision score determines the degree of likeness with
the predicted class label. Additionally, a new dataset could be constructed
containing a desired purity of class B, for example, by only selecting samples
with a decision score above some value.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_gaussian_quantiles
from sklearn.ensemble import AdaBoostClassifier
from sklearn.inspection import DecisionBoundaryDisplay
from sklearn.tree import DecisionTreeClassifier
# Construct dataset
X1, y1 = make_gaussian_quantiles(
cov=2.0, n_samples=200, n_features=2, n_classes=2, random_state=1
)
X2, y2 = make_gaussian_quantiles(
mean=(3, 3), cov=1.5, n_samples=300, n_features=2, n_classes=2, random_state=1
)
X = np.concatenate((X1, X2))
y = np.concatenate((y1, -y2 + 1))
# Create and fit an AdaBoosted decision tree
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1), n_estimators=200)
bdt.fit(X, y)
plot_colors = "br"
plot_step = 0.02
class_names = "AB"
plt.figure(figsize=(10, 5))
# Plot the decision boundaries
ax = plt.subplot(121)
disp = DecisionBoundaryDisplay.from_estimator(
bdt,
X,
cmap=plt.cm.Paired,
response_method="predict",
ax=ax,
xlabel="x",
ylabel="y",
)
x_min, x_max = disp.xx0.min(), disp.xx0.max()
y_min, y_max = disp.xx1.min(), disp.xx1.max()
plt.axis("tight")
# Plot the training points
for i, n, c in zip(range(2), class_names, plot_colors):
idx = (y == i).nonzero()
plt.scatter(
X[idx, 0],
X[idx, 1],
c=c,
s=20,
edgecolor="k",
label="Class %s" % n,
)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc="upper right")
plt.title("Decision Boundary")
# Plot the two-class decision scores
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
plt.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
plt.hist(
twoclass_output[y == i],
bins=10,
range=plot_range,
facecolor=c,
label="Class %s" % n,
alpha=0.5,
edgecolor="k",
)
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, y1, y2 * 1.2))
plt.legend(loc="upper right")
plt.ylabel("Samples")
plt.xlabel("Score")
plt.title("Decision Scores")
plt.tight_layout()
plt.subplots_adjust(wspace=0.35)
plt.show()
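# Follow-up sketch (illustrative, not part of the original example): build a
# higher-purity class-B subset by thresholding the decision scores, as the
# module docstring suggests.
threshold = 0.8  # hypothetical cut-off
X_high_purity = X[twoclass_output > threshold]
print(f"{len(X_high_purity)} samples scored above {threshold}")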
|
"""
==================
Two-class AdaBoost
==================
This example fits an AdaBoosted decision stump on a non-linearly separable
classification dataset composed of two "Gaussian quantiles" clusters
(see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision
boundary and decision scores. The distributions of decision scores are shown
separately for samples of class A and B. The predicted class label for each
sample is determined by the sign of the decision score. Samples with decision
scores greater than zero are classified as B, and are otherwise classified
as A. The magnitude of a decision score determines the degree of likeness with
the predicted class label. Additionally, a new dataset could be constructed
containing a desired purity of class B, for example, by only selecting samples
with a decision score above some value.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_gaussian_quantiles
from sklearn.ensemble import AdaBoostClassifier
from sklearn.inspection import DecisionBoundaryDisplay
from sklearn.tree import DecisionTreeClassifier
# Construct dataset
X1, y1 = make_gaussian_quantiles(
cov=2.0, n_samples=200, n_features=2, n_classes=2, random_state=1
)
X2, y2 = make_gaussian_quantiles(
mean=(3, 3), cov=1.5, n_samples=300, n_features=2, n_classes=2, random_state=1
)
X = np.concatenate((X1, X2))
y = np.concatenate((y1, -y2 + 1))
# Create and fit an AdaBoosted decision tree
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1), n_estimators=200)
bdt.fit(X, y)
plot_colors = "br"
plot_step = 0.02
class_names = "AB"
plt.figure(figsize=(10, 5))
# Plot the decision boundaries
ax = plt.subplot(121)
disp = DecisionBoundaryDisplay.from_estimator(
bdt,
X,
cmap=plt.cm.Paired,
response_method="predict",
ax=ax,
xlabel="x",
ylabel="y",
)
x_min, x_max = disp.xx0.min(), disp.xx0.max()
y_min, y_max = disp.xx1.min(), disp.xx1.max()
plt.axis("tight")
# Plot the training points
for i, n, c in zip(range(2), class_names, plot_colors):
idx = np.where(y == i)
plt.scatter(
X[idx, 0],
X[idx, 1],
c=c,
s=20,
edgecolor="k",
label="Class %s" % n,
)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc="upper right")
plt.title("Decision Boundary")
# Plot the two-class decision scores
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
plt.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
plt.hist(
twoclass_output[y == i],
bins=10,
range=plot_range,
facecolor=c,
label="Class %s" % n,
alpha=0.5,
edgecolor="k",
)
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, y1, y2 * 1.2))
plt.legend(loc="upper right")
plt.ylabel("Samples")
plt.xlabel("Score")
plt.title("Decision Scores")
plt.tight_layout()
plt.subplots_adjust(wspace=0.35)
plt.show()
|
from typing import Any, List, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
T = TypeVar('T', bound='VideoTorchTensor')
@_register_proto(proto_type_name='video_torch_tensor')
class VideoTorchTensor(TorchTensor, VideoTensorMixin, metaclass=metaTorchAndNode):
"""
Subclass of [`TorchTensor`][docarray.typing.TorchTensor], to represent a video tensor.
Adds video-specific features to the tensor.
---
```python
from typing import Optional
import torch
from docarray import BaseDoc
from docarray.typing import VideoTorchTensor, VideoUrl
class MyVideoDoc(BaseDoc):
title: str
url: Optional[VideoUrl] = None
video_tensor: Optional[VideoTorchTensor] = None
doc_1 = MyVideoDoc(
title='my_first_video_doc',
video_tensor=torch.randn(size=(100, 224, 224, 3)),
)
# doc_1.video_tensor.save(file_path='file_1.mp4')
doc_2 = MyVideoDoc(
title='my_second_video_doc',
url='https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true',
)
doc_2.video_tensor = doc_2.url.load().video
# doc_2.video_tensor.save(file_path='file_2.wav')
```
---
"""
@classmethod
def _docarray_validate(
cls: Type[T],
value: Union[T, np.ndarray, List[Any], Tuple[Any], Any],
) -> T:
tensor = super()._docarray_validate(value=value)
return cls.validate_shape(value=tensor)
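# Hedged usage sketch: coerce a raw tensor into a VideoTorchTensor, which runs
# the shape validation implemented by `_docarray_validate` above (video tensors
# are assumed channel-last, i.e. (frames, height, width, 3)).
import torch
from pydantic.tools import parse_obj_as

video = parse_obj_as(VideoTorchTensor, torch.randn(10, 224, 224, 3))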
|
from typing import Any, List, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
T = TypeVar('T', bound='VideoTorchTensor')
@_register_proto(proto_type_name='video_torch_tensor')
class VideoTorchTensor(TorchTensor, VideoTensorMixin, metaclass=metaTorchAndNode):
"""
Subclass of [`TorchTensor`][docarray.typing.TorchTensor], to represent a video tensor.
Adds video-specific features to the tensor.
---
```python
from typing import Optional
import torch
from docarray import BaseDoc
from docarray.typing import VideoTorchTensor, VideoUrl
class MyVideoDoc(BaseDoc):
title: str
url: Optional[VideoUrl]
video_tensor: Optional[VideoTorchTensor]
doc_1 = MyVideoDoc(
title='my_first_video_doc',
video_tensor=torch.randn(size=(100, 224, 224, 3)),
)
# doc_1.video_tensor.save(file_path='file_1.mp4')
doc_2 = MyVideoDoc(
title='my_second_video_doc',
url='https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true',
)
doc_2.video_tensor = doc_2.url.load().video
# doc_2.video_tensor.save(file_path='file_2.wav')
```
---
"""
@classmethod
def _docarray_validate(
cls: Type[T],
value: Union[T, np.ndarray, List[Any], Tuple[Any], Any],
) -> T:
tensor = super()._docarray_validate(value=value)
return cls.validate_shape(value=tensor)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
import pytest
from jina import Document, DocumentArray, Flow
from ...dpr_reader import DPRReaderRanker
@pytest.mark.parametrize('request_size', [1, 8, 50])
def test_integration(request_size: int):
docs = DocumentArray(
[
Document(
text='just some random text here',
matches=[Document(text='random text', tags={'title': 'random title'})],
)
for _ in range(50)
]
)
with Flow(return_results=True).add(uses=DPRReaderRanker) as flow:
resp = flow.post(
on='/search',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
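    # The containerized executor is expected to keep serving; reaching the 30s
    # timeout (instead of exiting early) is what marks this run as healthy,
    # hence the expected TimeoutExpired.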
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'pea',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:cuda',
],
timeout=30,
check=True,
)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import pytest
from jina import Document, DocumentArray, Flow
from ...dpr_reader import DPRReaderRanker
@pytest.mark.parametrize('request_size', [1, 8, 50])
def test_integration(request_size: int):
docs = DocumentArray(
[
Document(
text='just some random text here',
matches=[Document(text='random text', tags={'title': 'random title'})],
)
for _ in range(50)
]
)
with Flow(return_results=True).add(uses=DPRReaderRanker) as flow:
resp = flow.post(
on='/search',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on macOS (seems to have no effect; the variable must still be exported manually before Jina starts)
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.27.12'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096. Useful for running matplotlib/seaborn
    with many parallel plot generators, versus the Ubuntu default ulimit -n 1024 or
    the OS X El Capitan default of 256; the setting is temporary and expires with the
    Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST-CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor, Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on macOS (seems to have no effect; the variable must still be exported manually before Jina starts)
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.27.11'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096. Useful for running matplotlib/seaborn
    with many parallel plot generators, versus the Ubuntu default ulimit -n 1024 or
    the OS X El Capitan default of 256; the setting is temporary and expires with the
    Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST-CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor, Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders import NotebookLoader
from langchain_community.document_loaders.notebook import (
concatenate_cells,
remove_newlines,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"concatenate_cells": "langchain_community.document_loaders.notebook",
"remove_newlines": "langchain_community.document_loaders.notebook",
"NotebookLoader": "langchain_community.document_loaders",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"NotebookLoader",
"concatenate_cells",
"remove_newlines",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders import NotebookLoader
from langchain_community.document_loaders.notebook import (
concatenate_cells,
remove_newlines,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"concatenate_cells": "langchain_community.document_loaders.notebook",
"remove_newlines": "langchain_community.document_loaders.notebook",
"NotebookLoader": "langchain_community.document_loaders",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"concatenate_cells",
"remove_newlines",
"NotebookLoader",
]
|
from typing import Callable, Dict, Generic, List, Optional, Type, TypeVar
from torch.utils.data import Dataset
from docarray import BaseDoc, DocList, DocVec
from docarray.typing import TorchTensor
from docarray.utils._internal._typing import change_cls_name, safe_issubclass
T_doc = TypeVar('T_doc', bound=BaseDoc)
class MultiModalDataset(Dataset, Generic[T_doc]):
"""
A dataset that can be used inside a PyTorch DataLoader.
In other words, it implements the PyTorch Dataset interface.
The preprocessing dictionary passed to the constructor consists of keys that are
field names and values that are functions that take a single argument and return
a single value.
---
```python
from torch.utils.data import DataLoader
from docarray import DocList
from docarray.data import MultiModalDataset
from docarray.documents import TextDoc
def prepend_number(text: str):
return f"Number {text}"
docs = DocList[TextDoc](TextDoc(text=str(i)) for i in range(16))
ds = MultiModalDataset[TextDoc](docs, preprocessing={'text': prepend_number})
loader = DataLoader(ds, batch_size=4, collate_fn=MultiModalDataset[TextDoc].collate_fn)
for batch in loader:
print(batch.text)
```
---
Nested fields can be accessed by using dot notation.
The document itself can be accessed using the empty string as the key.
Transformations that operate on reference types (such as Documents) can optionally
not return a value.
The transformations will be applied according to their order in the dictionary.
---
```python
import torch
from torch.utils.data import DataLoader
from docarray import DocList, BaseDoc
from docarray.data import MultiModalDataset
from docarray.documents import TextDoc
class Thesis(BaseDoc):
title: TextDoc
class Student(BaseDoc):
thesis: Thesis
def embed_title(title: TextDoc):
title.embedding = torch.ones(4)
def normalize_embedding(thesis: Thesis):
thesis.title.embedding = thesis.title.embedding / thesis.title.embedding.norm()
def add_nonsense(student: Student):
student.thesis.title.embedding = student.thesis.title.embedding + int(
student.thesis.title.text
)
docs = DocList[Student](Student(thesis=Thesis(title=str(i))) for i in range(16))
ds = MultiModalDataset[Student](
docs,
preprocessing={
"thesis.title": embed_title,
"thesis": normalize_embedding,
"": add_nonsense,
},
)
loader = DataLoader(ds, batch_size=4, collate_fn=ds.collate_fn)
for batch in loader:
print(batch.thesis.title.embedding)
```
---
:param docs: the `DocList` to be used as the dataset
:param preprocessing: a dictionary of field names and preprocessing functions
"""
doc_type: Optional[Type[BaseDoc]] = None
__typed_ds__: Dict[Type[BaseDoc], Type['MultiModalDataset']] = {}
def __init__(
self, docs: 'DocList[T_doc]', preprocessing: Dict[str, Callable]
) -> None:
self.docs = docs
self._preprocessing = preprocessing
def __len__(self):
return len(self.docs)
def __getitem__(self, item: int):
doc = self.docs[item].copy(deep=True)
for field, preprocess in self._preprocessing.items():
if len(field) == 0:
doc = preprocess(doc) or doc
else:
acc_path = field.split('.')
_field_ref = doc
for attr in acc_path[:-1]:
_field_ref = getattr(_field_ref, attr)
attr = acc_path[-1]
value = getattr(_field_ref, attr)
setattr(_field_ref, attr, preprocess(value) or value)
return doc
@classmethod
def collate_fn(cls, batch: List[T_doc]):
doc_type = cls.doc_type
if doc_type:
batch_da = DocVec[doc_type]( # type: ignore
batch,
tensor_type=TorchTensor,
)
else:
batch_da = DocVec(batch, tensor_type=TorchTensor)
return batch_da
@classmethod
def __class_getitem__(cls, item: Type[BaseDoc]) -> Type['MultiModalDataset']:
if not safe_issubclass(item, BaseDoc):
raise ValueError(
            f'{cls.__name__}[item] item should be a Document, not {item}'
)
if item not in cls.__typed_ds__:
global _TypedDataset
class _TypedDataset(cls): # type: ignore
doc_type = item
change_cls_name(
_TypedDataset, f'{cls.__name__}[{item.__name__}]', globals()
)
cls.__typed_ds__[item] = _TypedDataset
return cls.__typed_ds__[item]
|
from typing import Callable, Dict, Generic, List, Optional, Type, TypeVar
from torch.utils.data import Dataset
from docarray import BaseDoc, DocList, DocVec
from docarray.typing import TorchTensor
from docarray.utils._internal._typing import change_cls_name
T_doc = TypeVar('T_doc', bound=BaseDoc)
class MultiModalDataset(Dataset, Generic[T_doc]):
"""
A dataset that can be used inside a PyTorch DataLoader.
In other words, it implements the PyTorch Dataset interface.
The preprocessing dictionary passed to the constructor consists of keys that are
    field names and values that are functions that take a single argument and return
    a single value.
---
```python
from torch.utils.data import DataLoader
from docarray import DocList
from docarray.data import MultiModalDataset
from docarray.documents import TextDoc
def prepend_number(text: str):
return f"Number {text}"
docs = DocList[TextDoc](TextDoc(text=str(i)) for i in range(16))
ds = MultiModalDataset[TextDoc](docs, preprocessing={'text': prepend_number})
loader = DataLoader(ds, batch_size=4, collate_fn=MultiModalDataset[TextDoc].collate_fn)
for batch in loader:
print(batch.text)
```
---
Nested fields can be accessed by using dot notation.
The document itself can be accessed using the empty string as the key.
    Transformations that operate on reference types (such as Documents) may omit a
    return value; in that case the mutated input is used as-is.
The transformations will be applied according to their order in the dictionary.
---
```python
import torch
from torch.utils.data import DataLoader
from docarray import DocList, BaseDoc
from docarray.data import MultiModalDataset
from docarray.documents import TextDoc
class Thesis(BaseDoc):
title: TextDoc
class Student(BaseDoc):
thesis: Thesis
def embed_title(title: TextDoc):
title.embedding = torch.ones(4)
def normalize_embedding(thesis: Thesis):
thesis.title.embedding = thesis.title.embedding / thesis.title.embedding.norm()
def add_nonsense(student: Student):
student.thesis.title.embedding = student.thesis.title.embedding + int(
student.thesis.title.text
)
docs = DocList[Student](Student(thesis=Thesis(title=str(i))) for i in range(16))
ds = MultiModalDataset[Student](
docs,
preprocessing={
"thesis.title": embed_title,
"thesis": normalize_embedding,
"": add_nonsense,
},
)
loader = DataLoader(ds, batch_size=4, collate_fn=ds.collate_fn)
for batch in loader:
print(batch.thesis.title.embedding)
```
---
:param docs: the `DocList` to be used as the dataset
:param preprocessing: a dictionary of field names and preprocessing functions
"""
doc_type: Optional[Type[BaseDoc]] = None
__typed_ds__: Dict[Type[BaseDoc], Type['MultiModalDataset']] = {}
def __init__(
self, docs: 'DocList[T_doc]', preprocessing: Dict[str, Callable]
) -> None:
self.docs = docs
self._preprocessing = preprocessing
def __len__(self):
return len(self.docs)
def __getitem__(self, item: int):
doc = self.docs[item].copy(deep=True)
for field, preprocess in self._preprocessing.items():
if len(field) == 0:
doc = preprocess(doc) or doc
else:
acc_path = field.split('.')
_field_ref = doc
for attr in acc_path[:-1]:
_field_ref = getattr(_field_ref, attr)
attr = acc_path[-1]
value = getattr(_field_ref, attr)
setattr(_field_ref, attr, preprocess(value) or value)
return doc
@classmethod
def collate_fn(cls, batch: List[T_doc]):
doc_type = cls.doc_type
if doc_type:
batch_da = DocVec[doc_type]( # type: ignore
batch,
tensor_type=TorchTensor,
)
else:
batch_da = DocVec(batch, tensor_type=TorchTensor)
return batch_da
@classmethod
def __class_getitem__(cls, item: Type[BaseDoc]) -> Type['MultiModalDataset']:
if not issubclass(item, BaseDoc):
raise ValueError(
            f'{cls.__name__}[item] item should be a Document, not {item}'
)
if item not in cls.__typed_ds__:
global _TypedDataset
class _TypedDataset(cls): # type: ignore
doc_type = item
change_cls_name(
_TypedDataset, f'{cls.__name__}[{item.__name__}]', globals()
)
cls.__typed_ds__[item] = _TypedDataset
return cls.__typed_ds__[item]
|
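The `__class_getitem__` caching in both versions above guarantees that parametrizing the dataset twice with the same document type yields the very same class object. A standalone sketch of that pattern (all names illustrative):
```python
# Standalone sketch of typed-subclass caching via __class_getitem__:
# one renamed subclass is created per parameter type, then reused.
from typing import Dict, Type


class Base:
    doc_type = None
    __typed__: Dict[type, Type["Base"]] = {}

    @classmethod
    def __class_getitem__(cls, item: type) -> Type["Base"]:
        if item not in cls.__typed__:
            typed = type(f"{cls.__name__}[{item.__name__}]", (cls,), {"doc_type": item})
            cls.__typed__[item] = typed
        return cls.__typed__[item]


class TextDoc:
    pass


assert Base[TextDoc] is Base[TextDoc]  # cached: the very same class object
assert Base[TextDoc].doc_type is TextDoc  # the type parameter is recorded
```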
"""
NOTE: This file must be imported like
``import torch.distributed.fsdp._traversal_utils`` and not like
``from torch.distributed.fsdp._traversal_utils import ...`` to avoid circular
imports. For brevity, we may import the file as ``traversal_utils``.
"""
import collections
import torch.nn as nn
from torch.distributed._composable.contract import _get_registry
from torch.distributed.fsdp._common_utils import _FSDPState, _get_module_fsdp_state
"""
[Note: FSDP State Traversal]
For the wrapper code path, ``_FSDPState`` is the ``FullyShardedDataParallel``
module wrapping a fully sharded module, and for the non-wrapper code path,
``_FSDPState`` is an object that gets embedded on a fully sharded module.
See [Note: Fully Sharded Module] for the definition.
There are three common traversal idioms: Given a root module,
- ``_get_fsdp_states()`` returns all ``_FSDPState`` s in the tree.
- ``get_fsdp_root_states()`` returns all local root ``_FSDPState`` s in the
tree (i.e. those with ``_is_root == True``).
- ``_get_fsdp_handles()`` returns all ``FlatParamHandle`` s in the tree.
All of these methods must take in the root module (i.e. an ``nn.Module``) and
not a general ``_FSDPState`` because ``_FSDPState`` does not support a graph
traversal, whereas ``nn.Module`` has ``nn.Module.modules()`` for traversal.
"""
def _composable(module: nn.Module) -> bool:
"""
    Returns whether ``module`` can compose with ``fully_shard``.
"""
# TODO: Add any other composable APIs that are mutually exclusive.
registry = _get_registry(module)
if registry is None:
return True
return "replicate" not in registry
# TODO (awgu): We may be able to remove this function if we retired the
# `use_orig_params=False` code path since so far we only need the module for
# `FlatParameter` registration, which is not needed for `use_orig_params=True`.
def _get_fsdp_states_with_modules(
module: nn.Module,
) -> tuple[list[_FSDPState], list[nn.Module]]:
"""
Returns a tuple containing:
1. A list of the ``_FSDPState`` instances in the module tree rooted at
``module`` without any duplicates and following the ``module.modules()``
traversal order (which is assumed to be depth-first).
2. A corresponding list of the modules owning the states in the first list.
For the wrapper code path, both returned lists are the same, each
containing all ``FullyShardedDataParallel`` instances. For the composable
code path, this returns a list of all composable state instances and a list
of the corresponding fully sharded modules. See [Note: Fully Sharded
Module].
NOTE: The traversal does not proceed into any module annotated by an
incompatible API (e.g. ``replicate``).
"""
fsdp_states: list[_FSDPState] = []
fsdp_modules: list[nn.Module] = []
# Track the visited FSDP states since multiple modules may share the same
# one and we want to return a de-duplicated list
visited_fsdp_states: set[_FSDPState] = set()
# Track the visited modules in case of shared modules, which implies the
# module graph is no longer a tree
visited_modules: set[nn.Module] = set()
# Perform depth-first search from `module` to ensure that we do not
# traverse into an incompatible API's subtree (use DFS instead of BFS to
# match `.modules()` order)
deque: collections.deque[nn.Module] = collections.deque([module])
while deque:
submodule = deque.popleft()
visited_modules.add(submodule)
if not _composable(submodule):
continue
for child_module in reversed(list(submodule.children())):
if child_module not in visited_modules:
deque.appendleft(child_module)
optional_state = _get_module_fsdp_state(submodule)
if optional_state is not None and optional_state not in visited_fsdp_states:
visited_fsdp_states.add(optional_state)
fsdp_states.append(optional_state)
fsdp_modules.append(submodule)
return fsdp_states, fsdp_modules
def _get_fsdp_states(module: nn.Module) -> list[_FSDPState]:
"""See :func:`_get_fsdp_states_with_modules`."""
fsdp_states, _ = _get_fsdp_states_with_modules(module)
return fsdp_states
def _get_fsdp_handles(module: nn.Module) -> list:
"""
Returns all ``FlatParamHandle`` s in the module tree rooted at ``module``
following the rules in :func:`_get_fsdp_state`.
"""
handles = [
fsdp_state._handle
for fsdp_state in _get_fsdp_states(module)
if fsdp_state._handle is not None
]
return handles
|
"""
NOTE: This file must be imported like
``import torch.distributed.fsdp._traversal_utils`` and not like
``from torch.distributed.fsdp._traversal_utils import ...`` to avoid circular
imports. For brevity, we may import the file as ``traversal_utils``.
"""
import collections
import torch.nn as nn
from torch.distributed._composable.contract import _get_registry
from torch.distributed.fsdp._common_utils import _FSDPState, _get_module_fsdp_state
"""
[Note: FSDP State Traversal]
For the wrapper code path, ``_FSDPState`` is the ``FullyShardedDataParallel``
module wrapping a fully sharded module, and for the non-wrapper code path,
``_FSDPState`` is an object that gets embedded on a fully sharded module.
See [Note: Fully Sharded Module] for the definition.
There are three common traversal idioms: Given a root module,
- ``_get_fsdp_states()`` returns all ``_FSDPState`` s in the tree.
- ``get_fsdp_root_states()`` returns all local root ``_FSDPState`` s in the
tree (i.e. those with ``_is_root == True``).
- ``_get_fsdp_handles()`` returns all ``FlatParamHandle`` s in the tree.
All of these methods must take in the root module (i.e. an ``nn.Module``) and
not a general ``_FSDPState`` because ``_FSDPState`` does not support a graph
traversal, whereas ``nn.Module`` has ``nn.Module.modules()`` for traversal.
"""
def _composable(module: nn.Module) -> bool:
"""
    Returns whether ``module`` can compose with ``fully_shard``.
"""
# TODO: Add any other composable APIs that are mutually exclusive.
registry = _get_registry(module)
if registry is None:
return True
return "replicate" not in registry
# TODO (awgu): We may be able to remove this function if we retired the
# `use_orig_params=False` code path since so far we only need the module for
# `FlatParameter` registration, which is not needed for `use_orig_params=True`.
def _get_fsdp_states_with_modules(
module: nn.Module,
) -> tuple[list[_FSDPState], list[nn.Module]]:
"""
Returns a tuple containing:
1. A list of the ``_FSDPState`` instances in the module tree rooted at
``module`` without any duplicates and following the ``module.modules()``
traversal order (which is assumed to be depth-first).
2. A corresponding list of the modules owning the states in the first list.
For the wrapper code path, both returned lists are the same, each
containing all ``FullyShardedDataParallel`` instances. For the composable
code path, this returns a list of all composable state instances and a list
of the corresponding fully sharded modules. See [Note: Fully Sharded
Module].
NOTE: The traversal does not proceed into any module annotated by an
incompatible API (e.g. ``replicate``).
"""
fsdp_states: list[_FSDPState] = []
fsdp_modules: list[nn.Module] = []
# Track the visited FSDP states since multiple modules may share the same
# one and we want to return a de-duplicated list
visited_fsdp_states: set[_FSDPState] = set()
# Track the visited modules in case of shared modules, which implies the
# module graph is no longer a tree
visited_modules: set[nn.Module] = set()
# Perform depth-first search from `module` to ensure that we do not
# traverse into an incompatible API's subtree (use DFS instead of BFS to
# match `.modules()` order)
deque: collections.deque[nn.Module] = collections.deque([module])
while deque:
submodule = deque.popleft()
visited_modules.add(submodule)
if not _composable(submodule):
continue
for child_module in reversed(list(submodule.children())):
if child_module not in visited_modules:
deque.appendleft(child_module)
optional_state = _get_module_fsdp_state(submodule)
if optional_state is not None and optional_state not in visited_fsdp_states:
visited_fsdp_states.add(optional_state)
fsdp_states.append(optional_state)
fsdp_modules.append(submodule)
return fsdp_states, fsdp_modules
def _get_fsdp_states(module: nn.Module) -> list[_FSDPState]:
"""See :func:`_get_fsdp_states_with_modules`."""
fsdp_states, _ = _get_fsdp_states_with_modules(module)
return fsdp_states
def _get_fsdp_handles(module: nn.Module) -> list:
"""
Returns all ``FlatParamHandle`` s in the module tree rooted at ``module``
following the rules in :func:`_get_fsdp_state`.
"""
handles = [
fsdp_state._handle
for fsdp_state in _get_fsdp_states(module)
if fsdp_state._handle is not None
]
return handles
|
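The deque-based loop in `_get_fsdp_states_with_modules` is a depth-first traversal that reproduces `nn.Module.modules()` pre-order while still allowing whole subtrees to be skipped. A minimal sketch of just that traversal idiom, independent of the FSDP internals:
```python
# Sketch: appendleft-ing reversed children onto a deque reproduces
# nn.Module.modules() pre-order and lets the loop prune whole subtrees.
import collections

import torch.nn as nn


def dfs_modules(root: nn.Module, skip=lambda m: False):
    visited = set()
    stack = collections.deque([root])
    while stack:
        module = stack.popleft()
        visited.add(module)
        if skip(module):
            continue  # prune this subtree entirely
        for child in reversed(list(module.children())):
            if child not in visited:
                stack.appendleft(child)
        yield module


model = nn.Sequential(nn.Linear(2, 2), nn.Sequential(nn.ReLU(), nn.Linear(2, 2)))
assert list(dfs_modules(model)) == list(model.modules())
```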
import sys
import traceback
from importlib.machinery import SourceFileLoader
if __name__ == "__main__":
files = sys.argv[1:]
has_failure = False
for file in files:
try:
SourceFileLoader("x", file).load_module()
except Exception:
has_failure = True
traceback.print_exc()
sys.exit(1 if has_failure else 0)
|
import sys
import traceback
from importlib.machinery import SourceFileLoader
if __name__ == "__main__":
files = sys.argv[1:]
has_failure = False
for file in files:
try:
SourceFileLoader("x", file).load_module()
except Exception:
has_failure = True
print(file)
traceback.print_exc()
print()
sys.exit(1 if has_failure else 0)
|
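`SourceFileLoader.load_module()` has been deprecated since Python 3.4; a sketch of the same smoke test written against the `importlib.util` replacement, with behavior otherwise unchanged:
```python
# Same import smoke test, using the non-deprecated importlib.util API
# (spec_from_file_location / exec_module) instead of load_module().
import importlib.util
import sys
import traceback

if __name__ == "__main__":
    has_failure = False
    for file in sys.argv[1:]:
        try:
            spec = importlib.util.spec_from_file_location("x", file)
            module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(module)
        except Exception:
            has_failure = True
            print(file)
            traceback.print_exc()
            print()
    sys.exit(1 if has_failure else 0)
```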
"""Format instructions."""
JSON_FORMAT_INSTRUCTIONS = """The output should be formatted as a JSON instance that conforms to the JSON schema below.
As an example, for the schema {{"properties": {{"foo": {{"title": "Foo", "description": "a list of strings", "type": "array", "items": {{"type": "string"}}}}}}, "required": ["foo"]}}
the object {{"foo": ["bar", "baz"]}} is a well-formatted instance of the schema. The object {{"properties": {{"foo": ["bar", "baz"]}}}} is not well-formatted.
Here is the output schema:
```
{schema}
```""" # noqa: E501
|
# flake8: noqa
JSON_FORMAT_INSTRUCTIONS = """The output should be formatted as a JSON instance that conforms to the JSON schema below.
As an example, for the schema {{"properties": {{"foo": {{"title": "Foo", "description": "a list of strings", "type": "array", "items": {{"type": "string"}}}}}}, "required": ["foo"]}}
the object {{"foo": ["bar", "baz"]}} is a well-formatted instance of the schema. The object {{"properties": {{"foo": ["bar", "baz"]}}}} is not well-formatted.
Here is the output schema:
```
{schema}
```"""
|
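The doubled braces in both templates above are `str.format` escapes: only `{schema}` is a live placeholder, while `{{` and `}}` render as literal JSON braces. A tiny demonstration:
```python
# Doubled braces survive str.format as literal braces; only {schema} is filled.
template = 'for the schema {{"foo": "bar"}}, fill in: {schema}'
print(template.format(schema='{"type": "object"}'))
# -> for the schema {"foo": "bar"}, fill in: {"type": "object"}
```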
import logging
from collections import defaultdict
from typing import Annotated, Any, Dict, List, Optional, Sequence
from autogpt_libs.utils.cache import thread_cached
from fastapi import APIRouter, Body, Depends, HTTPException
from prisma.enums import AgentExecutionStatus, APIKeyPermission
from typing_extensions import TypedDict
import backend.data.block
from backend.data import execution as execution_db
from backend.data import graph as graph_db
from backend.data.api_key import APIKey
from backend.data.block import BlockInput, CompletedBlockOutput
from backend.data.execution import ExecutionResult
from backend.executor import ExecutionManager
from backend.server.external.middleware import require_permission
from backend.util.service import get_service_client
from backend.util.settings import Settings
@thread_cached
def execution_manager_client() -> ExecutionManager:
return get_service_client(ExecutionManager)
settings = Settings()
logger = logging.getLogger(__name__)
v1_router = APIRouter()
class NodeOutput(TypedDict):
key: str
value: Any
class ExecutionNode(TypedDict):
node_id: str
input: Any
output: Dict[str, Any]
class ExecutionNodeOutput(TypedDict):
node_id: str
outputs: List[NodeOutput]
class GraphExecutionResult(TypedDict):
execution_id: str
status: str
nodes: List[ExecutionNode]
output: Optional[List[Dict[str, str]]]
def get_outputs_with_names(results: List[ExecutionResult]) -> List[Dict[str, str]]:
outputs = []
for result in results:
if "output" in result.output_data:
output_value = result.output_data["output"][0]
name = result.output_data.get("name", [None])[0]
if output_value and name:
outputs.append({name: output_value})
return outputs
@v1_router.get(
path="/blocks",
tags=["blocks"],
dependencies=[Depends(require_permission(APIKeyPermission.READ_BLOCK))],
)
def get_graph_blocks() -> Sequence[dict[Any, Any]]:
blocks = [block() for block in backend.data.block.get_blocks().values()]
return [b.to_dict() for b in blocks if not b.disabled]
@v1_router.post(
path="/blocks/{block_id}/execute",
tags=["blocks"],
dependencies=[Depends(require_permission(APIKeyPermission.EXECUTE_BLOCK))],
)
def execute_graph_block(
block_id: str,
data: BlockInput,
api_key: APIKey = Depends(require_permission(APIKeyPermission.EXECUTE_BLOCK)),
) -> CompletedBlockOutput:
obj = backend.data.block.get_block(block_id)
if not obj:
raise HTTPException(status_code=404, detail=f"Block #{block_id} not found.")
output = defaultdict(list)
for name, data in obj.execute(data):
output[name].append(data)
return output
@v1_router.post(
path="/graphs/{graph_id}/execute/{graph_version}",
tags=["graphs"],
)
def execute_graph(
graph_id: str,
graph_version: int,
node_input: Annotated[dict[str, Any], Body(..., embed=True, default_factory=dict)],
api_key: APIKey = Depends(require_permission(APIKeyPermission.EXECUTE_GRAPH)),
) -> dict[str, Any]:
try:
graph_exec = execution_manager_client().add_execution(
graph_id,
graph_version=graph_version,
data=node_input,
user_id=api_key.user_id,
)
return {"id": graph_exec.graph_exec_id}
except Exception as e:
msg = str(e).encode().decode("unicode_escape")
raise HTTPException(status_code=400, detail=msg)
@v1_router.get(
path="/graphs/{graph_id}/executions/{graph_exec_id}/results",
tags=["graphs"],
)
async def get_graph_execution_results(
graph_id: str,
graph_exec_id: str,
api_key: APIKey = Depends(require_permission(APIKeyPermission.READ_GRAPH)),
) -> GraphExecutionResult:
graph = await graph_db.get_graph(graph_id, user_id=api_key.user_id)
if not graph:
raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.")
results = await execution_db.get_execution_results(graph_exec_id)
last_result = results[-1] if results else None
execution_status = (
last_result.status if last_result else AgentExecutionStatus.INCOMPLETE
)
outputs = get_outputs_with_names(results)
return GraphExecutionResult(
execution_id=graph_exec_id,
status=execution_status,
nodes=[
ExecutionNode(
node_id=result.node_id,
input=result.input_data.get("value", result.input_data),
output={k: v for k, v in result.output_data.items()},
)
for result in results
],
output=outputs if execution_status == AgentExecutionStatus.COMPLETED else None,
)
|
import logging
from collections import defaultdict
from typing import Annotated, Any, Dict, List, Optional, Sequence
from autogpt_libs.utils.cache import thread_cached
from fastapi import APIRouter, Body, Depends, HTTPException
from prisma.enums import AgentExecutionStatus, APIKeyPermission
from typing_extensions import TypedDict
import backend.data.block
from backend.data import execution as execution_db
from backend.data import graph as graph_db
from backend.data.api_key import APIKey
from backend.data.block import BlockInput, CompletedBlockOutput
from backend.data.execution import ExecutionResult
from backend.executor import ExecutionManager
from backend.server.external.middleware import require_permission
from backend.util.service import get_service_client
from backend.util.settings import Settings
@thread_cached
def execution_manager_client() -> ExecutionManager:
return get_service_client(ExecutionManager)
settings = Settings()
logger = logging.getLogger(__name__)
v1_router = APIRouter()
class NodeOutput(TypedDict):
key: str
value: Any
class ExecutionNode(TypedDict):
node_id: str
input: Any
output: Dict[str, Any]
class ExecutionNodeOutput(TypedDict):
node_id: str
outputs: List[NodeOutput]
class GraphExecutionResult(TypedDict):
execution_id: str
status: str
nodes: List[ExecutionNode]
output: Optional[List[Dict[str, str]]]
def get_outputs_with_names(results: List[ExecutionResult]) -> List[Dict[str, str]]:
outputs = []
for result in results:
if "output" in result.output_data:
output_value = result.output_data["output"][0]
name = result.output_data.get("name", [None])[0]
if output_value and name:
outputs.append({name: output_value})
return outputs
@v1_router.get(
path="/blocks",
tags=["blocks"],
dependencies=[Depends(require_permission(APIKeyPermission.READ_BLOCK))],
)
def get_graph_blocks() -> Sequence[dict[Any, Any]]:
blocks = [block() for block in backend.data.block.get_blocks().values()]
return [b.to_dict() for b in blocks]
@v1_router.post(
path="/blocks/{block_id}/execute",
tags=["blocks"],
dependencies=[Depends(require_permission(APIKeyPermission.EXECUTE_BLOCK))],
)
def execute_graph_block(
block_id: str,
data: BlockInput,
api_key: APIKey = Depends(require_permission(APIKeyPermission.EXECUTE_BLOCK)),
) -> CompletedBlockOutput:
obj = backend.data.block.get_block(block_id)
if not obj:
raise HTTPException(status_code=404, detail=f"Block #{block_id} not found.")
output = defaultdict(list)
for name, data in obj.execute(data):
output[name].append(data)
return output
@v1_router.post(
path="/graphs/{graph_id}/execute/{graph_version}",
tags=["graphs"],
)
def execute_graph(
graph_id: str,
graph_version: int,
node_input: Annotated[dict[str, Any], Body(..., embed=True, default_factory=dict)],
api_key: APIKey = Depends(require_permission(APIKeyPermission.EXECUTE_GRAPH)),
) -> dict[str, Any]:
try:
graph_exec = execution_manager_client().add_execution(
graph_id,
graph_version=graph_version,
data=node_input,
user_id=api_key.user_id,
)
return {"id": graph_exec.graph_exec_id}
except Exception as e:
msg = str(e).encode().decode("unicode_escape")
raise HTTPException(status_code=400, detail=msg)
@v1_router.get(
path="/graphs/{graph_id}/executions/{graph_exec_id}/results",
tags=["graphs"],
)
async def get_graph_execution_results(
graph_id: str,
graph_exec_id: str,
api_key: APIKey = Depends(require_permission(APIKeyPermission.READ_GRAPH)),
) -> GraphExecutionResult:
graph = await graph_db.get_graph(graph_id, user_id=api_key.user_id)
if not graph:
raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.")
results = await execution_db.get_execution_results(graph_exec_id)
last_result = results[-1] if results else None
execution_status = (
last_result.status if last_result else AgentExecutionStatus.INCOMPLETE
)
outputs = get_outputs_with_names(results)
return GraphExecutionResult(
execution_id=graph_exec_id,
status=execution_status,
nodes=[
ExecutionNode(
node_id=result.node_id,
input=result.input_data.get("value", result.input_data),
output={k: v for k, v in result.output_data.items()},
)
for result in results
],
output=outputs if execution_status == AgentExecutionStatus.COMPLETED else None,
)
|
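For reference, a hedged client-side sketch against the routes defined above; the base URL and API-key header name are assumptions, and only the paths plus the embedded `node_input` body key come from the router itself:
```python
# Hypothetical client for the external API above. Base URL, header name,
# and ids are assumptions; paths and the embedded body key are from the router.
import httpx

BASE = "http://localhost:8000/api/v1"  # assumed mount point
headers = {"X-API-Key": "your-api-key"}  # assumed auth header

with httpx.Client(base_url=BASE, headers=headers) as client:
    blocks = client.get("/blocks").json()
    exec_resp = client.post(
        "/graphs/some-graph-id/execute/1",
        json={"node_input": {"value": 42}},  # Body(..., embed=True) wraps the body
    ).json()
    results = client.get(
        f"/graphs/some-graph-id/executions/{exec_resp['id']}/results"
    ).json()
```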
import os
import torchaudio
import torchvision
from torch.utils.data import Dataset
def _load_list(args, *filenames):
output = []
length = []
for filename in filenames:
filepath = os.path.join(os.path.dirname(args.dataset_path), filename)
for line in open(filepath).read().splitlines():
rel_path, input_length = line.split(",")[1:3]
path = os.path.normpath(os.path.join(args.dataset_path, rel_path[:-4] + ".mp4"))
length.append(int(input_length))
output.append(path)
return output, length
def load_video(path):
"""
    Returns a torch.Tensor of shape (T, C, H, W).
"""
vid = torchvision.io.read_video(path, pts_unit="sec", output_format="THWC")[0]
vid = vid.permute((0, 3, 1, 2))
return vid
def load_audio(path):
"""
    Returns a torch.Tensor of shape (T, 1).
"""
waveform, sample_rate = torchaudio.load(path, normalize=True)
return waveform.transpose(1, 0)
def load_transcript(path):
transcript_path = path.replace("video_seg", "text_seg")[:-4] + ".txt"
return open(transcript_path).read().splitlines()[0]
def load_item(path, md):
if md == "v":
return (load_video(path), load_transcript(path))
if md == "a":
return (load_audio(path), load_transcript(path))
if md == "av":
return (load_audio(path), load_video(path), load_transcript(path))
class LRS3(Dataset):
def __init__(
self,
args,
subset: str = "train",
) -> None:
if subset is not None and subset not in ["train", "val", "test"]:
raise ValueError("When `subset` is not None, it must be one of ['train', 'val', 'test'].")
self.args = args
if subset == "train":
self._filelist, self._lengthlist = _load_list(self.args, "train_transcript_lengths_seg16s.csv")
if subset == "val":
self._filelist, self._lengthlist = _load_list(self.args, "test_transcript_lengths_seg16s.csv")
if subset == "test":
self._filelist, self._lengthlist = _load_list(self.args, "test_transcript_lengths_seg16s.csv")
def __getitem__(self, n):
path = self._filelist[n]
return load_item(path, self.args.md)
def __len__(self) -> int:
return len(self._filelist)
|
import os
from pathlib import Path
from typing import Tuple, Union
import torch
import torchaudio
import torchvision
from torch import Tensor
from torch.utils.data import Dataset
def _load_list(args, *filenames):
output = []
length = []
for filename in filenames:
filepath = os.path.join(os.path.dirname(args.dataset_path), filename)
for line in open(filepath).read().splitlines():
            dataset_name, rel_path, input_length = line.split(",")[:3]
path = os.path.normpath(os.path.join(args.dataset_path, rel_path[:-4] + ".mp4"))
length.append(int(input_length))
output.append(path)
return output, length
def load_video(path):
"""
    Returns a torch.Tensor of shape (T, C, H, W).
"""
vid = torchvision.io.read_video(path, pts_unit="sec", output_format="THWC")[0]
vid = vid.permute((0, 3, 1, 2))
return vid
def load_audio(path):
"""
    Returns a torch.Tensor of shape (T, 1).
"""
waveform, sample_rate = torchaudio.load(path, normalize=True)
return waveform.transpose(1, 0)
def load_transcript(path):
transcript_path = path.replace("video_seg", "text_seg")[:-4] + ".txt"
return open(transcript_path).read().splitlines()[0]
def load_item(path, md):
if md == "v":
return (load_video(path), load_transcript(path))
if md == "a":
return (load_audio(path), load_transcript(path))
if md == "av":
return (load_audio(path), load_video(path), load_transcript(path))
class LRS3(Dataset):
def __init__(
self,
args,
subset: str = "train",
) -> None:
if subset is not None and subset not in ["train", "val", "test"]:
raise ValueError("When `subset` is not None, it must be one of ['train', 'val', 'test'].")
self.args = args
if subset == "train":
self._filelist, self._lengthlist = _load_list(self.args, "train_transcript_lengths_seg16s.csv")
if subset == "val":
self._filelist, self._lengthlist = _load_list(self.args, "test_transcript_lengths_seg16s.csv")
if subset == "test":
self._filelist, self._lengthlist = _load_list(self.args, "test_transcript_lengths_seg16s.csv")
def __getitem__(self, n):
path = self._filelist[n]
return load_item(path, self.args.md)
def __len__(self) -> int:
return len(self._filelist)
|
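Both LRS3 variants only need an `args` object exposing `dataset_path` and `md`; a hedged usage sketch in which the path is a placeholder:
```python
# Hypothetical usage of the LRS3 dataset above; dataset_path is a placeholder
# and md selects the modality ("v", "a", or "av").
from argparse import Namespace

args = Namespace(dataset_path="/data/LRS3/video_seg", md="av")
dataset = LRS3(args, subset="train")
audio, video, transcript = dataset[0]  # md="av" returns all three
```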
from __future__ import annotations
from dataclasses import dataclass
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
@dataclass
class SparseEncoderTrainingArguments(SentenceTransformerTrainingArguments):
"""
SparseEncoderTrainingArguments extends :class:`~transformers.TrainingArguments` with additional arguments
specific to Sentence Transformers. See :class:`~transformers.TrainingArguments` for the complete list of
available arguments.
Args:
"""
|
from __future__ import annotations
from dataclasses import dataclass
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
@dataclass
class SparseEncoderTrainingArguments(SentenceTransformerTrainingArguments):
"""
SparseEncoderTrainingArguments extends :class:`~transformers.TrainingArguments` with additional arguments
specific to Sentence Transformers. See :class:`~transformers.TrainingArguments` for the complete list of
available arguments.
Args:
output_dir (`str`):
The output directory where the model checkpoints will be written.
prompts (`Union[Dict[str, Dict[str, str]], Dict[str, str], str]`, *optional*):
The prompts to use for each column in the training, evaluation and test datasets. Four formats are accepted:
1. `str`: A single prompt to use for all columns in the datasets, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
2. `Dict[str, str]`: A dictionary mapping column names to prompts, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
3. `Dict[str, str]`: A dictionary mapping dataset names to prompts. This should only be used if your training/evaluation/test
datasets are a :class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
4. `Dict[str, Dict[str, str]]`: A dictionary mapping dataset names to dictionaries mapping column names to
prompts. This should only be used if your training/evaluation/test datasets are a
:class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
batch_sampler (Union[:class:`~sentence_transformers.training_args.BatchSamplers`, `str`], *optional*):
The batch sampler to use. See :class:`~sentence_transformers.training_args.BatchSamplers` for valid options.
Defaults to ``BatchSamplers.BATCH_SAMPLER``.
multi_dataset_batch_sampler (Union[:class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`, `str`], *optional*):
The multi-dataset batch sampler to use. See :class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`
for valid options. Defaults to ``MultiDatasetBatchSamplers.PROPORTIONAL``.
"""
|
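Since the subclass adds no fields of its own, it is constructed exactly like `SentenceTransformerTrainingArguments`; a minimal instantiation sketch (values illustrative):
```python
# Minimal sketch: only output_dir is required; prompts here map column
# names to prompt strings (format 2 from the docstring above).
args = SparseEncoderTrainingArguments(
    output_dir="models/my-sparse-encoder",
    prompts={"query": "query: ", "answer": "document: "},
)
```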
_base_ = '../faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py'
rpn_weight = 0.7
model = dict(
rpn_head=dict(
_delete_=True,
type='CascadeRPNHead',
num_stages=2,
stages=[
dict(
type='StageCascadeRPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[1.0],
strides=[4, 8, 16, 32, 64]),
adapt_cfg=dict(type='dilation', dilation=3),
bridged_feature=True,
with_cls=False,
reg_decoded_bbox=True,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=(.0, .0, .0, .0),
target_stds=(0.1, 0.1, 0.5, 0.5)),
loss_bbox=dict(
type='IoULoss', linear=True,
loss_weight=10.0 * rpn_weight)),
dict(
type='StageCascadeRPNHead',
in_channels=256,
feat_channels=256,
adapt_cfg=dict(type='offset'),
bridged_feature=False,
with_cls=True,
reg_decoded_bbox=True,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=(.0, .0, .0, .0),
target_stds=(0.05, 0.05, 0.1, 0.1)),
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0 * rpn_weight),
loss_bbox=dict(
type='IoULoss', linear=True,
loss_weight=10.0 * rpn_weight))
]),
roi_head=dict(
bbox_head=dict(
bbox_coder=dict(target_stds=[0.04, 0.04, 0.08, 0.08]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.5),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rpn=[
dict(
assigner=dict(
type='RegionAssigner', center_ratio=0.2, ignore_ratio=0.5),
allowed_border=-1,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
debug=False)
],
rpn_proposal=dict(max_per_img=300, nms=dict(iou_threshold=0.8)),
rcnn=dict(
assigner=dict(
pos_iou_thr=0.65, neg_iou_thr=0.65, min_pos_iou=0.65),
sampler=dict(type='RandomSampler', num=256))),
test_cfg=dict(
rpn=dict(max_per_img=300, nms=dict(iou_threshold=0.8)),
rcnn=dict(score_thr=1e-3)))
optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2))
|
_base_ = '../faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py'
rpn_weight = 0.7
model = dict(
rpn_head=dict(
_delete_=True,
type='CascadeRPNHead',
num_stages=2,
stages=[
dict(
type='StageCascadeRPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[1.0],
strides=[4, 8, 16, 32, 64]),
adapt_cfg=dict(type='dilation', dilation=3),
bridged_feature=True,
sampling=False,
with_cls=False,
reg_decoded_bbox=True,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=(.0, .0, .0, .0),
target_stds=(0.1, 0.1, 0.5, 0.5)),
loss_bbox=dict(
type='IoULoss', linear=True,
loss_weight=10.0 * rpn_weight)),
dict(
type='StageCascadeRPNHead',
in_channels=256,
feat_channels=256,
adapt_cfg=dict(type='offset'),
bridged_feature=False,
sampling=True,
with_cls=True,
reg_decoded_bbox=True,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=(.0, .0, .0, .0),
target_stds=(0.05, 0.05, 0.1, 0.1)),
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0 * rpn_weight),
loss_bbox=dict(
type='IoULoss', linear=True,
loss_weight=10.0 * rpn_weight))
]),
roi_head=dict(
bbox_head=dict(
bbox_coder=dict(target_stds=[0.04, 0.04, 0.08, 0.08]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.5),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rpn=[
dict(
assigner=dict(
type='RegionAssigner', center_ratio=0.2, ignore_ratio=0.5),
allowed_border=-1,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
debug=False)
],
rpn_proposal=dict(max_per_img=300, nms=dict(iou_threshold=0.8)),
rcnn=dict(
assigner=dict(
pos_iou_thr=0.65, neg_iou_thr=0.65, min_pos_iou=0.65),
sampler=dict(type='RandomSampler', num=256))),
test_cfg=dict(
rpn=dict(max_per_img=300, nms=dict(iou_threshold=0.8)),
rcnn=dict(score_thr=1e-3)))
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
|
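In the MMDetection config system, `_delete_=True` replaces the inherited `rpn_head` dict from the base file outright instead of merging it key by key. A hedged loading sketch (the config file path is an assumption):
```python
# Sketch of loading such a config; _delete_=True means the base file's
# rpn_head is replaced, not merged. The file path is illustrative.
from mmengine.config import Config  # `from mmcv import Config` in older releases

cfg = Config.fromfile("configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py")
print(cfg.model.rpn_head.type)  # -> 'CascadeRPNHead', not the base RPNHead
```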
import csv
import os
from pathlib import Path
from typing import Tuple, Union
import torchaudio
from torch import Tensor
from torch.utils.data import Dataset
from torchaudio._internal import download_url_to_file
from torchaudio.datasets.utils import _extract_tar
_RELEASE_CONFIGS = {
"release1": {
"folder_in_archive": "wavs",
"url": "https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2",
"checksum": "be1a30453f28eb8dd26af4101ae40cbf2c50413b1bb21936cbcdc6fae3de8aa5",
}
}
class LJSPEECH(Dataset):
"""*LJSpeech-1.1* :cite:`ljspeech17` dataset.
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
url (str, optional): The URL to download the dataset from.
(default: ``"https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2"``)
folder_in_archive (str, optional):
The top-level directory of the dataset. (default: ``"wavs"``)
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
def __init__(
self,
root: Union[str, Path],
url: str = _RELEASE_CONFIGS["release1"]["url"],
folder_in_archive: str = _RELEASE_CONFIGS["release1"]["folder_in_archive"],
download: bool = False,
) -> None:
self._parse_filesystem(root, url, folder_in_archive, download)
def _parse_filesystem(self, root: str, url: str, folder_in_archive: str, download: bool) -> None:
root = Path(root)
basename = os.path.basename(url)
archive = root / basename
basename = Path(basename.split(".tar.bz2")[0])
folder_in_archive = basename / folder_in_archive
self._path = root / folder_in_archive
self._metadata_path = root / basename / "metadata.csv"
if download:
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
checksum = _RELEASE_CONFIGS["release1"]["checksum"]
download_url_to_file(url, archive, hash_prefix=checksum)
_extract_tar(archive)
else:
if not os.path.exists(self._path):
raise RuntimeError(
f"The path {self._path} doesn't exist. "
"Please check the ``root`` path or set `download=True` to download it"
)
with open(self._metadata_path, "r", newline="") as metadata:
flist = csv.reader(metadata, delimiter="|", quoting=csv.QUOTE_NONE)
self._flist = list(flist)
def __getitem__(self, n: int) -> Tuple[Tensor, int, str, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
Tuple of the following items;
Tensor:
Waveform
int:
Sample rate
str:
Transcript
str:
Normalized Transcript
"""
line = self._flist[n]
fileid, transcript, normalized_transcript = line
fileid_audio = self._path / (fileid + ".wav")
# Load audio
waveform, sample_rate = torchaudio.load(fileid_audio)
return (
waveform,
sample_rate,
transcript,
normalized_transcript,
)
def __len__(self) -> int:
return len(self._flist)
|
import csv
import os
from pathlib import Path
from typing import Tuple, Union
import torchaudio
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import _extract_tar
_RELEASE_CONFIGS = {
"release1": {
"folder_in_archive": "wavs",
"url": "https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2",
"checksum": "be1a30453f28eb8dd26af4101ae40cbf2c50413b1bb21936cbcdc6fae3de8aa5",
}
}
class LJSPEECH(Dataset):
"""*LJSpeech-1.1* :cite:`ljspeech17` dataset.
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
url (str, optional): The URL to download the dataset from.
(default: ``"https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2"``)
folder_in_archive (str, optional):
The top-level directory of the dataset. (default: ``"wavs"``)
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
def __init__(
self,
root: Union[str, Path],
url: str = _RELEASE_CONFIGS["release1"]["url"],
folder_in_archive: str = _RELEASE_CONFIGS["release1"]["folder_in_archive"],
download: bool = False,
) -> None:
self._parse_filesystem(root, url, folder_in_archive, download)
def _parse_filesystem(self, root: str, url: str, folder_in_archive: str, download: bool) -> None:
root = Path(root)
basename = os.path.basename(url)
archive = root / basename
basename = Path(basename.split(".tar.bz2")[0])
folder_in_archive = basename / folder_in_archive
self._path = root / folder_in_archive
self._metadata_path = root / basename / "metadata.csv"
if download:
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
checksum = _RELEASE_CONFIGS["release1"]["checksum"]
download_url_to_file(url, archive, hash_prefix=checksum)
_extract_tar(archive)
else:
if not os.path.exists(self._path):
raise RuntimeError(
f"The path {self._path} doesn't exist. "
"Please check the ``root`` path or set `download=True` to download it"
)
with open(self._metadata_path, "r", newline="") as metadata:
flist = csv.reader(metadata, delimiter="|", quoting=csv.QUOTE_NONE)
self._flist = list(flist)
def __getitem__(self, n: int) -> Tuple[Tensor, int, str, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
Tuple of the following items;
Tensor:
Waveform
int:
Sample rate
str:
Transcript
str:
Normalized Transcript
"""
line = self._flist[n]
fileid, transcript, normalized_transcript = line
fileid_audio = self._path / (fileid + ".wav")
# Load audio
waveform, sample_rate = torchaudio.load(fileid_audio)
return (
waveform,
sample_rate,
transcript,
normalized_transcript,
)
def __len__(self) -> int:
return len(self._flist)
|
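A short usage sketch for the dataset above; the root directory is a placeholder, and `download=True` fetches and extracts the archive on first use:
```python
# Hypothetical usage of LJSPEECH; root is a placeholder directory.
dataset = LJSPEECH(root="./data", download=True)
waveform, sample_rate, transcript, normalized = dataset[0]
print(sample_rate, transcript[:40])
```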