from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from sentence_transformers.evaluation import TranslationEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseTranslationEvaluator(TranslationEvaluator):
"""
This evaluator extends :class:`~sentence_transformers.evaluation.TranslationEvaluator` but is specifically designed for sparse encoder models.
Given two sets of sentences in different languages, e.g. (en_1, en_2, en_3, ...) and (fr_1, fr_2, fr_3, ...),
where fr_i is assumed to be the translation of en_i, this evaluator checks whether vec(en_i) has the highest
similarity to vec(fr_i) and computes the accuracy in both directions.
Args:
source_sentences (List[str]): List of sentences in the source language.
target_sentences (List[str]): List of sentences in the target language.
show_progress_bar (bool): Whether to show a progress bar when computing embeddings. Defaults to False.
batch_size (int): The batch size to compute sentence embeddings. Defaults to 16.
name (str): The name of the evaluator. Defaults to an empty string.
print_wrong_matches (bool): Whether to print incorrect matches. Defaults to False.
write_csv (bool): Whether to write the evaluation results to a CSV file. Defaults to True.
max_active_dims (Optional[int], optional): The maximum number of active dimensions to use.
`None` uses the model's current `max_active_dims`. Defaults to None.
Example:
::
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTranslationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model, not multilingual but we hope to see some on the Hub soon
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a parallel sentences dataset
dataset = load_dataset("sentence-transformers/parallel-sentences-news-commentary", "en-nl", split="train[:1000]")
# Initialize the TranslationEvaluator using the same texts from two languages
translation_evaluator = SparseTranslationEvaluator(
source_sentences=dataset["english"],
target_sentences=dataset["non_english"],
name="news-commentary-en-nl",
)
results = translation_evaluator(model)
'''
Evaluating translation matching Accuracy of the model on the news-commentary-en-nl dataset:
Accuracy src2trg: 41.40
Accuracy trg2src: 47.70
'''
# Print the results
print(f"Primary metric: {translation_evaluator.primary_metric}")
# => Primary metric: news-commentary-en-nl_mean_accuracy
print(f"Primary metric value: {results[translation_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.4455
"""
def __init__(
self,
source_sentences: list[str],
target_sentences: list[str],
show_progress_bar: bool = False,
batch_size: int = 16,
name: str = "",
print_wrong_matches: bool = False,
write_csv: bool = True,
max_active_dims: int | None = None,
):
self.max_active_dims = max_active_dims
return super().__init__(
source_sentences,
target_sentences,
show_progress_bar=show_progress_bar,
batch_size=batch_size,
name=name,
print_wrong_matches=print_wrong_matches,
write_csv=write_csv,
)
def __call__(
self, model: SparseEncoder, output_path: str | None = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
return super().__call__(model, output_path=output_path, epoch=epoch, steps=steps)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> list[Tensor]:
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_tensor=False,
convert_to_sparse_tensor=True,
save_to_cpu=True,
max_active_dims=self.max_active_dims,
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch=epoch, step=step)
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from sentence_transformers.evaluation import TranslationEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseTranslationEvaluator(TranslationEvaluator):
"""
This evaluator extends :class:`~sentence_transformers.evaluation.TranslationEvaluator` but is specifically designed for sparse encoder models.
Given two sets of sentences in different languages, e.g. (en_1, en_2, en_3, ...) and (fr_1, fr_2, fr_3, ...),
where fr_i is assumed to be the translation of en_i, this evaluator checks whether vec(en_i) has the highest
similarity to vec(fr_i) and computes the accuracy in both directions.
Args:
source_sentences (List[str]): List of sentences in the source language.
target_sentences (List[str]): List of sentences in the target language.
show_progress_bar (bool): Whether to show a progress bar when computing embeddings. Defaults to False.
batch_size (int): The batch size to compute sentence embeddings. Defaults to 16.
name (str): The name of the evaluator. Defaults to an empty string.
print_wrong_matches (bool): Whether to print incorrect matches. Defaults to False.
write_csv (bool): Whether to write the evaluation results to a CSV file. Defaults to True.
truncate_dim (int, optional): The dimension to truncate sentence embeddings to. If None, the model's
current truncation dimension will be used. Defaults to None.
Example:
::
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTranslationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model, not multilingual but we hope to see some on the Hub soon
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a parallel sentences dataset
dataset = load_dataset("sentence-transformers/parallel-sentences-news-commentary", "en-nl", split="train[:1000]")
# Initialize the TranslationEvaluator using the same texts from two languages
translation_evaluator = SparseTranslationEvaluator(
source_sentences=dataset["english"],
target_sentences=dataset["non_english"],
name="news-commentary-en-nl",
)
results = translation_evaluator(model)
'''
Evaluating translation matching Accuracy of the model on the news-commentary-en-nl dataset:
Accuracy src2trg: 41.40
Accuracy trg2src: 47.70
'''
# Print the results
print(f"Primary metric: {translation_evaluator.primary_metric}")
# => Primary metric: news-commentary-en-nl_mean_accuracy
print(f"Primary metric value: {results[translation_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.4455
"""
def __init__(
self,
source_sentences: list[str],
target_sentences: list[str],
show_progress_bar: bool = False,
batch_size: int = 16,
name: str = "",
print_wrong_matches: bool = False,
write_csv: bool = True,
truncate_dim: int | None = None,
):
return super().__init__(
source_sentences,
target_sentences,
show_progress_bar=show_progress_bar,
batch_size=batch_size,
name=name,
print_wrong_matches=print_wrong_matches,
write_csv=write_csv,
truncate_dim=truncate_dim,
)
def __call__(
self, model: SparseEncoder, output_path: str | None = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
return super().__call__(model, output_path=output_path, epoch=epoch, steps=steps)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> list[Tensor]:
kwargs["truncate_dim"] = self.truncate_dim
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_tensor=False,
convert_to_sparse_tensor=True,
save_to_cpu=True,
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch=epoch, step=step)
|
import contextlib
import json
import re
from typing import Any, List
with contextlib.suppress(ImportError):
import yaml
from llama_index.core.output_parsers.base import OutputParserException
def _marshal_llm_to_json(output: str) -> str:
"""
Extract a substring containing valid JSON or array from a string.
Args:
output: A string that may contain a valid JSON object or array surrounded by
extraneous characters or information.
Returns:
A string containing a valid JSON object or array.
"""
output = output.strip()
left_square = output.find("[")
left_brace = output.find("{")
if (left_square < left_brace and left_square != -1) or left_brace == -1:
left = left_square
right = output.rfind("]")
else:
left = left_brace
right = output.rfind("}")
return output[left : right + 1]
def parse_json_markdown(text: str) -> Any:
if "```json" in text:
text = text.split("```json")[1].strip().strip("```").strip()
json_string = _marshal_llm_to_json(text)
try:
json_obj = json.loads(json_string)
except json.JSONDecodeError as e_json:
try:
# NOTE: parsing again with pyyaml
# pyyaml is less strict, and allows for trailing commas
# right now we rely on this since guidance program generates
# trailing commas
json_obj = yaml.safe_load(json_string)
except yaml.YAMLError as e_yaml:
raise OutputParserException(
f"Got invalid JSON object. Error: {e_json} {e_yaml}. "
f"Got JSON string: {json_string}"
)
except NameError as exc:
raise ImportError("Please pip install PyYAML.") from exc
return json_obj
def parse_code_markdown(text: str, only_last: bool) -> List[str]:
# Regular expression pattern to match code within triple-backticks
pattern = r"```(.*?)```"
# Regular expression pattern to match code within triple backticks with
# a Python marker. Like: ```python df.columns```
python_str_pattern = re.compile(r"^```python", re.IGNORECASE)
text = python_str_pattern.sub("```", text)
# Find all matches of the pattern in the text
matches = re.findall(pattern, text, re.DOTALL)
# Return the last matched group if requested
code = matches[-1] if matches and only_last else matches
# If empty we optimistically assume the output is the code
if not code:
# we want to handle cases where the code may start or end with triple
# backticks
# we also want to handle cases where the code is surrounded by regular
# quotes
# we can't just remove all backticks due to JS template strings
candidate = text.strip()
if candidate.startswith('"') and candidate.endswith('"'):
candidate = candidate[1:-1]
if candidate.startswith("'") and candidate.endswith("'"):
candidate = candidate[1:-1]
if candidate.startswith("`") and candidate.endswith("`"):
candidate = candidate[1:-1]
# For triple backticks we split the handling of the start and end
# partly because there can be cases where only one and not the other
# is present, and partly because we don't need to be so worried
# about it being a string in a programming language
if candidate.startswith("```"):
candidate = re.sub(r"^```[a-zA-Z]*", "", candidate)
if candidate.endswith("```"):
candidate = candidate[:-3]
code = [candidate.strip()]
return code
def extract_json_str(text: str) -> str:
"""Extract JSON string from text."""
# NOTE: this regex parsing is taken from langchain.output_parsers.pydantic
match = re.search(r"\{.*\}", text.strip(), re.MULTILINE | re.IGNORECASE | re.DOTALL)
if not match:
raise ValueError(f"Could not extract json string from output: {text}")
return match.group()
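# --- Usage sketch (illustrative, not part of the module above): hypothetical LLM outputs
# showing how these helpers behave; the sample strings below are made up.
llm_output = 'Here is the answer:\n```json\n{"name": "example", "score": 1}\n```'
# parse_json_markdown strips the ```json fence, isolates the JSON span, and parses it,
# falling back to yaml.safe_load for almost-JSON (e.g. trailing commas) when available.
print(parse_json_markdown(llm_output))  # {'name': 'example', 'score': 1}
# parse_code_markdown returns the body of the last fenced block when only_last=True.
print(parse_code_markdown("```python\ndf.columns\n```", only_last=True))
# extract_json_str grabs the outermost {...} span from raw text.
print(extract_json_str('prefix {"a": 1} suffix'))  # {"a": 1}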
|
import contextlib
import json
import re
from typing import Any, List
with contextlib.suppress(ImportError):
import yaml
from llama_index.core.output_parsers.base import OutputParserException
def _marshal_llm_to_json(output: str) -> str:
"""
Extract a substring containing valid JSON or array from a string.
Args:
output: A string that may contain a valid JSON object or array surrounded by
extraneous characters or information.
Returns:
A string containing a valid JSON object or array.
"""
output = output.strip()
left_square = output.find("[")
left_brace = output.find("{")
if left_square < left_brace and left_square != -1:
left = left_square
right = output.rfind("]")
else:
left = left_brace
right = output.rfind("}")
return output[left : right + 1]
def parse_json_markdown(text: str) -> Any:
if "```json" in text:
text = text.split("```json")[1].strip().strip("```").strip()
json_string = _marshal_llm_to_json(text)
try:
json_obj = json.loads(json_string)
except json.JSONDecodeError as e_json:
try:
# NOTE: parsing again with pyyaml
# pyyaml is less strict, and allows for trailing commas
# right now we rely on this since guidance program generates
# trailing commas
json_obj = yaml.safe_load(json_string)
except yaml.YAMLError as e_yaml:
raise OutputParserException(
f"Got invalid JSON object. Error: {e_json} {e_yaml}. "
f"Got JSON string: {json_string}"
)
except NameError as exc:
raise ImportError("Please pip install PyYAML.") from exc
return json_obj
def parse_code_markdown(text: str, only_last: bool) -> List[str]:
# Regular expression pattern to match code within triple-backticks
pattern = r"```(.*?)```"
# Regular expression pattern to match code within triple backticks with
# a Python marker. Like: ```python df.columns```
python_str_pattern = re.compile(r"^```python", re.IGNORECASE)
text = python_str_pattern.sub("```", text)
# Find all matches of the pattern in the text
matches = re.findall(pattern, text, re.DOTALL)
# Return the last matched group if requested
code = matches[-1] if matches and only_last else matches
# If empty we optimistically assume the output is the code
if not code:
# we want to handle cases where the code may start or end with triple
# backticks
# we also want to handle cases where the code is surrounded by regular
# quotes
# we can't just remove all backticks due to JS template strings
candidate = text.strip()
if candidate.startswith('"') and candidate.endswith('"'):
candidate = candidate[1:-1]
if candidate.startswith("'") and candidate.endswith("'"):
candidate = candidate[1:-1]
if candidate.startswith("`") and candidate.endswith("`"):
candidate = candidate[1:-1]
# For triple backticks we split the handling of the start and end
# partly because there can be cases where only one and not the other
# is present, and partly because we don't need to be so worried
# about it being a string in a programming language
if candidate.startswith("```"):
candidate = re.sub(r"^```[a-zA-Z]*", "", candidate)
if candidate.endswith("```"):
candidate = candidate[:-3]
code = [candidate.strip()]
return code
def extract_json_str(text: str) -> str:
"""Extract JSON string from text."""
# NOTE: this regex parsing is taken from langchain.output_parsers.pydantic
match = re.search(r"\{.*\}", text.strip(), re.MULTILINE | re.IGNORECASE | re.DOTALL)
if not match:
raise ValueError(f"Could not extract json string from output: {text}")
return match.group()
|
# Copyright (c) OpenMMLab. All rights reserved.
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .builder import build_linear_layer, build_transformer
from .ckpt_convert import pvt_convert
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .ema import ExpMomentumEMA
from .gaussian_target import gaussian_radius, gen_gaussian_target
from .inverted_residual import InvertedResidual
from .make_divisible import make_divisible
from .misc import interpolate_as, sigmoid_geometric_mean
from .normed_predictor import NormedConv2d, NormedLinear
from .panoptic_gt_processing import preprocess_panoptic_gt
from .point_sample import (get_uncertain_point_coords_with_randomness,
get_uncertainty)
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import DyReLU, SELayer
from .transformer import (DetrTransformerDecoder, DetrTransformerDecoderLayer,
DynamicConv, PatchEmbed, Transformer, nchw_to_nlc,
nlc_to_nchw)
__all__ = [
'ResLayer', 'gaussian_radius', 'gen_gaussian_target',
'DetrTransformerDecoderLayer', 'DetrTransformerDecoder', 'Transformer',
'build_transformer', 'build_linear_layer', 'SinePositionalEncoding',
'LearnedPositionalEncoding', 'DynamicConv', 'SimplifiedBasicBlock',
'NormedLinear', 'NormedConv2d', 'make_divisible', 'InvertedResidual',
'SELayer', 'interpolate_as', 'ConvUpsample', 'CSPLayer',
'adaptive_avg_pool2d', 'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc',
'nlc_to_nchw', 'pvt_convert', 'sigmoid_geometric_mean',
'preprocess_panoptic_gt', 'DyReLU',
'get_uncertain_point_coords_with_randomness', 'get_uncertainty',
'ExpMomentumEMA'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .builder import build_linear_layer, build_transformer
from .ckpt_convert import pvt_convert
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .gaussian_target import gaussian_radius, gen_gaussian_target
from .inverted_residual import InvertedResidual
from .make_divisible import make_divisible
from .misc import interpolate_as, sigmoid_geometric_mean
from .normed_predictor import NormedConv2d, NormedLinear
from .panoptic_gt_processing import preprocess_panoptic_gt
from .point_sample import (get_uncertain_point_coords_with_randomness,
get_uncertainty)
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import DyReLU, SELayer
from .transformer import (DetrTransformerDecoder, DetrTransformerDecoderLayer,
DynamicConv, PatchEmbed, Transformer, nchw_to_nlc,
nlc_to_nchw)
__all__ = [
'ResLayer', 'gaussian_radius', 'gen_gaussian_target',
'DetrTransformerDecoderLayer', 'DetrTransformerDecoder', 'Transformer',
'build_transformer', 'build_linear_layer', 'SinePositionalEncoding',
'LearnedPositionalEncoding', 'DynamicConv', 'SimplifiedBasicBlock',
'NormedLinear', 'NormedConv2d', 'make_divisible', 'InvertedResidual',
'SELayer', 'interpolate_as', 'ConvUpsample', 'CSPLayer',
'adaptive_avg_pool2d', 'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc',
'nlc_to_nchw', 'pvt_convert', 'sigmoid_geometric_mean',
'preprocess_panoptic_gt', 'DyReLU',
'get_uncertain_point_coords_with_randomness', 'get_uncertainty'
]
|
import torch
import torch.nn as nn
class NormalizeDB(nn.Module):
r"""Normalize the spectrogram with a minimum db value"""
def __init__(self, min_level_db, normalization):
super().__init__()
self.min_level_db = min_level_db
self.normalization = normalization
def forward(self, specgram):
specgram = torch.log10(torch.clamp(specgram.squeeze(0), min=1e-5))
if self.normalization:
return torch.clamp((self.min_level_db - 20 * specgram) / self.min_level_db, min=0, max=1)
return specgram
def normalized_waveform_to_bits(waveform: torch.Tensor, bits: int) -> torch.Tensor:
r"""Transform waveform [-1, 1] to label [0, 2 ** bits - 1]"""
assert abs(waveform).max() <= 1.0
waveform = (waveform + 1.0) * (2**bits - 1) / 2
return torch.clamp(waveform, 0, 2**bits - 1).int()
def bits_to_normalized_waveform(label: torch.Tensor, bits: int) -> torch.Tensor:
r"""Transform label [0, 2 ** bits - 1] to waveform [-1, 1]"""
return 2 * label / (2**bits - 1.0) - 1.0
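# --- Usage sketch (illustrative only): exercising the helpers above with made-up values;
# min_level_db=-100 is just an example setting, not a prescribed default.
spec = torch.rand(1, 80, 10)  # (channel, n_freq, time) spectrogram with values in (0, 1)
norm = NormalizeDB(min_level_db=-100, normalization=True)
print(norm(spec).shape)  # torch.Size([80, 10]); channel dim squeezed, values clamped to [0, 1]
# Round trip between a waveform in [-1, 1] and its 8-bit integer labels.
waveform = torch.linspace(-1.0, 1.0, steps=5)
labels = normalized_waveform_to_bits(waveform, bits=8)   # integers in [0, 255]
recovered = bits_to_normalized_waveform(labels, bits=8)  # approximately back in [-1, 1]
print(labels, recovered)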
|
import torch
import torch.nn as nn
class NormalizeDB(nn.Module):
r"""Normalize the spectrogram with a minimum db value"""
def __init__(self, min_level_db, normalization):
super().__init__()
self.min_level_db = min_level_db
self.normalization = normalization
def forward(self, specgram):
specgram = torch.log10(torch.clamp(specgram.squeeze(0), min=1e-5))
if self.normalization:
return torch.clamp((self.min_level_db - 20 * specgram) / self.min_level_db, min=0, max=1)
return specgram
def normalized_waveform_to_bits(waveform: torch.Tensor, bits: int) -> torch.Tensor:
r"""Transform waveform [-1, 1] to label [0, 2 ** bits - 1]"""
assert abs(waveform).max() <= 1.0
waveform = (waveform + 1.0) * (2 ** bits - 1) / 2
return torch.clamp(waveform, 0, 2 ** bits - 1).int()
def bits_to_normalized_waveform(label: torch.Tensor, bits: int) -> torch.Tensor:
r"""Transform label [0, 2 ** bits - 1] to waveform [-1, 1]"""
return 2 * label / (2 ** bits - 1.0) - 1.0
|
from __future__ import annotations
import json
import os
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import Tensor, nn
class LayerNorm(nn.Module):
def __init__(self, dimension: int):
super(LayerNorm, self).__init__()
self.dimension = dimension
self.norm = nn.LayerNorm(dimension)
def forward(self, features: dict[str, Tensor]):
features["sentence_embedding"] = self.norm(features["sentence_embedding"])
return features
def get_sentence_embedding_dimension(self):
return self.dimension
def save(self, output_path, safe_serialization: bool = True) -> None:
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump({"dimension": self.dimension}, fOut, indent=2)
if safe_serialization:
save_safetensors_model(self, os.path.join(output_path, "model.safetensors"))
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
model = LayerNorm(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"))
)
return model
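# --- Usage sketch (illustrative only): this module normalizes the "sentence_embedding" entry
# of a Sentence Transformers feature dict and returns the same dict; 384 is an example size.
layer_norm = LayerNorm(dimension=384)
features = {"sentence_embedding": torch.rand(2, 384)}
print(layer_norm(features)["sentence_embedding"].shape)  # torch.Size([2, 384])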
|
import json
import os
from typing import Dict
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import Tensor, nn
class LayerNorm(nn.Module):
def __init__(self, dimension: int):
super(LayerNorm, self).__init__()
self.dimension = dimension
self.norm = nn.LayerNorm(dimension)
def forward(self, features: Dict[str, Tensor]):
features["sentence_embedding"] = self.norm(features["sentence_embedding"])
return features
def get_sentence_embedding_dimension(self):
return self.dimension
def save(self, output_path, safe_serialization: bool = True) -> None:
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump({"dimension": self.dimension}, fOut, indent=2)
if safe_serialization:
save_safetensors_model(self, os.path.join(output_path, "model.safetensors"))
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
model = LayerNorm(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"))
)
return model
|
# Owner(s): ["module: inductor"]
import torch
from torch._inductor import config
from torch._inductor.async_compile import AsyncCompile, shutdown_compile_workers
from torch._inductor.test_case import run_tests, TestCase
from torch._inductor.utils import fresh_cache
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
)
from torch.testing._internal.inductor_utils import (
GPU_TYPE,
requires_gpu,
requires_triton,
)
@instantiate_parametrized_tests
class TestAsyncCompile(TestCase):
@requires_gpu()
@requires_triton()
@parametrize("method", ("subprocess", "fork", "spawn"))
def test_pool(self, method):
def fn(x, y):
return x + y
x = torch.rand(10).to(GPU_TYPE)
y = torch.rand(10).to(GPU_TYPE)
with config.patch("worker_start_method", method):
shutdown_compile_workers()
AsyncCompile.wait_pool_ready()
with fresh_cache():
compiled_fn = torch.compile(fn)
self.assertEqual(fn(x, y), compiled_fn(x, y))
if __name__ == "__main__":
run_tests()
|
# Owner(s): ["module: inductor"]
import torch
from torch._inductor import config
from torch._inductor.async_compile import AsyncCompile, shutdown_compile_workers
from torch._inductor.test_case import run_tests, TestCase
from torch._inductor.utils import fresh_cache
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
)
from torch.testing._internal.inductor_utils import (
GPU_TYPE,
requires_gpu,
requires_triton,
)
@instantiate_parametrized_tests
class TestAsyncCompile(TestCase):
@requires_gpu()
@requires_triton()
@parametrize("method", ("subprocess", "fork", "spawn"))
def test_pool(self, method):
def fn(x, y):
return x + y
x = torch.rand(10).to(GPU_TYPE)
y = torch.rand(10).to(GPU_TYPE)
with config.patch("worker_start_method", method):
shutdown_compile_workers()
pool = AsyncCompile.process_pool()
pool.ready_future.result(timeout=120)
with fresh_cache():
compiled_fn = torch.compile(fn)
self.assertEqual(fn(x, y), compiled_fn(x, y))
if __name__ == "__main__":
run_tests()
|
"""OpenAI-Like embeddings."""
from typing import Any, Dict, Optional
import httpx
from llama_index.core.callbacks.base import CallbackManager
from llama_index.embeddings.openai import OpenAIEmbedding
class OpenAILikeEmbedding(OpenAIEmbedding):
"""
OpenAI-Like class for embeddings.
Args:
model_name (str):
Model for embedding.
api_key (str):
The API key (if any) to use for the embedding API.
api_base (str):
The base URL for the embedding API.
api_version (str):
The version for the embedding API.
max_retries (int):
The maximum number of retries for the embedding API.
timeout (float):
The timeout for the embedding API.
reuse_client (bool):
Whether to reuse the client for the embedding API.
callback_manager (CallbackManager):
The callback manager for the embedding API.
default_headers (Dict[str, str]):
The default headers for the embedding API.
additional_kwargs (Dict[str, Any]):
Additional kwargs for the embedding API.
dimensions (int):
The number of dimensions for the embedding API.
Example:
```bash
pip install llama-index-embeddings-openai-like
```
```python
from llama_index.embeddings.openai_like import OpenAILikeEmbedding
embedding = OpenAILikeEmbedding(
model_name="my-model-name",
api_base="http://localhost:1234/v1",
api_key="fake",
embed_batch_size=10,
)
```
"""
def __init__(
self,
model_name: str,
embed_batch_size: int = 10,
dimensions: Optional[int] = None,
additional_kwargs: Optional[Dict[str, Any]] = None,
api_key: str = "fake",
api_base: Optional[str] = None,
api_version: Optional[str] = None,
max_retries: int = 10,
timeout: float = 60.0,
reuse_client: bool = True,
callback_manager: Optional[CallbackManager] = None,
default_headers: Optional[Dict[str, str]] = None,
http_client: Optional[httpx.Client] = None,
async_http_client: Optional[httpx.AsyncClient] = None,
num_workers: Optional[int] = None,
**kwargs: Any,
) -> None:
# ensure `model` is not passed via kwargs, as it would cause an error in the parent class
if "model" in kwargs:
raise ValueError(
"Use `model_name` instead of `model` to initialize OpenAILikeEmbedding"
)
super().__init__(
model_name=model_name,
embed_batch_size=embed_batch_size,
dimensions=dimensions,
callback_manager=callback_manager,
additional_kwargs=additional_kwargs,
api_key=api_key,
api_base=api_base,
api_version=api_version,
max_retries=max_retries,
reuse_client=reuse_client,
timeout=timeout,
default_headers=default_headers,
num_workers=num_workers,
**kwargs,
)
|
"""OpenAI-Like embeddings."""
from typing import Any, Dict, Optional
import httpx
from llama_index.core.callbacks.base import CallbackManager
from llama_index.embeddings.openai import OpenAIEmbedding
class OpenAILikeEmbedding(OpenAIEmbedding):
"""OpenAI-Like class for embeddings.
Args:
model_name (str):
Model for embedding.
api_key (str):
The API key (if any) to use for the embedding API.
api_base (str):
The base URL for the embedding API.
api_version (str):
The version for the embedding API.
max_retries (int):
The maximum number of retries for the embedding API.
timeout (float):
The timeout for the embedding API.
reuse_client (bool):
Whether to reuse the client for the embedding API.
callback_manager (CallbackManager):
The callback manager for the embedding API.
default_headers (Dict[str, str]):
The default headers for the embedding API.
additional_kwargs (Dict[str, Any]):
Additional kwargs for the embedding API.
dimensions (int):
The number of dimensions for the embedding API.
Example:
```bash
pip install llama-index-embeddings-openai-like
```
```python
from llama_index.embeddings.openai_like import OpenAILikeEmbedding
embedding = OpenAILikeEmbedding(
model_name="my-model-name",
api_base="http://localhost:1234/v1",
api_key="fake",
embed_batch_size=10,
)
```
"""
def __init__(
self,
model_name: str,
embed_batch_size: int = 10,
dimensions: Optional[int] = None,
additional_kwargs: Optional[Dict[str, Any]] = None,
api_key: str = "fake",
api_base: Optional[str] = None,
api_version: Optional[str] = None,
max_retries: int = 10,
timeout: float = 60.0,
reuse_client: bool = True,
callback_manager: Optional[CallbackManager] = None,
default_headers: Optional[Dict[str, str]] = None,
http_client: Optional[httpx.Client] = None,
async_http_client: Optional[httpx.AsyncClient] = None,
num_workers: Optional[int] = None,
**kwargs: Any,
) -> None:
# ensure `model` is not passed via kwargs, as it would cause an error in the parent class
if "model" in kwargs:
raise ValueError(
"Use `model_name` instead of `model` to initialize OpenAILikeEmbedding"
)
super().__init__(
model_name=model_name,
embed_batch_size=embed_batch_size,
dimensions=dimensions,
callback_manager=callback_manager,
additional_kwargs=additional_kwargs,
api_key=api_key,
api_base=api_base,
api_version=api_version,
max_retries=max_retries,
reuse_client=reuse_client,
timeout=timeout,
default_headers=default_headers,
num_workers=num_workers,
**kwargs,
)
|
_base_ = 'faster-rcnn_r50_fpn_ms-3x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = 'faster_rcnn_r50_fpn_mstrain_3x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import mmcv
import torch
from mmcv.parallel import collate
from mmcv.utils import build_from_cfg
from mmdet.datasets.builder import PIPELINES
from mmdet.models import build_detector
def model_aug_test_template(cfg_file):
# get config
cfg = mmcv.Config.fromfile(cfg_file)
# init model
cfg.model.pretrained = None
cfg.model.train_cfg = None
model = build_detector(cfg.model)
# init test pipeline and set aug test
load_cfg, multi_scale_cfg = cfg.test_pipeline
multi_scale_cfg['flip'] = True
multi_scale_cfg['flip_direction'] = ['horizontal', 'vertical', 'diagonal']
multi_scale_cfg['img_scale'] = [(1333, 800), (800, 600), (640, 480)]
load = build_from_cfg(load_cfg, PIPELINES)
transform = build_from_cfg(multi_scale_cfg, PIPELINES)
results = dict(
img_prefix=osp.join(osp.dirname(__file__), '../../../data'),
img_info=dict(filename='color.jpg'))
results = transform(load(results))
assert len(results['img']) == 12
assert len(results['img_metas']) == 12
results['img'] = [collate([x]) for x in results['img']]
results['img_metas'] = [collate([x]).data[0] for x in results['img_metas']]
# aug test the model
model.eval()
with torch.no_grad():
aug_result = model(return_loss=False, rescale=True, **results)
return aug_result
def test_aug_test_size():
results = dict(
img_prefix=osp.join(osp.dirname(__file__), '../../../data'),
img_info=dict(filename='color.jpg'))
# Define simple pipeline
load = dict(type='LoadImageFromFile')
load = build_from_cfg(load, PIPELINES)
# get config
transform = dict(
type='MultiScaleFlipAug',
transforms=[],
img_scale=[(1333, 800), (800, 600), (640, 480)],
flip=True,
flip_direction=['horizontal', 'vertical', 'diagonal'])
multi_aug_test_module = build_from_cfg(transform, PIPELINES)
results = load(results)
results = multi_aug_test_module(load(results))
# len(["original", "horizontal", "vertical", "diagonal"]) *
# len([(1333, 800), (800, 600), (640, 480)])
assert len(results['img']) == 12
def test_cascade_rcnn_aug_test():
aug_result = model_aug_test_template(
'configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py')
assert len(aug_result[0]) == 80
def test_mask_rcnn_aug_test():
aug_result = model_aug_test_template(
'configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py')
assert len(aug_result[0]) == 2
assert len(aug_result[0][0]) == 80
assert len(aug_result[0][1]) == 80
def test_htc_aug_test():
aug_result = model_aug_test_template('configs/htc/htc_r50_fpn_1x_coco.py')
assert len(aug_result[0]) == 2
assert len(aug_result[0][0]) == 80
assert len(aug_result[0][1]) == 80
def test_scnet_aug_test():
aug_result = model_aug_test_template(
'configs/scnet/scnet_r50_fpn_1x_coco.py')
assert len(aug_result[0]) == 2
assert len(aug_result[0][0]) == 80
assert len(aug_result[0][1]) == 80
def test_cornernet_aug_test():
# get config
cfg = mmcv.Config.fromfile(
'configs/cornernet/cornernet_hourglass104_mstest_10x5_210e_coco.py')
# init model
cfg.model.pretrained = None
cfg.model.train_cfg = None
model = build_detector(cfg.model)
# init test pipeline and set aug test
load_cfg, multi_scale_cfg = cfg.test_pipeline
multi_scale_cfg['flip'] = True
multi_scale_cfg['flip_direction'] = ['horizontal', 'vertical', 'diagonal']
multi_scale_cfg['scale_factor'] = [0.5, 1.0, 2.0]
load = build_from_cfg(load_cfg, PIPELINES)
transform = build_from_cfg(multi_scale_cfg, PIPELINES)
results = dict(
img_prefix=osp.join(osp.dirname(__file__), '../../../data'),
img_info=dict(filename='color.jpg'))
results = transform(load(results))
assert len(results['img']) == 12
assert len(results['img_metas']) == 12
results['img'] = [collate([x]) for x in results['img']]
results['img_metas'] = [collate([x]).data[0] for x in results['img_metas']]
# aug test the model
model.eval()
with torch.no_grad():
aug_result = model(return_loss=False, rescale=True, **results)
assert len(aug_result[0]) == 80
|
import os.path as osp
import mmcv
import torch
from mmcv.parallel import collate
from mmcv.utils import build_from_cfg
from mmdet.datasets.builder import PIPELINES
from mmdet.models import build_detector
def model_aug_test_template(cfg_file):
# get config
cfg = mmcv.Config.fromfile(cfg_file)
# init model
cfg.model.pretrained = None
cfg.model.train_cfg = None
model = build_detector(cfg.model)
# init test pipeline and set aug test
load_cfg, multi_scale_cfg = cfg.test_pipeline
multi_scale_cfg['flip'] = True
multi_scale_cfg['flip_direction'] = ['horizontal', 'vertical', 'diagonal']
multi_scale_cfg['img_scale'] = [(1333, 800), (800, 600), (640, 480)]
load = build_from_cfg(load_cfg, PIPELINES)
transform = build_from_cfg(multi_scale_cfg, PIPELINES)
results = dict(
img_prefix=osp.join(osp.dirname(__file__), '../../../data'),
img_info=dict(filename='color.jpg'))
results = transform(load(results))
assert len(results['img']) == 12
assert len(results['img_metas']) == 12
results['img'] = [collate([x]) for x in results['img']]
results['img_metas'] = [collate([x]).data[0] for x in results['img_metas']]
# aug test the model
model.eval()
with torch.no_grad():
aug_result = model(return_loss=False, rescale=True, **results)
return aug_result
def test_aug_test_size():
results = dict(
img_prefix=osp.join(osp.dirname(__file__), '../../../data'),
img_info=dict(filename='color.jpg'))
# Define simple pipeline
load = dict(type='LoadImageFromFile')
load = build_from_cfg(load, PIPELINES)
# get config
transform = dict(
type='MultiScaleFlipAug',
transforms=[],
img_scale=[(1333, 800), (800, 600), (640, 480)],
flip=True,
flip_direction=['horizontal', 'vertical', 'diagonal'])
multi_aug_test_module = build_from_cfg(transform, PIPELINES)
results = load(results)
results = multi_aug_test_module(load(results))
# len(["original", "horizontal", "vertical", "diagonal"]) *
# len([(1333, 800), (800, 600), (640, 480)])
assert len(results['img']) == 12
def test_cascade_rcnn_aug_test():
aug_result = model_aug_test_template(
'configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py')
assert len(aug_result[0]) == 80
def test_mask_rcnn_aug_test():
aug_result = model_aug_test_template(
'configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py')
assert len(aug_result[0]) == 2
assert len(aug_result[0][0]) == 80
assert len(aug_result[0][1]) == 80
def test_htc_aug_test():
aug_result = model_aug_test_template('configs/htc/htc_r50_fpn_1x_coco.py')
assert len(aug_result[0]) == 2
assert len(aug_result[0][0]) == 80
assert len(aug_result[0][1]) == 80
def test_scnet_aug_test():
aug_result = model_aug_test_template(
'configs/scnet/scnet_r50_fpn_1x_coco.py')
assert len(aug_result[0]) == 2
assert len(aug_result[0][0]) == 80
assert len(aug_result[0][1]) == 80
def test_cornernet_aug_test():
# get config
cfg = mmcv.Config.fromfile(
'configs/cornernet/cornernet_hourglass104_mstest_10x5_210e_coco.py')
# init model
cfg.model.pretrained = None
cfg.model.train_cfg = None
model = build_detector(cfg.model)
# init test pipeline and set aug test
load_cfg, multi_scale_cfg = cfg.test_pipeline
multi_scale_cfg['flip'] = True
multi_scale_cfg['flip_direction'] = ['horizontal', 'vertical', 'diagonal']
multi_scale_cfg['scale_factor'] = [0.5, 1.0, 2.0]
load = build_from_cfg(load_cfg, PIPELINES)
transform = build_from_cfg(multi_scale_cfg, PIPELINES)
results = dict(
img_prefix=osp.join(osp.dirname(__file__), '../../../data'),
img_info=dict(filename='color.jpg'))
results = transform(load(results))
assert len(results['img']) == 12
assert len(results['img_metas']) == 12
results['img'] = [collate([x]) for x in results['img']]
results['img_metas'] = [collate([x]).data[0] for x in results['img_metas']]
# aug test the model
model.eval()
with torch.no_grad():
aug_result = model(return_loss=False, rescale=True, **results)
assert len(aug_result[0]) == 80
|
import logging
from sentence_transformers.sparse_encoder import (
SparseEncoder,
SparseNanoBEIREvaluator,
)
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
evaluator = SparseNanoBEIREvaluator(
dataset_names=None, # None means evaluate on all datasets
show_progress_bar=True,
batch_size=16,
)
# Run evaluation
results = evaluator(model)
"""
Average Querie: num_rows: 49.92307692307692, num_cols: 30522.0, row_non_zero_mean: 74.91560451801007, row_sparsity_mean: 0.9975455219929035
Average Corpus: num_rows: 4334.7692307692305, num_cols: 30522.0, row_non_zero_mean: 174.81000049297626, row_sparsity_mean: 0.9942726905529315
Aggregated for Score Function: dot
Accuracy@1: 58.72%
Accuracy@3: 75.37%
Accuracy@5: 80.76%
Accuracy@10: 87.07%
Precision@1: 58.72%
Recall@1: 35.61%
Precision@3: 36.31%
Recall@3: 50.84%
Precision@5: 27.75%
Recall@5: 56.55%
Precision@10: 19.18%
Recall@10: 64.24%
MRR@10: 0.6821
NDCG@10: 0.6204
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: NanoBEIR_mean_dot_ndcg@10
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6204
|
import logging
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseNanoBEIREvaluator,
SpladePooling,
)
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Create evaluator for all NanoBEIR datasets
evaluator = SparseNanoBEIREvaluator(
dataset_names=None, # None means evaluate on all datasets
show_progress_bar=True,
batch_size=16,
)
# Run evaluation
results = evaluator(model)
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
|
from ...utils import is_torch_available
if is_torch_available():
from .auraflow_transformer_2d import AuraFlowTransformer2DModel
from .cogvideox_transformer_3d import CogVideoXTransformer3DModel
from .dit_transformer_2d import DiTTransformer2DModel
from .dual_transformer_2d import DualTransformer2DModel
from .hunyuan_transformer_2d import HunyuanDiT2DModel
from .latte_transformer_3d import LatteTransformer3DModel
from .lumina_nextdit2d import LuminaNextDiT2DModel
from .pixart_transformer_2d import PixArtTransformer2DModel
from .prior_transformer import PriorTransformer
from .stable_audio_transformer import StableAudioDiTModel
from .t5_film_transformer import T5FilmDecoder
from .transformer_2d import Transformer2DModel
from .transformer_allegro import AllegroTransformer3DModel
from .transformer_cogview3plus import CogView3PlusTransformer2DModel
from .transformer_flux import FluxTransformer2DModel
from .transformer_mochi import MochiTransformer3DModel
from .transformer_sd3 import SD3Transformer2DModel
from .transformer_temporal import TransformerTemporalModel
|
from ...utils import is_torch_available
if is_torch_available():
from .auraflow_transformer_2d import AuraFlowTransformer2DModel
from .cogvideox_transformer_3d import CogVideoXTransformer3DModel
from .dit_transformer_2d import DiTTransformer2DModel
from .dual_transformer_2d import DualTransformer2DModel
from .hunyuan_transformer_2d import HunyuanDiT2DModel
from .latte_transformer_3d import LatteTransformer3DModel
from .lumina_nextdit2d import LuminaNextDiT2DModel
from .pixart_transformer_2d import PixArtTransformer2DModel
from .prior_transformer import PriorTransformer
from .stable_audio_transformer import StableAudioDiTModel
from .t5_film_transformer import T5FilmDecoder
from .transformer_2d import Transformer2DModel
from .transformer_allegro import AllegroTransformer3DModel
from .transformer_cogview3plus import CogView3PlusTransformer2DModel
from .transformer_flux import FluxTransformer2DModel
from .transformer_sd3 import SD3Transformer2DModel
from .transformer_temporal import TransformerTemporalModel
|
"""
Hub is a central trustworthy entity that is aware of the existence of isolated apps and can reliably receive user queries and route them to the appropriate apps.
"""
from typing import Optional, Sequence, Callable
from llama_index.core.agent.react.output_parser import ReActOutputParser
from llama_index.core.callbacks import (
CallbackManager,
)
from llama_index.core.llms.llm import LLM
from llama_index.core.memory.chat_memory_buffer import ChatMemoryBuffer
from llama_index.core.memory.types import BaseMemory
from llama_index.core.settings import Settings
from llama_index.core.tools import BaseTool, ToolOutput
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from llama_index.core.llms import ChatMessage, MessageRole
from llama_index.core.llama_pack.base import BaseLlamaPack
from .planner import HubPlanner
from .tool_importer import ToolImporter
from .hub_operator import HubOperator
class SecGPTPack(BaseLlamaPack):
"""
SecGPT Hub.
A central trustworthy entity that routes user queries to appropriate isolated apps.
"""
def __init__(
self,
tools: Sequence[BaseTool],
tool_specs: Sequence[BaseToolSpec],
llm: Optional[LLM] = None,
memory: Optional[BaseMemory] = None,
output_parser: Optional[ReActOutputParser] = None,
verbose: bool = False,
handle_reasoning_failure_fn: Optional[
Callable[[CallbackManager, Exception], ToolOutput]
] = None,
user_id: Optional[str] = "0",
) -> None:
"""
Initialize the SecGPTPack.
Args:
tools (Sequence[BaseTool]): A sequence of available tools.
tool_specs (Sequence[BaseToolSpec]): Specifications for the tools.
llm (LLM, optional): Language Model used for processing. Defaults to Settings.llm.
memory (BaseMemory, optional): Memory component to keep track of conversation history. Defaults to ChatMemoryBuffer.
output_parser (Optional[ReActOutputParser], optional): Parser for handling the output. Defaults to None.
verbose (bool, optional): Flag to enable verbose output. Defaults to False.
handle_reasoning_failure_fn (Optional[Callable[[CallbackManager, Exception], ToolOutput]], optional): Callable function to handle reasoning failures. Defaults to None.
user_id (Optional[str], optional): User identifier. Defaults to "0".
"""
self.llm = llm or Settings.llm
self.memory = memory or ChatMemoryBuffer.from_defaults(
chat_history=[], llm=self.llm
)
self.output_parser = output_parser
self.verbose = verbose
self.handle_reasoning_failure_fn = handle_reasoning_failure_fn
self.user_id = user_id
self.planner = HubPlanner(self.llm)
self.tool_importer = ToolImporter(tools, tool_specs)
self.hub_operator = HubOperator(self.tool_importer, self.user_id)
def chat(
self,
query: str,
) -> str:
"""
Process a user query and generate a response.
Args:
query (str): The user query to process.
Returns:
str: The response generated by SecGPT.
"""
memory_content = self.memory.get()
self.memory.put(ChatMessage(role=MessageRole.USER, content=(query)))
tool_info = self.tool_importer.get_tool_info()
plan = self.planner.plan_generate(query, tool_info, memory_content)
response = self.hub_operator.run(query, plan)
self.memory.put(ChatMessage(role=MessageRole.CHATBOT, content=(response)))
return response
# Create an alias for the SecGPTPack class
Hub = SecGPTPack
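# --- Usage sketch (illustrative only, kept as comments): `search_tool` and `WeatherToolSpec`
# are hypothetical placeholders for any llama_index BaseTool / BaseToolSpec instances, and an
# LLM must be available via `Settings.llm` or passed explicitly.
#
#   hub = SecGPTPack(tools=[search_tool], tool_specs=[WeatherToolSpec()])
#   print(hub.chat("What's the weather like in Paris tomorrow?"))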
|
"""
Hub is a central trustworthy entity that is aware of the existence of isolated apps and can reliably receive user queries and route them to the appropriate apps.
"""
from typing import Optional, Sequence, Callable
from llama_index.core.agent.react.output_parser import ReActOutputParser
from llama_index.core.callbacks import (
CallbackManager,
)
from llama_index.core.llms.llm import LLM
from llama_index.core.memory.chat_memory_buffer import ChatMemoryBuffer
from llama_index.core.memory.types import BaseMemory
from llama_index.core.settings import Settings
from llama_index.core.tools import BaseTool, ToolOutput
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from llama_index.core.llms import ChatMessage, MessageRole
from llama_index.core.llama_pack.base import BaseLlamaPack
from .planner import HubPlanner
from .tool_importer import ToolImporter
from .hub_operator import HubOperator
class SecGPTPack(BaseLlamaPack):
"""
SecGPT Hub.
A central trustworthy entity that routes user queries to appropriate isolated apps.
"""
def __init__(
self,
tools: Sequence[BaseTool],
tool_specs: Sequence[BaseToolSpec],
llm: Optional[LLM] = None,
memory: Optional[BaseMemory] = None,
output_parser: Optional[ReActOutputParser] = None,
verbose: bool = False,
handle_reasoning_failure_fn: Optional[
Callable[[CallbackManager, Exception], ToolOutput]
] = None,
user_id: Optional[str] = "0",
) -> None:
"""
Initialize the SecGPTPack.
Args:
tools (Sequence[BaseTool]): A sequence of available tools.
tool_specs (Sequence[BaseToolSpec]): Specifications for the tools.
llm (LLM, optional): Language Model used for processing. Defaults to Settings.llm.
memory (BaseMemory, optional): Memory component to keep track of conversation history. Defaults to ChatMemoryBuffer.
output_parser (Optional[ReActOutputParser], optional): Parser for handling the output. Defaults to None.
verbose (bool, optional): Flag to enable verbose output. Defaults to False.
handle_reasoning_failure_fn (Optional[Callable[[CallbackManager, Exception], ToolOutput]], optional): Callable function to handle reasoning failures. Defaults to None.
user_id (Optional[str], optional): User identifier. Defaults to "0".
"""
self.llm = llm or Settings.llm
self.memory = memory or ChatMemoryBuffer.from_defaults(
chat_history=[], llm=self.llm
)
self.output_parser = output_parser
self.verbose = verbose
self.handle_reasoning_failure_fn = handle_reasoning_failure_fn
self.user_id = user_id
self.planner = HubPlanner(self.llm)
self.tool_importer = ToolImporter(tools, tool_specs)
self.hub_operator = HubOperator(self.tool_importer, self.user_id)
def chat(
self,
query: str,
) -> str:
"""
Process a user query and generate a response.
Args:
query (str): The user query to process.
Returns:
str: The response generated by SecGPT.
"""
memory_content = self.memory.get()
self.memory.put(ChatMessage(role=MessageRole.USER, content=(query)))
tool_info = self.tool_importer.get_tool_info()
plan = self.planner.plan_generate(query, tool_info, memory_content)
response = self.hub_operator.run(query, plan)
self.memory.put(ChatMessage(role=MessageRole.CHATBOT, content=(response)))
return response
# Create an alias for the SecGPTPack class
Hub = SecGPTPack
|
from __future__ import annotations
from sentence_transformers import util
from sentence_transformers.losses.CoSENTLoss import CoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseCoSENTLoss(CoSENTLoss):
def __init__(self, model: SparseEncoder, scale: float = 20.0, similarity_fct=util.cos_sim) -> None:
"""
This class implements CoSENT (Cosine Sentence).
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(i,j)-s(k,l))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition.
Anecdotal experiments show that this loss function produces a more powerful training signal than :class:`SparseCosineSimilarityLoss`,
resulting in faster convergence and a final model with superior performance. Consequently, SparseCoSENTLoss may be used
as a drop-in replacement for :class:`SparseCosineSimilarityLoss` in any training script.
Args:
model: SparseEncoder
similarity_fct: Function to compute the PAIRWISE similarity
between embeddings. Default is
``util.pairwise_cos_sim``.
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://kexue.fm/archives/8847
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseAnglELoss` is SparseCoSENTLoss with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``.
- :class:`SparseCosineSimilarityLoss` seems to produce a weaker training signal than SparseCoSENTLoss. In our experiments, SparseCoSENTLoss is recommended.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SpladeLoss(model=model, loss=losses.SparseCoSENTLoss(model), lambda_corpus=5e-5, all_docs=True)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
model.similarity_fn_name = "cosine"
return super().__init__(model, scale=scale, similarity_fct=similarity_fct)
|
from __future__ import annotations
from sentence_transformers import util
from sentence_transformers.losses.CoSENTLoss import CoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseCoSENTLoss(CoSENTLoss):
def __init__(self, model: SparseEncoder, scale: float = 20.0, similarity_fct=util.cos_sim) -> None:
"""
This class implements CoSENT (Cosine Sentence).
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(i,j)-s(k,l))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition.
Anecdotal experiments show that this loss function produces a more powerful training signal than :class:`SparseCosineSimilarityLoss`,
resulting in faster convergence and a final model with superior performance. Consequently, SparseCoSENTLoss may be used
as a drop-in replacement for :class:`SparseCosineSimilarityLoss` in any training script.
Args:
model: SparseEncoder
similarity_fct: Function to compute the PAIRWISE similarity
between embeddings. Default is
``util.pairwise_cos_sim``.
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://kexue.fm/archives/8847
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseAnglELoss` is SparseCoSENTLoss with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``.
- :class:`SparseCosineSimilarityLoss` seems to produce a weaker training signal than SparseCoSENTLoss. In our experiments, SparseCoSENTLoss is recommended.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SparseCoSENTLoss(model)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
return super().__init__(model, scale=scale, similarity_fct=similarity_fct)
|
"""
===================
Torchscript support
===================
.. note::
Try on `Colab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/main/_generated_ipynb_notebooks/plot_scripted_tensor_transforms.ipynb>`_
or :ref:`go to the end <sphx_glr_download_auto_examples_others_plot_scripted_tensor_transforms.py>` to download the full example code.
This example illustrates `torchscript
<https://pytorch.org/docs/stable/jit.html>`_ support of the torchvision
:ref:`transforms <transforms>` on Tensor images.
"""
# %%
from pathlib import Path
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torchvision.transforms as v1
from torchvision.io import read_image
plt.rcParams["savefig.bbox"] = 'tight'
torch.manual_seed(1)
# If you're trying to run that on Colab, you can download the assets and the
# helpers from https://github.com/pytorch/vision/tree/main/gallery/
import sys
sys.path += ["../transforms"]
from helpers import plot
ASSETS_PATH = Path('../assets')
# %%
# Most transforms support torchscript. For composing transforms, we use
# :class:`torch.nn.Sequential` instead of
# :class:`~torchvision.transforms.v2.Compose`:
dog1 = read_image(str(ASSETS_PATH / 'dog1.jpg'))
dog2 = read_image(str(ASSETS_PATH / 'dog2.jpg'))
transforms = torch.nn.Sequential(
v1.RandomCrop(224),
v1.RandomHorizontalFlip(p=0.3),
)
scripted_transforms = torch.jit.script(transforms)
plot([dog1, scripted_transforms(dog1), dog2, scripted_transforms(dog2)])
# %%
# .. warning::
#
# Above we have used transforms from the ``torchvision.transforms``
# namespace, i.e. the "v1" transforms. The v2 transforms from the
# ``torchvision.transforms.v2`` namespace are the :ref:`recommended
# <v1_or_v2>` way to use transforms in your code.
#
# The v2 transforms also support torchscript, but if you call
# ``torch.jit.script()`` on a v2 **class** transform, you'll actually end up
# with its (scripted) v1 equivalent. This may lead to slightly different
# results between the scripted and eager executions due to implementation
# differences between v1 and v2.
#
# If you really need torchscript support for the v2 transforms, **we
# recommend scripting the functionals** from the
# ``torchvision.transforms.v2.functional`` namespace to avoid surprises.
#
# Below we now show how to combine image transformations and a model forward
# pass, while using ``torch.jit.script`` to obtain a single scripted module.
#
# Let's define a ``Predictor`` module that transforms the input tensor and then
# applies an ImageNet model on it.
from torchvision.models import resnet18, ResNet18_Weights
class Predictor(nn.Module):
def __init__(self):
super().__init__()
weights = ResNet18_Weights.DEFAULT
self.resnet18 = resnet18(weights=weights, progress=False).eval()
self.transforms = weights.transforms(antialias=True)
def forward(self, x: torch.Tensor) -> torch.Tensor:
with torch.no_grad():
x = self.transforms(x)
y_pred = self.resnet18(x)
return y_pred.argmax(dim=1)
# %%
# Now, let's define scripted and non-scripted instances of ``Predictor`` and
# apply it on multiple tensor images of the same size
device = "cuda" if torch.cuda.is_available() else "cpu"
predictor = Predictor().to(device)
scripted_predictor = torch.jit.script(predictor).to(device)
batch = torch.stack([dog1, dog2]).to(device)
res = predictor(batch)
res_scripted = scripted_predictor(batch)
# %%
# We can verify that the predictions of the scripted and non-scripted models are
# the same:
import json
with open(Path('../assets') / 'imagenet_class_index.json') as labels_file:
labels = json.load(labels_file)
for i, (pred, pred_scripted) in enumerate(zip(res, res_scripted)):
assert pred == pred_scripted
print(f"Prediction for Dog {i + 1}: {labels[str(pred.item())]}")
# %%
# Since the model is scripted, it can be easily dumped on disk and re-used
import tempfile
with tempfile.NamedTemporaryFile() as f:
scripted_predictor.save(f.name)
dumped_scripted_predictor = torch.jit.load(f.name)
res_scripted_dumped = dumped_scripted_predictor(batch)
assert (res_scripted_dumped == res_scripted).all()
# %%
|
"""
===================
Torchscript support
===================
.. note::
    Try on `Colab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/main/_generated_ipynb_notebooks/plot_scripted_tensor_transforms.ipynb>`_
or :ref:`go to the end <sphx_glr_download_auto_examples_others_plot_scripted_tensor_transforms.py>` to download the full example code.
This example illustrates `torchscript
<https://pytorch.org/docs/stable/jit.html>`_ support of the torchvision
:ref:`transforms <transforms>` on Tensor images.
"""
# %%
from pathlib import Path
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torchvision.transforms as v1
from torchvision.io import read_image
plt.rcParams["savefig.bbox"] = 'tight'
torch.manual_seed(1)
# If you're trying to run that on Colab, you can download the assets and the
# helpers from https://github.com/pytorch/vision/tree/main/gallery/
import sys
sys.path += ["../transforms"]
from helpers import plot
ASSETS_PATH = Path('../assets')
# %%
# Most transforms support torchscript. For composing transforms, we use
# :class:`torch.nn.Sequential` instead of
# :class:`~torchvision.transforms.v2.Compose`:
dog1 = read_image(str(ASSETS_PATH / 'dog1.jpg'))
dog2 = read_image(str(ASSETS_PATH / 'dog2.jpg'))
transforms = torch.nn.Sequential(
v1.RandomCrop(224),
v1.RandomHorizontalFlip(p=0.3),
)
scripted_transforms = torch.jit.script(transforms)
plot([dog1, scripted_transforms(dog1), dog2, scripted_transforms(dog2)])
# %%
# .. warning::
#
# Above we have used transforms from the ``torchvision.transforms``
# namespace, i.e. the "v1" transforms. The v2 transforms from the
# ``torchvision.transforms.v2`` namespace are the :ref:`recommended
# <v1_or_v2>` way to use transforms in your code.
#
# The v2 transforms also support torchscript, but if you call
# ``torch.jit.script()`` on a v2 **class** transform, you'll actually end up
# with its (scripted) v1 equivalent. This may lead to slightly different
# results between the scripted and eager executions due to implementation
# differences between v1 and v2.
#
# If you really need torchscript support for the v2 transforms, **we
# recommend scripting the functionals** from the
# ``torchvision.transforms.v2.functional`` namespace to avoid surprises.
#
# Below we now show how to combine image transformations and a model forward
# pass, while using ``torch.jit.script`` to obtain a single scripted module.
#
# Let's define a ``Predictor`` module that transforms the input tensor and then
# applies an ImageNet model on it.
from torchvision.models import resnet18, ResNet18_Weights
class Predictor(nn.Module):
def __init__(self):
super().__init__()
weights = ResNet18_Weights.DEFAULT
self.resnet18 = resnet18(weights=weights, progress=False).eval()
self.transforms = weights.transforms(antialias=True)
def forward(self, x: torch.Tensor) -> torch.Tensor:
with torch.no_grad():
x = self.transforms(x)
y_pred = self.resnet18(x)
return y_pred.argmax(dim=1)
# %%
# Now, let's define scripted and non-scripted instances of ``Predictor`` and
# apply it on multiple tensor images of the same size
device = "cuda" if torch.cuda.is_available() else "cpu"
predictor = Predictor().to(device)
scripted_predictor = torch.jit.script(predictor).to(device)
batch = torch.stack([dog1, dog2]).to(device)
res = predictor(batch)
res_scripted = scripted_predictor(batch)
# %%
# We can verify that the predictions of the scripted and non-scripted models are
# the same:
import json
with open(Path('../assets') / 'imagenet_class_index.json') as labels_file:
labels = json.load(labels_file)
for i, (pred, pred_scripted) in enumerate(zip(res, res_scripted)):
assert pred == pred_scripted
print(f"Prediction for Dog {i + 1}: {labels[str(pred.item())]}")
# %%
# Since the model is scripted, it can be easily dumped on disk and re-used
import tempfile
with tempfile.NamedTemporaryFile() as f:
scripted_predictor.save(f.name)
dumped_scripted_predictor = torch.jit.load(f.name)
res_scripted_dumped = dumped_scripted_predictor(batch)
assert (res_scripted_dumped == res_scripted).all()
# %%
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_shrink
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_silu as hard_swish
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_sigmoid
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import silu as swish
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import tanh
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_silu as hard_swish
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_sigmoid
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import silu as swish
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import tanh
|
from typing_extensions import TYPE_CHECKING
if TYPE_CHECKING:
from rich.console import Console, ConsoleOptions, RenderResult
from rich.measure import Measurement
from docarray.typing.tensor.abstract_tensor import AbstractTensor
class TensorDisplay:
"""
Rich representation of a tensor.
"""
tensor_min_width: int = 30
def __init__(self, tensor: 'AbstractTensor'):
self.tensor = tensor
def __rich_console__(
self, console: 'Console', options: 'ConsoleOptions'
) -> 'RenderResult':
comp_be = self.tensor.get_comp_backend()
t_squeezed = comp_be.squeeze(comp_be.detach(self.tensor))
if comp_be.n_dim(t_squeezed) == 1 and comp_be.shape(t_squeezed)[0] < 200:
import colorsys
from rich.color import Color
from rich.segment import Segment
from rich.style import Style
tensor_normalized = comp_be.minmax_normalize(
comp_be.detach(self.tensor), (0, 5)
)
hue = 0.75
saturation = 1.0
for idx, y in enumerate(tensor_normalized):
luminance = 0.1 + ((y / 5) * 0.7)
r, g, b = colorsys.hls_to_rgb(hue, luminance + 0.07, saturation)
color = Color.from_rgb(r * 255, g * 255, b * 255)
yield Segment('▄', Style(color=color, bgcolor=color))
if idx != 0 and idx % options.max_width == 0:
yield Segment.line()
else:
from rich.text import Text
yield Text(f'{type(self.tensor)} of shape {comp_be.shape(self.tensor)}')
def __rich_measure__(
self, console: 'Console', options: 'ConsoleOptions'
) -> 'Measurement':
from rich.measure import Measurement
width = self._compute_table_width(max_width=options.max_width)
return Measurement(1, width)
def _compute_table_width(self, max_width: int) -> int:
"""
Compute the width of the table. Depending on the length of the tensor, the width
should be in the range of 30 (min) and a given `max_width`.
:return: the width of the table
"""
comp_be = self.tensor.get_comp_backend()
t_squeezed = comp_be.squeeze(comp_be.detach(self.tensor))
min_capped = max(comp_be.shape(t_squeezed)[0], self.tensor_min_width)
min_max_capped = min(min_capped, max_width)
return min_max_capped
|
from typing_extensions import TYPE_CHECKING
if TYPE_CHECKING:
from rich.console import Console, ConsoleOptions, RenderResult
from rich.measure import Measurement
from docarray.typing.tensor.abstract_tensor import AbstractTensor
class TensorDisplay:
"""
Rich representation of a tensor.
"""
def __init__(self, tensor: 'AbstractTensor'):
self.tensor = tensor
def __rich_console__(
self, console: 'Console', options: 'ConsoleOptions'
) -> 'RenderResult':
comp_be = self.tensor.get_comp_backend()
t_squeezed = comp_be.squeeze(comp_be.detach(self.tensor))
if comp_be.n_dim(t_squeezed) == 1 and comp_be.shape(t_squeezed)[0] < 200:
import colorsys
from rich.color import Color
from rich.segment import Segment
from rich.style import Style
tensor_normalized = comp_be.minmax_normalize(
comp_be.detach(self.tensor), (0, 5)
)
hue = 0.75
saturation = 1.0
for idx, y in enumerate(tensor_normalized):
luminance = 0.1 + ((y / 5) * 0.7)
r, g, b = colorsys.hls_to_rgb(hue, luminance + 0.07, saturation)
color = Color.from_rgb(r * 255, g * 255, b * 255)
yield Segment('▄', Style(color=color, bgcolor=color))
if idx != 0 and idx % options.max_width == 0:
yield Segment.line()
else:
from rich.text import Text
yield Text(f'{type(self.tensor)} of shape {comp_be.shape(self.tensor)}')
def __rich_measure__(
self, console: 'Console', options: 'ConsoleOptions'
) -> 'Measurement':
from rich.measure import Measurement
return Measurement(1, options.max_width)
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
cudnn_benchmark = True
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
relu_before_extra_convs=True,
no_norm_on_lateral=True,
norm_cfg=norm_cfg),
bbox_head=dict(type='RetinaSepBNHead', num_ins=5, norm_cfg=norm_cfg),
# training and testing settings
train_cfg=dict(assigner=dict(neg_iou_thr=0.5)))
# dataset settings
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=(640, 640),
ratio_range=(0.8, 1.2),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(640, 640)),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=(640, 640)),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(640, 640),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=64),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=4,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(
type='SGD',
lr=0.08,
momentum=0.9,
weight_decay=0.0001,
paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True))
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=1000,
warmup_ratio=0.1,
step=[30, 40])
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=50)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
cudnn_benchmark = True
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
relu_before_extra_convs=True,
no_norm_on_lateral=True,
norm_cfg=norm_cfg),
bbox_head=dict(type='RetinaSepBNHead', num_ins=5, norm_cfg=norm_cfg),
# training and testing settings
train_cfg=dict(assigner=dict(neg_iou_thr=0.5)))
# dataset settings
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=(640, 640),
ratio_range=(0.8, 1.2),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(640, 640)),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=(640, 640)),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(640, 640),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=64),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=4,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(
type='SGD',
lr=0.08,
momentum=0.9,
weight_decay=0.0001,
paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True))
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=1000,
warmup_ratio=0.1,
step=[30, 40])
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=50)
|
from keras.src.utils.module_utils import dmtree
def register_tree_node_class(cls):
return cls
def is_nested(structure):
return dmtree.is_nested(structure)
def traverse(func, structure, top_down=True):
return dmtree.traverse(func, structure, top_down=top_down)
def flatten(structure):
return dmtree.flatten(structure)
def map_structure(func, *structures):
return dmtree.map_structure(func, *structures)
def map_structure_up_to(shallow_structure, func, *structures):
return dmtree.map_structure_up_to(shallow_structure, func, *structures)
def assert_same_structure(a, b, check_types=True):
return dmtree.assert_same_structure(a, b, check_types=check_types)
def pack_sequence_as(structure, flat_sequence, sequence_fn=None):
is_nested_fn = dmtree.is_nested
sequence_fn = sequence_fn or dmtree._sequence_like
def truncate(value, length):
value_str = str(value)
return value_str[:length] + (value_str[length:] and "...")
if not is_nested_fn(flat_sequence):
raise TypeError(
"Attempted to pack value:\n {}\ninto a structure, but found "
"incompatible type `{}` instead.".format(
truncate(flat_sequence, 100), type(flat_sequence)
)
)
if not is_nested_fn(structure):
if len(flat_sequence) != 1:
raise ValueError(
"The target structure is of type `{}`\n {}\nHowever the input "
"is a sequence ({}) of length {}.\n {}\nnest cannot "
"guarantee that it is safe to map one to the other.".format(
type(structure),
truncate(structure, 100),
type(flat_sequence),
len(flat_sequence),
truncate(flat_sequence, 100),
)
)
return flat_sequence[0]
try:
final_index, packed = packed_nest_with_indices(
structure, flat_sequence, 0, is_nested_fn, sequence_fn
)
if final_index < len(flat_sequence):
raise IndexError
except IndexError:
flat_structure = dmtree.flatten(structure)
if len(flat_structure) != len(flat_sequence):
# pylint: disable=raise-missing-from
raise ValueError(
"Could not pack sequence. "
f"Structure had {len(flat_structure)} atoms, but "
f"flat_sequence had {len(flat_sequence)} items. "
f"Structure: {structure}, flat_sequence: {flat_sequence}."
)
return sequence_fn(structure, packed)
def packed_nest_with_indices(
structure, flat, index, is_nested_fn, sequence_fn=None
):
"""Helper function for pack_sequence_as.
Args:
structure: structure to mimic.
flat: Flattened values to output substructure for.
index: Index at which to start reading from flat.
is_nested_fn: Function used to test if a value should
be treated as a nested structure.
sequence_fn: Function used to generate a new structure instance.
Returns:
The tuple (new_index, child), where:
* new_index - the updated index into `flat`
having processed `structure`.
* packed - the subset of `flat` corresponding to `structure`,
having started at `index`, and packed into the same nested
format.
"""
packed = []
sequence_fn = sequence_fn or dmtree._sequence_like
for s in yield_value(structure):
if is_nested_fn(s):
new_index, child = packed_nest_with_indices(
s, flat, index, is_nested_fn, sequence_fn
)
packed.append(sequence_fn(s, child))
index = new_index
else:
packed.append(flat[index])
index += 1
return index, packed
def yield_value(iterable):
for _, v in dmtree._yield_sorted_items(iterable):
yield v
def lists_to_tuples(structure):
def sequence_fn(instance, args):
if isinstance(instance, list):
return tuple(args)
return dmtree._sequence_like(instance, args)
return pack_sequence_as(
structure,
dmtree.flatten(structure),
sequence_fn=sequence_fn,
)
def is_shape_tuple(x):
if isinstance(x, (list, tuple)):
if all(isinstance(e, (int, type(None))) for e in x):
return True
return False
def map_shape_structure(func, structure):
if is_shape_tuple(structure):
return func(tuple(structure))
if isinstance(structure, list):
return [map_shape_structure(func, e) for e in structure]
if isinstance(structure, tuple):
return tuple(map_shape_structure(func, e) for e in structure)
if isinstance(structure, dict):
return {k: map_shape_structure(func, v) for k, v in structure.items()}
else:
raise ValueError(f"Cannot map function to unknown object {structure}")
|
from keras.src.utils.module_utils import dmtree
def register_tree_node_class(cls):
return cls
def is_nested(structure):
    return dmtree.is_nested(structure)
def traverse(func, structure, top_down=True):
return dmtree.traverse(func, structure, top_down=top_down)
def flatten(structure):
return dmtree.flatten(structure)
def map_structure(func, *structures):
return dmtree.map_structure(func, *structures)
def map_structure_up_to(shallow_structure, func, *structures):
return dmtree.map_structure_up_to(shallow_structure, func, *structures)
def assert_same_structure(a, b, check_types=True):
return dmtree.assert_same_structure(a, b, check_types=check_types)
def pack_sequence_as(structure, flat_sequence, sequence_fn=None):
is_nested_fn = dmtree.is_nested
sequence_fn = sequence_fn or dmtree._sequence_like
def truncate(value, length):
value_str = str(value)
return value_str[:length] + (value_str[length:] and "...")
if not is_nested_fn(flat_sequence):
raise TypeError(
"Attempted to pack value:\n {}\ninto a structure, but found "
"incompatible type `{}` instead.".format(
truncate(flat_sequence, 100), type(flat_sequence)
)
)
if not is_nested_fn(structure):
if len(flat_sequence) != 1:
raise ValueError(
"The target structure is of type `{}`\n {}\nHowever the input "
"is a sequence ({}) of length {}.\n {}\nnest cannot "
"guarantee that it is safe to map one to the other.".format(
type(structure),
truncate(structure, 100),
type(flat_sequence),
len(flat_sequence),
truncate(flat_sequence, 100),
)
)
return flat_sequence[0]
try:
final_index, packed = packed_nest_with_indices(
structure, flat_sequence, 0, is_nested_fn, sequence_fn
)
if final_index < len(flat_sequence):
raise IndexError
except IndexError:
flat_structure = dmtree.flatten(structure)
if len(flat_structure) != len(flat_sequence):
# pylint: disable=raise-missing-from
raise ValueError(
"Could not pack sequence. "
f"Structure had {len(flat_structure)} atoms, but "
f"flat_sequence had {len(flat_sequence)} items. "
f"Structure: {structure}, flat_sequence: {flat_sequence}."
)
return sequence_fn(structure, packed)
def packed_nest_with_indices(
structure, flat, index, is_nested_fn, sequence_fn=None
):
"""Helper function for pack_sequence_as.
Args:
structure: structure to mimic.
flat: Flattened values to output substructure for.
index: Index at which to start reading from flat.
is_nested_fn: Function used to test if a value should
be treated as a nested structure.
sequence_fn: Function used to generate a new structure instance.
Returns:
The tuple (new_index, child), where:
* new_index - the updated index into `flat`
having processed `structure`.
* packed - the subset of `flat` corresponding to `structure`,
having started at `index`, and packed into the same nested
format.
"""
packed = []
sequence_fn = sequence_fn or dmtree._sequence_like
for s in yield_value(structure):
if is_nested_fn(s):
new_index, child = packed_nest_with_indices(
s, flat, index, is_nested_fn, sequence_fn
)
packed.append(sequence_fn(s, child))
index = new_index
else:
packed.append(flat[index])
index += 1
return index, packed
def yield_value(iterable):
for _, v in dmtree._yield_sorted_items(iterable):
yield v
def lists_to_tuples(structure):
def sequence_fn(instance, args):
if isinstance(instance, list):
return tuple(args)
return dmtree._sequence_like(instance, args)
return pack_sequence_as(
structure,
dmtree.flatten(structure),
sequence_fn=sequence_fn,
)
def is_shape_tuple(x):
if isinstance(x, (list, tuple)):
if all(isinstance(e, (int, type(None))) for e in x):
return True
return False
def map_shape_structure(func, structure):
if is_shape_tuple(structure):
return func(tuple(structure))
if isinstance(structure, list):
return [map_shape_structure(func, e) for e in structure]
if isinstance(structure, tuple):
return tuple(map_shape_structure(func, e) for e in structure)
if isinstance(structure, dict):
return {k: map_shape_structure(func, v) for k, v in structure.items()}
else:
raise ValueError(f"Cannot map function to unknown object {structure}")
|
from codecs import unicode_escape_decode
from typing import Dict
from docarray import Document
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
from typing import Sequence, Iterable
class GetSetDelMixin(BaseGetSetDelMixin):
"""Provide concrete implementation for ``__getitem__``, ``__setitem__``,
and ``__delitem__`` for ``DocumentArrayRedis``"""
def _get_doc_by_id(self, _id: str) -> 'Document':
"""Concrete implementation of base class' ``_get_doc_by_id``
:param _id: the id of the document
:return: the retrieved document from redis
"""
try:
result = self._client.hgetall(self._doc_prefix + _id)
doc = Document.from_base64(result[b'blob'])
return doc
except Exception as ex:
raise KeyError(_id) from ex
def _get_docs_by_ids(self, ids: Sequence[str]) -> Iterable['Document']:
"""Concrete implementation of base class' ``_get_docs_by_ids``
:param ids: ids of the document
:return: Iterable[Document]
"""
accumulated_docs = []
accumulated_docs_id_not_found = []
if not ids:
return accumulated_docs
pipe = self._client.pipeline()
for id in ids:
pipe.hgetall(self._doc_prefix + id)
results = pipe.execute()
for i, result in enumerate(results):
if result:
accumulated_docs.append(Document.from_base64(result[b'blob']))
else:
accumulated_docs_id_not_found.append(ids[i])
if accumulated_docs_id_not_found:
raise KeyError(accumulated_docs_id_not_found, accumulated_docs)
return accumulated_docs
def _set_doc_by_id(self, _id: str, value: 'Document'):
"""Concrete implementation of base class' ``_set_doc_by_id``
:param _id: the id of doc to update
:param value: the document to update to
"""
self._del_doc_by_id(_id)
if _id != value.id:
self._del_doc_by_id(value.id)
payload = self._document_to_redis(value)
self._client.hset(self._doc_prefix + value.id, mapping=payload)
def _set_docs_by_ids(self, ids, docs: Iterable['Document'], mismatch_ids: Dict):
"""Overridden implementation of _set_docs_by_ids in order to add docs in batches and flush at the end
:param ids: the ids used for indexing
"""
for _id, doc in zip(ids, docs):
self._del_doc_by_id(_id)
if _id != doc.id:
self._del_doc_by_id(doc.id)
self._upload_batch(docs)
def _del_doc_by_id(self, _id: str):
"""Concrete implementation of base class' ``_del_doc_by_id``
:param _id: the id of the document to delete
"""
if self._doc_id_exists(_id):
self._client.delete(self._doc_prefix + _id)
def _document_to_redis(self, doc: 'Document') -> Dict:
extra_columns = {}
for col, _ in self._config.columns.items():
tag = doc.tags.get(col)
if tag is not None:
extra_columns[col] = int(tag) if isinstance(tag, bool) else tag
if self._config.tag_indices:
for index in self._config.tag_indices:
text = doc.tags.get(index)
if text is not None:
extra_columns[index] = text
payload = {
'id': doc.id,
'embedding': self._map_embedding(doc.embedding),
'blob': doc.to_base64(),
**extra_columns,
}
if doc.text:
payload['text'] = doc.text
return payload
def _load_offset2ids(self):
ids = self._get_offset2ids_meta()
self._offset2ids = Offset2ID(ids)
def _save_offset2ids(self):
self._update_offset2ids_meta()
def _clear_storage(self):
self._client.flushdb()
|
from codecs import unicode_escape_decode
from typing import Dict
from docarray import Document
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
from typing import Sequence, Iterable
class GetSetDelMixin(BaseGetSetDelMixin):
"""Provide concrete implementation for ``__getitem__``, ``__setitem__``,
and ``__delitem__`` for ``DocumentArrayRedis``"""
def _get_doc_by_id(self, _id: str) -> 'Document':
"""Concrete implementation of base class' ``_get_doc_by_id``
:param _id: the id of the document
:return: the retrieved document from redis
"""
try:
result = self._client.hgetall(self._doc_prefix + _id)
doc = Document.from_base64(result[b'blob'])
return doc
except Exception as ex:
raise KeyError(_id) from ex
def _get_docs_by_ids(self, ids: Sequence[str]) -> Iterable['Document']:
"""Concrete implementation of base class' ``_get_docs_by_ids``
:param ids: ids of the document
:return: Iterable[Document]
"""
accumulated_docs = []
accumulated_docs_id_not_found = []
if not ids:
return accumulated_docs
pipe = self._client.pipeline()
for id in ids:
pipe.hgetall(self._doc_prefix + id)
results = pipe.execute()
for i, result in enumerate(results):
if result:
accumulated_docs.append(Document.from_base64(result[b'blob']))
else:
accumulated_docs_id_not_found.append(ids[i])
if accumulated_docs_id_not_found:
raise KeyError(accumulated_docs_id_not_found, accumulated_docs)
return accumulated_docs
def _set_doc_by_id(self, _id: str, value: 'Document'):
"""Concrete implementation of base class' ``_set_doc_by_id``
:param _id: the id of doc to update
:param value: the document to update to
"""
self._del_doc_by_id(_id)
if _id != value.id:
self._del_doc_by_id(value.id)
payload = self._document_to_redis(value)
self._client.hset(self._doc_prefix + value.id, mapping=payload)
def _set_docs_by_ids(self, ids, docs: Iterable['Document'], mismatch_ids: Dict):
"""Overridden implementation of _set_docs_by_ids in order to add docs in batches and flush at the end
:param ids: the ids used for indexing
"""
for _id, doc in zip(ids, docs):
self._del_doc_by_id(_id)
if _id != doc.id:
self._del_doc_by_id(doc.id)
self._upload_batch(docs)
def _del_doc_by_id(self, _id: str):
"""Concrete implementation of base class' ``_del_doc_by_id``
:param _id: the id of the document to delete
"""
if self._doc_id_exists(_id):
self._client.delete(self._doc_prefix + _id)
def _document_to_redis(self, doc: 'Document') -> Dict:
extra_columns = {}
for col, _ in self._config.columns.items():
tag = doc.tags.get(col)
if tag is not None:
extra_columns[col] = int(tag) if isinstance(tag, bool) else tag
payload = {
'id': doc.id,
'embedding': self._map_embedding(doc.embedding),
'blob': doc.to_base64(),
**extra_columns,
}
if doc.text:
payload['text'] = doc.text
return payload
def _load_offset2ids(self):
ids = self._get_offset2ids_meta()
self._offset2ids = Offset2ID(ids)
def _save_offset2ids(self):
self._update_offset2ids_meta()
def _clear_storage(self):
self._client.flushdb()
|
import argparse
import os
from gzip import GzipFile
from time import time
from urllib.request import urlretrieve
import numpy as np
import pandas as pd
from joblib import Memory
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.ensemble._hist_gradient_boosting.utils import get_equivalent_estimator
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.model_selection import train_test_split
parser = argparse.ArgumentParser()
parser.add_argument("--n-leaf-nodes", type=int, default=31)
parser.add_argument("--n-trees", type=int, default=10)
parser.add_argument("--lightgbm", action="store_true", default=False)
parser.add_argument("--xgboost", action="store_true", default=False)
parser.add_argument("--catboost", action="store_true", default=False)
parser.add_argument("--learning-rate", type=float, default=1.0)
parser.add_argument("--subsample", type=int, default=None)
parser.add_argument("--max-bins", type=int, default=255)
parser.add_argument("--no-predict", action="store_true", default=False)
parser.add_argument("--cache-loc", type=str, default="/tmp")
parser.add_argument("--no-interactions", type=bool, default=False)
parser.add_argument("--max-features", type=float, default=1.0)
args = parser.parse_args()
HERE = os.path.dirname(__file__)
URL = "https://archive.ics.uci.edu/ml/machine-learning-databases/00280/HIGGS.csv.gz"
m = Memory(location=args.cache_loc, mmap_mode="r")
n_leaf_nodes = args.n_leaf_nodes
n_trees = args.n_trees
subsample = args.subsample
lr = args.learning_rate
max_bins = args.max_bins
max_features = args.max_features
@m.cache
def load_data():
filename = os.path.join(HERE, URL.rsplit("/", 1)[-1])
if not os.path.exists(filename):
print(f"Downloading {URL} to {filename} (2.6 GB)...")
urlretrieve(URL, filename)
print("done.")
print(f"Parsing {filename}...")
tic = time()
with GzipFile(filename) as f:
df = pd.read_csv(f, header=None, dtype=np.float32)
toc = time()
print(f"Loaded {df.values.nbytes / 1e9:0.3f} GB in {toc - tic:0.3f}s")
return df
def fit(est, data_train, target_train, libname):
print(f"Fitting a {libname} model...")
tic = time()
est.fit(data_train, target_train)
toc = time()
print(f"fitted in {toc - tic:.3f}s")
def predict(est, data_test, target_test):
if args.no_predict:
return
tic = time()
predicted_test = est.predict(data_test)
predicted_proba_test = est.predict_proba(data_test)
toc = time()
roc_auc = roc_auc_score(target_test, predicted_proba_test[:, 1])
acc = accuracy_score(target_test, predicted_test)
print(f"predicted in {toc - tic:.3f}s, ROC AUC: {roc_auc:.4f}, ACC: {acc:.4f}")
df = load_data()
target = df.values[:, 0]
data = np.ascontiguousarray(df.values[:, 1:])
data_train, data_test, target_train, target_test = train_test_split(
data, target, test_size=0.2, random_state=0
)
n_classes = len(np.unique(target))
if subsample is not None:
data_train, target_train = data_train[:subsample], target_train[:subsample]
n_samples, n_features = data_train.shape
print(f"Training set with {n_samples} records with {n_features} features.")
if args.no_interactions:
interaction_cst = [[i] for i in range(n_features)]
else:
interaction_cst = None
est = HistGradientBoostingClassifier(
loss="log_loss",
learning_rate=lr,
max_iter=n_trees,
max_bins=max_bins,
max_leaf_nodes=n_leaf_nodes,
early_stopping=False,
random_state=0,
verbose=1,
interaction_cst=interaction_cst,
max_features=max_features,
)
fit(est, data_train, target_train, "sklearn")
predict(est, data_test, target_test)
if args.lightgbm:
est = get_equivalent_estimator(est, lib="lightgbm", n_classes=n_classes)
fit(est, data_train, target_train, "lightgbm")
predict(est, data_test, target_test)
if args.xgboost:
est = get_equivalent_estimator(est, lib="xgboost", n_classes=n_classes)
fit(est, data_train, target_train, "xgboost")
predict(est, data_test, target_test)
if args.catboost:
est = get_equivalent_estimator(est, lib="catboost", n_classes=n_classes)
fit(est, data_train, target_train, "catboost")
predict(est, data_test, target_test)
|
import argparse
import os
from gzip import GzipFile
from time import time
from urllib.request import urlretrieve
import numpy as np
import pandas as pd
from joblib import Memory
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.ensemble._hist_gradient_boosting.utils import get_equivalent_estimator
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.model_selection import train_test_split
parser = argparse.ArgumentParser()
parser.add_argument("--n-leaf-nodes", type=int, default=31)
parser.add_argument("--n-trees", type=int, default=10)
parser.add_argument("--lightgbm", action="store_true", default=False)
parser.add_argument("--xgboost", action="store_true", default=False)
parser.add_argument("--catboost", action="store_true", default=False)
parser.add_argument("--learning-rate", type=float, default=1.0)
parser.add_argument("--subsample", type=int, default=None)
parser.add_argument("--max-bins", type=int, default=255)
parser.add_argument("--no-predict", action="store_true", default=False)
parser.add_argument("--cache-loc", type=str, default="/tmp")
parser.add_argument("--no-interactions", type=bool, default=False)
parser.add_argument("--max-features", type=float, default=1.0)
args = parser.parse_args()
HERE = os.path.dirname(__file__)
URL = "https://archive.ics.uci.edu/ml/machine-learning-databases/00280/HIGGS.csv.gz"
m = Memory(location=args.cache_loc, mmap_mode="r")
n_leaf_nodes = args.n_leaf_nodes
n_trees = args.n_trees
subsample = args.subsample
lr = args.learning_rate
max_bins = args.max_bins
max_features = args.max_features
@m.cache
def load_data():
filename = os.path.join(HERE, URL.rsplit("/", 1)[-1])
if not os.path.exists(filename):
print(f"Downloading {URL} to {filename} (2.6 GB)...")
urlretrieve(URL, filename)
print("done.")
print(f"Parsing {filename}...")
tic = time()
with GzipFile(filename) as f:
df = pd.read_csv(f, header=None, dtype=np.float32)
toc = time()
print(f"Loaded {df.values.nbytes / 1e9:0.3f} GB in {toc - tic:0.3f}s")
return df
def fit(est, data_train, target_train, libname):
print(f"Fitting a {libname} model...")
tic = time()
est.fit(data_train, target_train)
toc = time()
print(f"fitted in {toc - tic:.3f}s")
def predict(est, data_test, target_test):
if args.no_predict:
return
tic = time()
predicted_test = est.predict(data_test)
predicted_proba_test = est.predict_proba(data_test)
toc = time()
roc_auc = roc_auc_score(target_test, predicted_proba_test[:, 1])
acc = accuracy_score(target_test, predicted_test)
print(f"predicted in {toc - tic:.3f}s, ROC AUC: {roc_auc:.4f}, ACC: {acc :.4f}")
df = load_data()
target = df.values[:, 0]
data = np.ascontiguousarray(df.values[:, 1:])
data_train, data_test, target_train, target_test = train_test_split(
data, target, test_size=0.2, random_state=0
)
n_classes = len(np.unique(target))
if subsample is not None:
data_train, target_train = data_train[:subsample], target_train[:subsample]
n_samples, n_features = data_train.shape
print(f"Training set with {n_samples} records with {n_features} features.")
if args.no_interactions:
interaction_cst = [[i] for i in range(n_features)]
else:
interaction_cst = None
est = HistGradientBoostingClassifier(
loss="log_loss",
learning_rate=lr,
max_iter=n_trees,
max_bins=max_bins,
max_leaf_nodes=n_leaf_nodes,
early_stopping=False,
random_state=0,
verbose=1,
interaction_cst=interaction_cst,
max_features=max_features,
)
fit(est, data_train, target_train, "sklearn")
predict(est, data_test, target_test)
if args.lightgbm:
est = get_equivalent_estimator(est, lib="lightgbm", n_classes=n_classes)
fit(est, data_train, target_train, "lightgbm")
predict(est, data_test, target_test)
if args.xgboost:
est = get_equivalent_estimator(est, lib="xgboost", n_classes=n_classes)
fit(est, data_train, target_train, "xgboost")
predict(est, data_test, target_test)
if args.catboost:
est = get_equivalent_estimator(est, lib="catboost", n_classes=n_classes)
fit(est, data_train, target_train, "catboost")
predict(est, data_test, target_test)
|
from __future__ import annotations
from typing import TYPE_CHECKING, Optional
from langchain_core.callbacks.base import BaseCallbackHandler
if TYPE_CHECKING:
from langchain_community.callbacks import LLMThoughtLabeler
from streamlit.delta_generator import DeltaGenerator
def StreamlitCallbackHandler(
parent_container: DeltaGenerator,
*,
max_thought_containers: int = 4,
expand_new_thoughts: bool = True,
collapse_completed_thoughts: bool = True,
thought_labeler: Optional[LLMThoughtLabeler] = None,
) -> BaseCallbackHandler:
"""Callback Handler that writes to a Streamlit app.
This CallbackHandler is geared towards
use with a LangChain Agent; it displays the Agent's LLM and tool-usage "thoughts"
inside a series of Streamlit expanders.
Parameters
----------
parent_container
The `st.container` that will contain all the Streamlit elements that the
Handler creates.
max_thought_containers
The max number of completed LLM thought containers to show at once. When this
threshold is reached, a new thought will cause the oldest thoughts to be
collapsed into a "History" expander. Defaults to 4.
expand_new_thoughts
Each LLM "thought" gets its own `st.expander`. This param controls whether that
expander is expanded by default. Defaults to True.
collapse_completed_thoughts
If True, LLM thought expanders will be collapsed when completed.
Defaults to True.
thought_labeler
An optional custom LLMThoughtLabeler instance. If unspecified, the handler
will use the default thought labeling logic. Defaults to None.
Returns
-------
A new StreamlitCallbackHandler instance.
Note that this is an "auto-updating" API: if the installed version of Streamlit
has a more recent StreamlitCallbackHandler implementation, an instance of that class
will be used.
"""
# If we're using a version of Streamlit that implements StreamlitCallbackHandler,
# delegate to it instead of using our built-in handler. The official handler is
# guaranteed to support the same set of kwargs.
try:
from streamlit.external.langchain import StreamlitCallbackHandler
# This is the official handler, so we can just return it.
return StreamlitCallbackHandler(
parent_container,
max_thought_containers=max_thought_containers,
expand_new_thoughts=expand_new_thoughts,
collapse_completed_thoughts=collapse_completed_thoughts,
thought_labeler=thought_labeler,
)
except ImportError:
try:
from langchain_community.callbacks.streamlit.streamlit_callback_handler import ( # noqa: E501
StreamlitCallbackHandler as _InternalStreamlitCallbackHandler,
)
except ImportError as e:
msg = (
"To use the StreamlitCallbackHandler, please install "
"langchain-community with `pip install langchain-community`."
)
raise ImportError(msg) from e
return _InternalStreamlitCallbackHandler(
parent_container,
max_thought_containers=max_thought_containers,
expand_new_thoughts=expand_new_thoughts,
collapse_completed_thoughts=collapse_completed_thoughts,
thought_labeler=thought_labeler,
)
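# A minimal usage sketch (hedged; an illustration added alongside the module,
# not part of the official API docs). Inside a Streamlit app the handler is
# typically passed to an agent run through the callbacks config; the
# ``agent_executor`` and ``prompt`` names below are assumed to already exist:
#
#   import streamlit as st
#
#   st_callback = StreamlitCallbackHandler(st.container())
#   result = agent_executor.invoke(
#       {"input": prompt},
#       {"callbacks": [st_callback]},
#   )
#   st.write(result["output"])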
|
from __future__ import annotations
from typing import TYPE_CHECKING, Optional
from langchain_core.callbacks.base import BaseCallbackHandler
if TYPE_CHECKING:
from langchain_community.callbacks import LLMThoughtLabeler
from streamlit.delta_generator import DeltaGenerator
def StreamlitCallbackHandler(
parent_container: DeltaGenerator,
*,
max_thought_containers: int = 4,
expand_new_thoughts: bool = True,
collapse_completed_thoughts: bool = True,
thought_labeler: Optional[LLMThoughtLabeler] = None,
) -> BaseCallbackHandler:
"""Callback Handler that writes to a Streamlit app.
This CallbackHandler is geared towards
use with a LangChain Agent; it displays the Agent's LLM and tool-usage "thoughts"
inside a series of Streamlit expanders.
Parameters
----------
parent_container
The `st.container` that will contain all the Streamlit elements that the
Handler creates.
max_thought_containers
The max number of completed LLM thought containers to show at once. When this
threshold is reached, a new thought will cause the oldest thoughts to be
collapsed into a "History" expander. Defaults to 4.
expand_new_thoughts
Each LLM "thought" gets its own `st.expander`. This param controls whether that
expander is expanded by default. Defaults to True.
collapse_completed_thoughts
If True, LLM thought expanders will be collapsed when completed.
Defaults to True.
thought_labeler
An optional custom LLMThoughtLabeler instance. If unspecified, the handler
will use the default thought labeling logic. Defaults to None.
Returns
-------
A new StreamlitCallbackHandler instance.
Note that this is an "auto-updating" API: if the installed version of Streamlit
has a more recent StreamlitCallbackHandler implementation, an instance of that class
will be used.
"""
# If we're using a version of Streamlit that implements StreamlitCallbackHandler,
# delegate to it instead of using our built-in handler. The official handler is
# guaranteed to support the same set of kwargs.
try:
from streamlit.external.langchain import StreamlitCallbackHandler
# This is the official handler, so we can just return it.
return StreamlitCallbackHandler(
parent_container,
max_thought_containers=max_thought_containers,
expand_new_thoughts=expand_new_thoughts,
collapse_completed_thoughts=collapse_completed_thoughts,
thought_labeler=thought_labeler,
)
except ImportError:
try:
from langchain_community.callbacks.streamlit.streamlit_callback_handler import ( # noqa: E501
StreamlitCallbackHandler as _InternalStreamlitCallbackHandler,
)
        except ImportError as e:
            msg = (
                "To use the StreamlitCallbackHandler, please install "
                "langchain-community with `pip install langchain-community`."
            )
            raise ImportError(msg) from e
return _InternalStreamlitCallbackHandler(
parent_container,
max_thought_containers=max_thought_containers,
expand_new_thoughts=expand_new_thoughts,
collapse_completed_thoughts=collapse_completed_thoughts,
thought_labeler=thought_labeler,
)
|
from datetime import datetime, timezone
import pytest
from prisma.enums import CreditTransactionType
from prisma.models import CreditTransaction
from backend.blocks.llm import AITextGeneratorBlock
from backend.data.block import get_block
from backend.data.credit import BetaUserCredit
from backend.data.execution import NodeExecutionEntry
from backend.data.user import DEFAULT_USER_ID
from backend.executor.utils import UsageTransactionMetadata, block_usage_cost
from backend.integrations.credentials_store import openai_credentials
from backend.util.test import SpinTestServer
REFILL_VALUE = 1000
user_credit = BetaUserCredit(REFILL_VALUE)
async def disable_test_user_transactions():
await CreditTransaction.prisma().delete_many(where={"userId": DEFAULT_USER_ID})
async def top_up(amount: int):
await user_credit._add_transaction(
DEFAULT_USER_ID,
amount,
CreditTransactionType.TOP_UP,
)
async def spend_credits(entry: NodeExecutionEntry) -> int:
block = get_block(entry.block_id)
if not block:
raise RuntimeError(f"Block {entry.block_id} not found")
cost, matching_filter = block_usage_cost(block=block, input_data=entry.data)
await user_credit.spend_credits(
entry.user_id,
cost,
UsageTransactionMetadata(
graph_exec_id=entry.graph_exec_id,
graph_id=entry.graph_id,
node_id=entry.node_id,
node_exec_id=entry.node_exec_id,
block_id=entry.block_id,
block=entry.block_id,
input=matching_filter,
reason=f"Ran block {entry.block_id} {block.name}",
),
)
return cost
@pytest.mark.asyncio(loop_scope="session")
async def test_block_credit_usage(server: SpinTestServer):
await disable_test_user_transactions()
await top_up(100)
current_credit = await user_credit.get_credits(DEFAULT_USER_ID)
spending_amount_1 = await spend_credits(
NodeExecutionEntry(
user_id=DEFAULT_USER_ID,
graph_id="test_graph",
node_id="test_node",
graph_exec_id="test_graph_exec",
node_exec_id="test_node_exec",
block_id=AITextGeneratorBlock().id,
data={
"model": "gpt-4-turbo",
"credentials": {
"id": openai_credentials.id,
"provider": openai_credentials.provider,
"type": openai_credentials.type,
},
},
),
)
assert spending_amount_1 > 0
spending_amount_2 = await spend_credits(
NodeExecutionEntry(
user_id=DEFAULT_USER_ID,
graph_id="test_graph",
node_id="test_node",
graph_exec_id="test_graph_exec",
node_exec_id="test_node_exec",
block_id=AITextGeneratorBlock().id,
data={"model": "gpt-4-turbo", "api_key": "owned_api_key"},
),
)
assert spending_amount_2 == 0
new_credit = await user_credit.get_credits(DEFAULT_USER_ID)
assert new_credit == current_credit - spending_amount_1 - spending_amount_2
@pytest.mark.asyncio(loop_scope="session")
async def test_block_credit_top_up(server: SpinTestServer):
await disable_test_user_transactions()
current_credit = await user_credit.get_credits(DEFAULT_USER_ID)
await top_up(100)
new_credit = await user_credit.get_credits(DEFAULT_USER_ID)
assert new_credit == current_credit + 100
@pytest.mark.asyncio(loop_scope="session")
async def test_block_credit_reset(server: SpinTestServer):
await disable_test_user_transactions()
month1 = 1
month2 = 2
    # set the calendar to month 2 but keep the rest of the current timestamp
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(
month=month2, day=1
)
month2credit = await user_credit.get_credits(DEFAULT_USER_ID)
# Month 1 result should only affect month 1
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(
month=month1, day=1
)
month1credit = await user_credit.get_credits(DEFAULT_USER_ID)
await top_up(100)
assert await user_credit.get_credits(DEFAULT_USER_ID) == month1credit + 100
# Month 2 balance is unaffected
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(
month=month2, day=1
)
assert await user_credit.get_credits(DEFAULT_USER_ID) == month2credit
@pytest.mark.asyncio(loop_scope="session")
async def test_credit_refill(server: SpinTestServer):
await disable_test_user_transactions()
balance = await user_credit.get_credits(DEFAULT_USER_ID)
assert balance == REFILL_VALUE
|
from datetime import datetime, timezone
import pytest
from prisma.enums import CreditTransactionType
from prisma.models import CreditTransaction
from backend.blocks.llm import AITextGeneratorBlock
from backend.data.block import get_block
from backend.data.credit import BetaUserCredit
from backend.data.execution import NodeExecutionEntry
from backend.data.user import DEFAULT_USER_ID
from backend.executor.utils import UsageTransactionMetadata, block_usage_cost
from backend.integrations.credentials_store import openai_credentials
from backend.util.test import SpinTestServer
REFILL_VALUE = 1000
user_credit = BetaUserCredit(REFILL_VALUE)
async def disable_test_user_transactions():
await CreditTransaction.prisma().delete_many(where={"userId": DEFAULT_USER_ID})
async def top_up(amount: int):
await user_credit._add_transaction(
DEFAULT_USER_ID,
amount,
CreditTransactionType.TOP_UP,
)
async def spend_credits(entry: NodeExecutionEntry) -> int:
block = get_block(entry.block_id)
if not block:
raise RuntimeError(f"Block {entry.block_id} not found")
cost, matching_filter = block_usage_cost(block=block, input_data=entry.data)
await user_credit.spend_credits(
entry.user_id,
cost,
UsageTransactionMetadata(
graph_exec_id=entry.graph_exec_id,
graph_id=entry.graph_id,
node_id=entry.node_id,
node_exec_id=entry.node_exec_id,
block_id=entry.block_id,
block=entry.block_id,
input=matching_filter,
),
)
return cost
@pytest.mark.asyncio(loop_scope="session")
async def test_block_credit_usage(server: SpinTestServer):
await disable_test_user_transactions()
await top_up(100)
current_credit = await user_credit.get_credits(DEFAULT_USER_ID)
spending_amount_1 = await spend_credits(
NodeExecutionEntry(
user_id=DEFAULT_USER_ID,
graph_id="test_graph",
node_id="test_node",
graph_exec_id="test_graph_exec",
node_exec_id="test_node_exec",
block_id=AITextGeneratorBlock().id,
data={
"model": "gpt-4-turbo",
"credentials": {
"id": openai_credentials.id,
"provider": openai_credentials.provider,
"type": openai_credentials.type,
},
},
),
)
assert spending_amount_1 > 0
spending_amount_2 = await spend_credits(
NodeExecutionEntry(
user_id=DEFAULT_USER_ID,
graph_id="test_graph",
node_id="test_node",
graph_exec_id="test_graph_exec",
node_exec_id="test_node_exec",
block_id=AITextGeneratorBlock().id,
data={"model": "gpt-4-turbo", "api_key": "owned_api_key"},
),
)
assert spending_amount_2 == 0
new_credit = await user_credit.get_credits(DEFAULT_USER_ID)
assert new_credit == current_credit - spending_amount_1 - spending_amount_2
@pytest.mark.asyncio(loop_scope="session")
async def test_block_credit_top_up(server: SpinTestServer):
await disable_test_user_transactions()
current_credit = await user_credit.get_credits(DEFAULT_USER_ID)
await top_up(100)
new_credit = await user_credit.get_credits(DEFAULT_USER_ID)
assert new_credit == current_credit + 100
@pytest.mark.asyncio(loop_scope="session")
async def test_block_credit_reset(server: SpinTestServer):
await disable_test_user_transactions()
month1 = 1
month2 = 2
    # set the calendar to month 2 but keep the rest of the current timestamp
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(
month=month2, day=1
)
month2credit = await user_credit.get_credits(DEFAULT_USER_ID)
# Month 1 result should only affect month 1
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(
month=month1, day=1
)
month1credit = await user_credit.get_credits(DEFAULT_USER_ID)
await top_up(100)
assert await user_credit.get_credits(DEFAULT_USER_ID) == month1credit + 100
# Month 2 balance is unaffected
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(
month=month2, day=1
)
assert await user_credit.get_credits(DEFAULT_USER_ID) == month2credit
@pytest.mark.asyncio(loop_scope="session")
async def test_credit_refill(server: SpinTestServer):
await disable_test_user_transactions()
balance = await user_credit.get_credits(DEFAULT_USER_ID)
assert balance == REFILL_VALUE
|
tta_model = dict(
type='DetTTAModel',
tta_cfg=dict(nms=dict(type='nms', iou_threshold=0.6), max_per_img=100))
img_scales = [(640, 640), (320, 320), (960, 960)]
tta_pipeline = [
dict(type='LoadImageFromFile', backend_args=None),
dict(
type='TestTimeAug',
transforms=[
[
dict(type='Resize', scale=s, keep_ratio=True)
for s in img_scales
],
[
# ``RandomFlip`` must be placed before ``Pad``, otherwise
# bounding box coordinates after flipping cannot be
# recovered correctly.
dict(type='RandomFlip', prob=1.),
dict(type='RandomFlip', prob=0.)
],
[
dict(
type='Pad',
size=(960, 960),
pad_val=dict(img=(114, 114, 114))),
],
[
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'flip', 'flip_direction'))
]
])
]
|
tta_model = dict(
type='DetTTAModel',
tta_cfg=dict(nms=dict(type='nms', iou_threshold=0.6), max_per_img=100))
img_scales = [(640, 640), (320, 320), (960, 960)]
tta_pipeline = [
dict(type='LoadImageFromFile', file_client_args=dict(backend='disk')),
dict(
type='TestTimeAug',
transforms=[
[
dict(type='Resize', scale=s, keep_ratio=True)
for s in img_scales
],
[
# ``RandomFlip`` must be placed before ``Pad``, otherwise
# bounding box coordinates after flipping cannot be
# recovered correctly.
dict(type='RandomFlip', prob=1.),
dict(type='RandomFlip', prob=0.)
],
[
dict(
type='Pad',
size=(960, 960),
pad_val=dict(img=(114, 114, 114))),
],
[
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'flip', 'flip_direction'))
]
])
]
|
import importlib
class LazyModule:
def __init__(self, name, pip_name=None, import_error_msg=None):
self.name = name
self.pip_name = pip_name or name
self.import_error_msg = import_error_msg or (
f"This requires the {self.name} module. "
f"You can install it via `pip install {self.pip_name}`"
)
self.module = None
self._available = None
@property
def available(self):
if self._available is None:
try:
self.initialize()
self._available = True
except ImportError:
self._available = False
return self._available
def initialize(self):
try:
self.module = importlib.import_module(self.name)
except ImportError:
raise ImportError(self.import_error_msg)
def __getattr__(self, name):
if name == "_api_export_path":
raise AttributeError
if self.module is None:
self.initialize()
return getattr(self.module, name)
def __repr__(self):
return f"LazyModule({self.name})"
tensorflow = LazyModule("tensorflow")
gfile = LazyModule("tensorflow.io.gfile", pip_name="tensorflow")
tensorflow_io = LazyModule("tensorflow_io")
scipy = LazyModule("scipy")
jax = LazyModule("jax")
torch_xla = LazyModule(
"torch_xla",
import_error_msg=(
"This requires the torch_xla module. You can install it via "
"`pip install torch-xla`. Additionally, you may need to update "
"LD_LIBRARY_PATH if necessary. Torch XLA builds a shared library, "
"_XLAC.so, which needs to link to the version of Python it was built "
"with. Use the following command to update LD_LIBRARY_PATH: "
"`export LD_LIBRARY_PATH=<path to Python>/lib:$LD_LIBRARY_PATH`"
),
)
optree = LazyModule("optree")
dmtree = LazyModule("tree")
tf2onnx = LazyModule("tf2onnx")
|
import importlib
class LazyModule:
def __init__(self, name, pip_name=None, import_error_msg=None):
self.name = name
self.pip_name = pip_name or name
self.import_error_msg = import_error_msg or (
f"This requires the {self.name} module. "
f"You can install it via `pip install {self.pip_name}`"
)
self.module = None
self._available = None
@property
def available(self):
if self._available is None:
try:
self.initialize()
self._available = True
except ImportError:
self._available = False
return self._available
def initialize(self):
try:
self.module = importlib.import_module(self.name)
except ImportError:
raise ImportError(self.import_error_msg)
def __getattr__(self, name):
if name == "_api_export_path":
raise AttributeError
if self.module is None:
self.initialize()
return getattr(self.module, name)
def __repr__(self):
return f"LazyModule({self.name})"
tensorflow = LazyModule("tensorflow")
gfile = LazyModule("tensorflow.io.gfile", pip_name="tensorflow")
tensorflow_io = LazyModule("tensorflow_io")
scipy = LazyModule("scipy")
jax = LazyModule("jax")
torchvision = LazyModule("torchvision")
torch_xla = LazyModule(
"torch_xla",
import_error_msg=(
"This requires the torch_xla module. You can install it via "
"`pip install torch-xla`. Additionally, you may need to update "
"LD_LIBRARY_PATH if necessary. Torch XLA builds a shared library, "
"_XLAC.so, which needs to link to the version of Python it was built "
"with. Use the following command to update LD_LIBRARY_PATH: "
"`export LD_LIBRARY_PATH=<path to Python>/lib:$LD_LIBRARY_PATH`"
),
)
optree = LazyModule("optree")
dmtree = LazyModule("tree")
tf2onnx = LazyModule("tf2onnx")
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.retrievers import BM25Retriever
from langchain_community.retrievers.bm25 import default_preprocessing_func
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"default_preprocessing_func": "langchain_community.retrievers.bm25",
"BM25Retriever": "langchain_community.retrievers",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"BM25Retriever",
"default_preprocessing_func",
]
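# Hedged sketch of the underlying pattern (PEP 562 module-level ``__getattr__``):
# a deprecated name is resolved lazily from the lookup table. ``_resolve`` is an
# illustrative helper, not part of langchain's ``create_importer`` API.
def _resolve(name: str) -> Any:
    import importlib

    module = importlib.import_module(DEPRECATED_LOOKUP[name])
    return getattr(module, name)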
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.retrievers import BM25Retriever
from langchain_community.retrievers.bm25 import default_preprocessing_func
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"default_preprocessing_func": "langchain_community.retrievers.bm25",
"BM25Retriever": "langchain_community.retrievers",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"default_preprocessing_func",
"BM25Retriever",
]
|
from typing import Any, Dict, Type, TypeVar
from docarray.document.abstract_document import AbstractDocument
from docarray.document.base_node import BaseNode
from docarray.proto import DocumentProto, NodeProto
from docarray.typing import (
ID,
AnyUrl,
Embedding,
ImageUrl,
NdArray,
TextUrl,
TorchTensor,
)
T = TypeVar('T', bound='ProtoMixin')
class ProtoMixin(AbstractDocument, BaseNode):
@classmethod
def from_protobuf(cls: Type[T], pb_msg: 'DocumentProto') -> T:
"""create a Document from a protobuf message"""
fields: Dict[str, Any] = {}
for field in pb_msg.data:
value = pb_msg.data[field]
content_type = value.WhichOneof('content')
# this if/else statement needs to be refactored; it is too long
# the check should be delegated to the type level
content_type_dict = dict(
ndarray=NdArray,
torch_tensor=TorchTensor,
embedding=Embedding,
any_url=AnyUrl,
text_url=TextUrl,
image_url=ImageUrl,
id=ID,
)
if content_type in content_type_dict:
fields[field] = content_type_dict[content_type].from_protobuf(
getattr(value, content_type)
)
elif content_type == 'text':
fields[field] = value.text
elif content_type == 'nested':
fields[field] = cls._get_nested_document_class(field).from_protobuf(
value.nested
) # we get to the parent class
elif content_type == 'chunks':
from docarray import DocumentArray
fields[field] = DocumentArray.from_protobuf(
value.chunks
) # we get to the parent class
elif content_type is None:
fields[field] = None
else:
raise ValueError(
f'type {content_type} is not supported for deserialization'
)
return cls(**fields)
def to_protobuf(self) -> 'DocumentProto':
"""Convert Document into a Protobuf message.
:return: the protobuf message
"""
data = {}
for field, value in self:
try:
if isinstance(value, BaseNode):
nested_item = value._to_node_protobuf()
elif type(value) is str:
nested_item = NodeProto(text=value)
elif type(value) is bytes:
nested_item = NodeProto(blob=value)
elif value is None:
nested_item = NodeProto()
else:
raise ValueError(f'field {field} with {value} is not supported')
data[field] = nested_item
except RecursionError as ex:
if len(ex.args) >= 1:
ex.args = (
(
f'Field `{field}` contains cyclic reference in memory. '
'Could it be your Document is referring to itself?'
),
)
raise
except Exception as ex:
if len(ex.args) >= 1:
ex.args = (f'Field `{field}` is problematic',) + ex.args
raise
return DocumentProto(data=data)
def _to_node_protobuf(self) -> NodeProto:
"""Convert Document into a NodeProto protobuf message. This function should be
called when the Document is nested into another Document that needs to be
converted into a protobuf message.
:return: the nested item protobuf message
"""
return NodeProto(nested=self.to_protobuf())
|
from typing import Any, Dict, Type, TypeVar
from docarray.document.abstract_document import AbstractDocument
from docarray.document.base_node import BaseNode
from docarray.proto import DocumentProto, NodeProto
from docarray.typing import (
ID,
AnyUrl,
Embedding,
ImageUrl,
Tensor,
TextUrl,
TorchTensor,
)
T = TypeVar('T', bound='ProtoMixin')
class ProtoMixin(AbstractDocument, BaseNode):
@classmethod
def from_protobuf(cls: Type[T], pb_msg: 'DocumentProto') -> T:
"""create a Document from a protobuf message"""
fields: Dict[str, Any] = {}
for field in pb_msg.data:
value = pb_msg.data[field]
content_type = value.WhichOneof('content')
# this if/else statement needs to be refactored; it is too long
# the check should be delegated to the type level
content_type_dict = dict(
tensor=Tensor,
torch_tensor=TorchTensor,
embedding=Embedding,
any_url=AnyUrl,
text_url=TextUrl,
image_url=ImageUrl,
id=ID,
)
if content_type in content_type_dict:
fields[field] = content_type_dict[content_type].from_protobuf(
getattr(value, content_type)
)
elif content_type == 'text':
fields[field] = value.text
elif content_type == 'nested':
fields[field] = cls._get_nested_document_class(field).from_protobuf(
value.nested
) # we get to the parent class
elif content_type == 'chunks':
from docarray import DocumentArray
fields[field] = DocumentArray.from_protobuf(
value.chunks
) # we get to the parent class
elif content_type is None:
fields[field] = None
else:
raise ValueError(
f'type {content_type} is not supported for deserialization'
)
return cls(**fields)
def to_protobuf(self) -> 'DocumentProto':
"""Convert Document into a Protobuf message.
:return: the protobuf message
"""
data = {}
for field, value in self:
try:
if isinstance(value, BaseNode):
nested_item = value._to_node_protobuf()
elif type(value) is str:
nested_item = NodeProto(text=value)
elif type(value) is bytes:
nested_item = NodeProto(blob=value)
elif value is None:
nested_item = NodeProto()
else:
raise ValueError(f'field {field} with {value} is not supported')
data[field] = nested_item
except RecursionError as ex:
if len(ex.args) >= 1:
ex.args = (
(
f'Field `{field}` contains cyclic reference in memory. '
'Could it be your Document is referring to itself?'
),
)
raise
except Exception as ex:
if len(ex.args) >= 1:
ex.args = (f'Field `{field}` is problematic',) + ex.args
raise
return DocumentProto(data=data)
def _to_node_protobuf(self) -> NodeProto:
"""Convert Document into a NodeProto protobuf message. This function should be
called when the Document is nested into another Document that needs to be
converted into a protobuf message.
:return: the nested item protobuf message
"""
return NodeProto(nested=self.to_protobuf())
|
# Modified from:
# https://github.com/nyno-ai/openai-token-counter
from typing import Any, Callable, Dict, List, Optional
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.core.utils import get_tokenizer
class TokenCounter:
"""
Token counter class.
Attributes:
model (Optional[str]): The model to use for token counting.
"""
def __init__(self, tokenizer: Optional[Callable[[str], list]] = None) -> None:
self.tokenizer = tokenizer or get_tokenizer()
def get_string_tokens(self, string: str) -> int:
"""
Get the token count for a string.
Args:
string (str): The string to count.
Returns:
int: The token count.
"""
return len(self.tokenizer(string))
def estimate_tokens_in_messages(self, messages: List[ChatMessage]) -> int:
"""
Estimate the token count for a list of messages.
Args:
messages (List[ChatMessage]): The messages to estimate the token count for.
Returns:
int: The estimated token count.
"""
tokens = 0
for message in messages:
if message.role:
tokens += self.get_string_tokens(message.role)
if isinstance(message.content, str):
tokens += self.get_string_tokens(message.content)
additional_kwargs = {**message.additional_kwargs}
# backward compatibility
if "function_call" in additional_kwargs:
function_call = additional_kwargs.pop("function_call")
if function_call.get("name", None) is not None:
tokens += self.get_string_tokens(function_call["name"])
if function_call.get("arguments", None) is not None:
tokens += self.get_string_tokens(function_call["arguments"])
tokens += 3 # Additional tokens for function call
if "tool_calls" in additional_kwargs:
for tool_call in additional_kwargs["tool_calls"]:
if (
hasattr(tool_call, "function")
and tool_call.function is not None
):
tokens += self.get_string_tokens(tool_call.function.name)
tokens += self.get_string_tokens(tool_call.function.arguments)
tokens += 3 # Additional tokens for tool call
tokens += 3 # Add three per message
if message.role == MessageRole.FUNCTION or message.role == MessageRole.TOOL:
tokens -= 2 # Subtract 2 if role is "function"
return tokens
def estimate_tokens_in_tools(self, tools: List[Dict[str, Any]]) -> int:
"""
Estimate token count for the tools.
This takes a list of tools created using the `to_openai_tool()` function (or similar).
Args:
tools (list[Dict[str, Any]]): The tools to estimate the token count for.
Returns:
int: The estimated token count.
"""
if not tools:
return 0
return self.get_string_tokens(str(tools))
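# Hedged usage sketch; a whitespace tokenizer is injected so the example does
# not depend on the default tokenizer returned by ``get_tokenizer``, and the
# message contents are made up.
_counter = TokenCounter(tokenizer=lambda text: text.split())
_messages = [
    ChatMessage(role=MessageRole.USER, content="How many tokens is this?"),
    ChatMessage(role=MessageRole.ASSISTANT, content="Only a handful."),
]
print(_counter.get_string_tokens("hello world"))  # 2 with the whitespace tokenizer
print(_counter.estimate_tokens_in_messages(_messages))  # word counts plus the fixed per-message overhead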
|
# Modified from:
# https://github.com/nyno-ai/openai-token-counter
from typing import Any, Callable, Dict, List, Optional
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.core.utils import get_tokenizer
class TokenCounter:
"""Token counter class.
Attributes:
model (Optional[str]): The model to use for token counting.
"""
def __init__(self, tokenizer: Optional[Callable[[str], list]] = None) -> None:
self.tokenizer = tokenizer or get_tokenizer()
def get_string_tokens(self, string: str) -> int:
"""Get the token count for a string.
Args:
string (str): The string to count.
Returns:
int: The token count.
"""
return len(self.tokenizer(string))
def estimate_tokens_in_messages(self, messages: List[ChatMessage]) -> int:
"""Estimate token count for a single message.
Args:
message (OpenAIMessage): The message to estimate the token count for.
Returns:
int: The estimated token count.
"""
tokens = 0
for message in messages:
if message.role:
tokens += self.get_string_tokens(message.role)
if isinstance(message.content, str):
tokens += self.get_string_tokens(message.content)
additional_kwargs = {**message.additional_kwargs}
# backward compatibility
if "function_call" in additional_kwargs:
function_call = additional_kwargs.pop("function_call")
if function_call.get("name", None) is not None:
tokens += self.get_string_tokens(function_call["name"])
if function_call.get("arguments", None) is not None:
tokens += self.get_string_tokens(function_call["arguments"])
tokens += 3 # Additional tokens for function call
if "tool_calls" in additional_kwargs:
for tool_call in additional_kwargs["tool_calls"]:
if (
hasattr(tool_call, "function")
and tool_call.function is not None
):
tokens += self.get_string_tokens(tool_call.function.name)
tokens += self.get_string_tokens(tool_call.function.arguments)
tokens += 3 # Additional tokens for tool call
tokens += 3 # Add three per message
if message.role == MessageRole.FUNCTION or message.role == MessageRole.TOOL:
tokens -= 2 # Subtract 2 if role is "function"
return tokens
def estimate_tokens_in_tools(self, tools: List[Dict[str, Any]]) -> int:
"""Estimate token count for the tools.
This takes a list of tools created using the `to_openai_tool()` function (or similar).
Args:
tools (list[Dict[str, Any]]): The tools to estimate the token count for.
Returns:
int: The estimated token count.
"""
if not tools:
return 0
return self.get_string_tokens(str(tools))
|
from .notifications import NotificationManager, NotificationManagerClient
__all__ = [
"NotificationManager",
"NotificationManagerClient",
]
|
from .notifications import NotificationManager
__all__ = [
"NotificationManager",
]
|
"""**LangSmith** utilities.
This module provides utilities for connecting to `LangSmith <https://smith.langchain.com/>`_. For more information on LangSmith, see the `LangSmith documentation <https://docs.smith.langchain.com/>`_.
**Evaluation**
LangSmith helps you evaluate Chains and other language model application components using a number of LangChain evaluators.
An example of this is shown below, assuming you've created a LangSmith dataset called ``<my_dataset_name>``:
.. code-block:: python
from langsmith import Client
from langchain_community.chat_models import ChatOpenAI
from langchain.chains import LLMChain
from langchain.smith import RunEvalConfig, run_on_dataset
# Chains may have memory. Passing in a constructor function lets the
# evaluation framework avoid cross-contamination between runs.
def construct_chain():
llm = ChatOpenAI(temperature=0)
chain = LLMChain.from_string(
llm,
"What's the answer to {your_input_key}"
)
return chain
# Load off-the-shelf evaluators via config or the EvaluatorType (string or enum)
evaluation_config = RunEvalConfig(
evaluators=[
"qa", # "Correctness" against a reference answer
"embedding_distance",
RunEvalConfig.Criteria("helpfulness"),
RunEvalConfig.Criteria({
"fifth-grader-score": "Do you have to be smarter than a fifth grader to answer this question?"
}),
]
)
client = Client()
run_on_dataset(
client,
"<my_dataset_name>",
construct_chain,
evaluation=evaluation_config,
)
You can also create custom evaluators by subclassing the
:class:`StringEvaluator <langchain.evaluation.schema.StringEvaluator>`
or LangSmith's `RunEvaluator` classes.
.. code-block:: python
from typing import Optional
from langchain.evaluation import StringEvaluator
class MyStringEvaluator(StringEvaluator):
@property
def requires_input(self) -> bool:
return False
@property
def requires_reference(self) -> bool:
return True
@property
def evaluation_name(self) -> str:
return "exact_match"
def _evaluate_strings(self, prediction, reference=None, input=None, **kwargs) -> dict:
return {"score": prediction == reference}
evaluation_config = RunEvalConfig(
custom_evaluators = [MyStringEvaluator()],
)
run_on_dataset(
client,
"<my_dataset_name>",
construct_chain,
evaluation=evaluation_config,
)
**Primary Functions**
- :func:`arun_on_dataset <langchain.smith.evaluation.runner_utils.arun_on_dataset>`: Asynchronous function to evaluate a chain, agent, or other LangChain component over a dataset.
- :func:`run_on_dataset <langchain.smith.evaluation.runner_utils.run_on_dataset>`: Function to evaluate a chain, agent, or other LangChain component over a dataset.
- :class:`RunEvalConfig <langchain.smith.evaluation.config.RunEvalConfig>`: Class representing the configuration for running evaluation. You can select evaluators by :class:`EvaluatorType <langchain.evaluation.schema.EvaluatorType>` or config, or you can pass in `custom_evaluators`
""" # noqa: E501
from langchain.smith.evaluation import (
RunEvalConfig,
arun_on_dataset,
run_on_dataset,
)
__all__ = [
"RunEvalConfig",
"arun_on_dataset",
"run_on_dataset",
]
|
"""**LangSmith** utilities.
This module provides utilities for connecting to `LangSmith <https://smith.langchain.com/>`_. For more information on LangSmith, see the `LangSmith documentation <https://docs.smith.langchain.com/>`_.
**Evaluation**
LangSmith helps you evaluate Chains and other language model application components using a number of LangChain evaluators.
An example of this is shown below, assuming you've created a LangSmith dataset called ``<my_dataset_name>``:
.. code-block:: python
from langsmith import Client
from langchain_community.chat_models import ChatOpenAI
from langchain.chains import LLMChain
from langchain.smith import RunEvalConfig, run_on_dataset
# Chains may have memory. Passing in a constructor function lets the
# evaluation framework avoid cross-contamination between runs.
def construct_chain():
llm = ChatOpenAI(temperature=0)
chain = LLMChain.from_string(
llm,
"What's the answer to {your_input_key}"
)
return chain
# Load off-the-shelf evaluators via config or the EvaluatorType (string or enum)
evaluation_config = RunEvalConfig(
evaluators=[
"qa", # "Correctness" against a reference answer
"embedding_distance",
RunEvalConfig.Criteria("helpfulness"),
RunEvalConfig.Criteria({
"fifth-grader-score": "Do you have to be smarter than a fifth grader to answer this question?"
}),
]
)
client = Client()
run_on_dataset(
client,
"<my_dataset_name>",
construct_chain,
evaluation=evaluation_config,
)
You can also create custom evaluators by subclassing the
:class:`StringEvaluator <langchain.evaluation.schema.StringEvaluator>`
or LangSmith's `RunEvaluator` classes.
.. code-block:: python
from typing import Optional
from langchain.evaluation import StringEvaluator
class MyStringEvaluator(StringEvaluator):
@property
def requires_input(self) -> bool:
return False
@property
def requires_reference(self) -> bool:
return True
@property
def evaluation_name(self) -> str:
return "exact_match"
def _evaluate_strings(self, prediction, reference=None, input=None, **kwargs) -> dict:
return {"score": prediction == reference}
evaluation_config = RunEvalConfig(
custom_evaluators = [MyStringEvaluator()],
)
run_on_dataset(
client,
"<my_dataset_name>",
construct_chain,
evaluation=evaluation_config,
)
**Primary Functions**
- :func:`arun_on_dataset <langchain.smith.evaluation.runner_utils.arun_on_dataset>`: Asynchronous function to evaluate a chain, agent, or other LangChain component over a dataset.
- :func:`run_on_dataset <langchain.smith.evaluation.runner_utils.run_on_dataset>`: Function to evaluate a chain, agent, or other LangChain component over a dataset.
- :class:`RunEvalConfig <langchain.smith.evaluation.config.RunEvalConfig>`: Class representing the configuration for running evaluation. You can select evaluators by :class:`EvaluatorType <langchain.evaluation.schema.EvaluatorType>` or config, or you can pass in `custom_evaluators`
""" # noqa: E501
from langchain.smith.evaluation import (
RunEvalConfig,
arun_on_dataset,
run_on_dataset,
)
__all__ = [
"arun_on_dataset",
"run_on_dataset",
"RunEvalConfig",
]
|
_base_ = [
'../_base_/models/faster-rcnn_r50-caffe-dc5.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
|
_base_ = [
'../_base_/models/faster-rcnn_r50-caffe-dc5.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
from .alexnet import *
from .convnext import *
from .densenet import *
from .efficientnet import *
from .googlenet import *
from .inception import *
from .mnasnet import *
from .mobilenet import *
from .regnet import *
from .resnet import *
from .shufflenetv2 import *
from .squeezenet import *
from .vgg import *
from .vision_transformer import *
from .swin_transformer import *
from .maxvit import *
from . import detection, optical_flow, quantization, segmentation, video
# The Weights and WeightsEnum are developer-facing utils that we make public for
# downstream libs like torchgeo https://github.com/pytorch/vision/issues/7094
# TODO: we could / should document them publicly, but it's not clear where, as
# they're not intended for end users.
from ._api import get_model, get_model_builder, get_model_weights, get_weight, list_models, Weights, WeightsEnum
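# Hedged usage sketch of the registry re-exported above; intended to be run
# from user code, not from this ``__init__`` module itself.
def _example_registry_usage():
    from torchvision.models import get_model, get_model_weights, list_models

    names = list_models()  # every registered model builder name
    weights_enum = get_model_weights("resnet18")  # the WeightsEnum associated with a builder
    model = get_model("resnet18", weights=None)  # construct the model without pretrained weights
    return names, weights_enum, model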
|
from .alexnet import *
from .convnext import *
from .densenet import *
from .efficientnet import *
from .googlenet import *
from .inception import *
from .mnasnet import *
from .mobilenet import *
from .regnet import *
from .resnet import *
from .shufflenetv2 import *
from .squeezenet import *
from .vgg import *
from .vision_transformer import *
from .swin_transformer import *
from .maxvit import *
from . import detection, optical_flow, quantization, segmentation, video
from ._api import get_model, get_model_builder, get_model_weights, get_weight, list_models
|
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
T = TypeVar('T', bound='VideoNdArray')
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
@_register_proto(proto_type_name='video_ndarray')
class VideoNdArray(NdArray, VideoTensorMixin):
"""
Subclass of NdArray, to represent a video tensor.
Adds video-specific features to the tensor.
---
```python
from typing import Optional
import numpy as np
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.typing import VideoNdArray, VideoUrl
class MyVideoDoc(BaseDoc):
title: str
url: Optional[VideoUrl]
video_tensor: Optional[VideoNdArray]
doc_1 = MyVideoDoc(
title='my_first_video_doc',
video_tensor=np.random.random((100, 224, 224, 3)),
)
doc_1.video_tensor.save(file_path='file_1.mp4')
doc_2 = MyVideoDoc(
title='my_second_video_doc',
url='file_1.mp4',
)
doc_2.video_tensor = parse_obj_as(VideoNdArray, doc_2.url.load().video)
doc_2.video_tensor.save(file_path='file_2.mp4')
```
---
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, List[Any], Tuple[Any], Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
tensor = super().validate(value=value, field=field, config=config)
return cls.validate_shape(value=tensor)
|
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
T = TypeVar('T', bound='VideoNdArray')
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
@_register_proto(proto_type_name='video_ndarray')
class VideoNdArray(NdArray, VideoTensorMixin):
"""
Subclass of NdArray, to represent a video tensor.
Adds video-specific features to the tensor.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
import numpy as np
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.typing import VideoNdArray, VideoUrl
class MyVideoDoc(BaseDoc):
title: str
url: Optional[VideoUrl]
video_tensor: Optional[VideoNdArray]
doc_1 = MyVideoDoc(
title='my_first_video_doc',
video_tensor=np.random.random((100, 224, 224, 3)),
)
doc_1.video_tensor.save(file_path='file_1.mp4')
doc_2 = MyVideoDoc(
title='my_second_video_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.video_tensor = parse_obj_as(VideoNdArray, doc_2.url.load().video)
doc_2.video_tensor.save(file_path='file_2.mp4')
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, List[Any], Tuple[Any], Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
tensor = super().validate(value=value, field=field, config=config)
return cls.validate_shape(value=tensor)
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.losses.CSRLoss import CSRLoss
from sentence_transformers.sparse_encoder.losses.CSRReconstructionLoss import (
CSRReconstructionLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseCachedGISTEmbedLoss import (
SparseCachedGISTEmbedLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseCachedMultipleNegativesRankingLoss import (
SparseCachedMultipleNegativesRankingLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import (
SparseCoSENTLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseCosineSimilarityLoss import (
SparseCosineSimilarityLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseGISTEmbedLoss import (
SparseGISTEmbedLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseMarginMSELoss import (
SparseMarginMSELoss,
)
from sentence_transformers.sparse_encoder.losses.SparseMultipleNegativesRankingLoss import (
SparseMultipleNegativesRankingLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseTripletLoss import (
SparseTripletLoss,
)
__all__ = [
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
"SparseCoSENTLoss",
"SparseTripletLoss",
"SparseCachedMultipleNegativesRankingLoss",
"SparseMarginMSELoss",
"SparseGISTEmbedLoss",
"SparseCachedGISTEmbedLoss",
"SparseCosineSimilarityLoss",
]
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.losses.CSRLoss import CSRLoss
from sentence_transformers.sparse_encoder.losses.CSRReconstructionLoss import (
CSRReconstructionLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseMultipleNegativesRankingLoss import (
SparseMultipleNegativesRankingLoss,
)
__all__ = [
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
]
|
from sentence_transformers import SentenceTransformer
from . import SentenceEvaluator
from typing import Iterable
class SequentialEvaluator(SentenceEvaluator):
"""
This evaluator allows multiple sub-evaluators to be passed. When the model is evaluated,
the data is passed sequentially to all sub-evaluators.
All scores are passed to 'main_score_function', which derives one final score value.
"""
def __init__(self, evaluators: Iterable[SentenceEvaluator], main_score_function=lambda scores: scores[-1]):
self.evaluators = evaluators
self.main_score_function = main_score_function
def __call__(self, model: SentenceTransformer, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
scores = []
for evaluator in self.evaluators:
scores.append(evaluator(model, output_path, epoch, steps))
return self.main_score_function(scores)
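# Hedged usage sketch with stub evaluators standing in for real ones
# (e.g. EmbeddingSimilarityEvaluator); the scores are made up.
class _ConstantEvaluator(SentenceEvaluator):
    """Stub evaluator that always returns a fixed score."""
    def __init__(self, score: float):
        self.score = score
    def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
        return self.score

seq_evaluator = SequentialEvaluator(
    [_ConstantEvaluator(0.7), _ConstantEvaluator(0.9)],
    main_score_function=lambda scores: sum(scores) / len(scores),  # report the average instead of the last score
)
print(seq_evaluator(model=None))  # 0.8 with the stub scores above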
|
from . import SentenceEvaluator
from typing import Iterable
class SequentialEvaluator(SentenceEvaluator):
"""
This evaluator allows multiple sub-evaluators to be passed. When the model is evaluated,
the data is passed sequentially to all sub-evaluators.
All scores are passed to 'main_score_function', which derives one final score value.
"""
def __init__(self, evaluators: Iterable[SentenceEvaluator], main_score_function=lambda scores: scores[-1]):
self.evaluators = evaluators
self.main_score_function = main_score_function
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
scores = []
for evaluator in self.evaluators:
scores.append(evaluator(model, output_path, epoch, steps))
return self.main_score_function(scores)
|
from typing import Any, Dict, Optional
from llama_index.core.base.llms.types import LLMMetadata
from llama_index.core.bridge.pydantic import Field
from llama_index.core.constants import (
DEFAULT_CONTEXT_WINDOW,
DEFAULT_NUM_OUTPUTS,
DEFAULT_TEMPERATURE,
)
from llama_index.core.base.llms.generic_utils import get_from_param_or_env
from llama_index.llms.openai_like import OpenAILike
DEFAULT_API_BASE = "https://openrouter.ai/api/v1"
DEFAULT_MODEL = "gryphe/mythomax-l2-13b"
class OpenRouter(OpenAILike):
"""
OpenRouter LLM.
To instantiate the `OpenRouter` class, you will need to provide an API key. You can set the API key either as an environment variable `OPENROUTER_API_KEY` or directly in the class
constructor. If setting it in the class constructor, pass it as the `api_key` argument, as shown in the example below.
If you haven't signed up for an API key yet, you can do so on the OpenRouter website (https://openrouter.ai). Once you have your API key, you can use the `OpenRouter` class to interact
with the LLM for tasks like chatting, streaming, and completing prompts.
Examples:
`pip install llama-index-llms-openrouter`
```python
from llama_index.llms.openrouter import OpenRouter
llm = OpenRouter(
api_key="<your-api-key>",
max_tokens=256,
context_window=4096,
model="gryphe/mythomax-l2-13b",
)
response = llm.complete("Hello World!")
print(str(response))
```
"""
model: str = Field(
description="The OpenRouter model to use. See https://openrouter.ai/models for options."
)
context_window: int = Field(
default=DEFAULT_CONTEXT_WINDOW,
description="The maximum number of context tokens for the model. See https://openrouter.ai/models for options.",
gt=0,
)
is_chat_model: bool = Field(
default=True,
description=LLMMetadata.model_fields["is_chat_model"].description,
)
def __init__(
self,
model: str = DEFAULT_MODEL,
temperature: float = DEFAULT_TEMPERATURE,
max_tokens: int = DEFAULT_NUM_OUTPUTS,
additional_kwargs: Optional[Dict[str, Any]] = None,
max_retries: int = 5,
api_base: Optional[str] = DEFAULT_API_BASE,
api_key: Optional[str] = None,
**kwargs: Any,
) -> None:
additional_kwargs = additional_kwargs or {}
api_base = get_from_param_or_env("api_base", api_base, "OPENROUTER_API_BASE")
api_key = get_from_param_or_env("api_key", api_key, "OPENROUTER_API_KEY")
super().__init__(
model=model,
temperature=temperature,
max_tokens=max_tokens,
api_base=api_base,
api_key=api_key,
additional_kwargs=additional_kwargs,
max_retries=max_retries,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
return "OpenRouter_LLM"
|
from typing import Any, Dict, Optional
from llama_index.core.base.llms.types import LLMMetadata
from llama_index.core.bridge.pydantic import Field
from llama_index.core.constants import (
DEFAULT_CONTEXT_WINDOW,
DEFAULT_NUM_OUTPUTS,
DEFAULT_TEMPERATURE,
)
from llama_index.core.base.llms.generic_utils import get_from_param_or_env
from llama_index.llms.openai_like import OpenAILike
DEFAULT_API_BASE = "https://openrouter.ai/api/v1"
DEFAULT_MODEL = "gryphe/mythomax-l2-13b"
class OpenRouter(OpenAILike):
"""OpenRouter LLM.
To instantiate the `OpenRouter` class, you will need to provide an API key. You can set the API key either as an environment variable `OPENROUTER_API_KEY` or directly in the class
constructor. If setting it in the class constructor, pass it as the `api_key` argument, as shown in the example below.
If you haven't signed up for an API key yet, you can do so on the OpenRouter website (https://openrouter.ai). Once you have your API key, you can use the `OpenRouter` class to interact
with the LLM for tasks like chatting, streaming, and completing prompts.
Examples:
`pip install llama-index-llms-openrouter`
```python
from llama_index.llms.openrouter import OpenRouter
llm = OpenRouter(
api_key="<your-api-key>",
max_tokens=256,
context_window=4096,
model="gryphe/mythomax-l2-13b",
)
response = llm.complete("Hello World!")
print(str(response))
```
"""
model: str = Field(
description="The OpenRouter model to use. See https://openrouter.ai/models for options."
)
context_window: int = Field(
default=DEFAULT_CONTEXT_WINDOW,
description="The maximum number of context tokens for the model. See https://openrouter.ai/models for options.",
gt=0,
)
is_chat_model: bool = Field(
default=True,
description=LLMMetadata.model_fields["is_chat_model"].description,
)
def __init__(
self,
model: str = DEFAULT_MODEL,
temperature: float = DEFAULT_TEMPERATURE,
max_tokens: int = DEFAULT_NUM_OUTPUTS,
additional_kwargs: Optional[Dict[str, Any]] = None,
max_retries: int = 5,
api_base: Optional[str] = DEFAULT_API_BASE,
api_key: Optional[str] = None,
**kwargs: Any,
) -> None:
additional_kwargs = additional_kwargs or {}
api_base = get_from_param_or_env("api_base", api_base, "OPENROUTER_API_BASE")
api_key = get_from_param_or_env("api_key", api_key, "OPENROUTER_API_KEY")
super().__init__(
model=model,
temperature=temperature,
max_tokens=max_tokens,
api_base=api_base,
api_key=api_key,
additional_kwargs=additional_kwargs,
max_retries=max_retries,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
return "OpenRouter_LLM"
|
DEPRECATED_ARGS_MAPPING = {
'override_with': 'uses_with',
'override_metas': 'uses_metas',
'override_requests': 'uses_requests',
'port_expose': 'port',
'parallel': 'One of "shards" (when dividing data in indexers) or "replicas" (replicating Executors for performance and reliability)',
'port_in': 'port',
'host_in': 'host',
'https': 'tls',
'disable_reduce': 'no_reduce',
'deployments_disable_reduce': 'deployments_no_reduce'
}
def get_deprecated_replacement(dep_arg: str) -> str:
"""Get the replacement of a deprecated argument
:param dep_arg: the old dep arg
:return: the new argument
"""
normalized_arg = dep_arg.replace('--', '').replace('-', '_')
if normalized_arg in DEPRECATED_ARGS_MAPPING:
new_argument = DEPRECATED_ARGS_MAPPING[normalized_arg]
if '-' in dep_arg:
new_argument = new_argument.replace('_', '-')
if dep_arg.startswith('--'):
new_argument = '--' + new_argument
elif dep_arg.startswith('-'):
new_argument = '-' + new_argument
return new_argument
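# Hedged usage sketch: the replacement keeps the original dash/underscore style
# and leading-dash prefix of the deprecated argument. The argument values are
# examples taken from the mapping above.
print(get_deprecated_replacement('--port_in'))         # --port
print(get_deprecated_replacement('--disable-reduce'))  # --no-reduce
print(get_deprecated_replacement('port_expose'))       # port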
|
DEPRECATED_ARGS_MAPPING = {
'override_with': 'uses_with',
'override_metas': 'uses_metas',
'override_requests': 'uses_requests',
'port_expose': 'port',
'parallel': 'One of "shards" (when dividing data in indexers) or "replicas" (replicating Executors for performance and reliability)',
'port_in': 'port',
'https': 'tls',
}
def get_deprecated_replacement(dep_arg: str) -> str:
"""Get the replacement of a deprecated argument
:param dep_arg: the old dep arg
:return: the new argument
"""
normalized_arg = dep_arg.replace('--', '').replace('-', '_')
if normalized_arg in DEPRECATED_ARGS_MAPPING:
new_argument = DEPRECATED_ARGS_MAPPING[normalized_arg]
if '-' in dep_arg:
new_argument = new_argument.replace('_', '-')
if dep_arg.startswith('--'):
new_argument = '--' + new_argument
elif dep_arg.startswith('-'):
new_argument = '-' + new_argument
return new_argument
|
from typing import MutableSequence, TYPE_CHECKING, Union, Iterable
from docarray import Document
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import T
class BaseDocumentArray(MutableSequence[Document]):
def __init__(self, *args, storage: str = 'memory', **kwargs):
super().__init__()
self._init_storage(*args, **kwargs)
self._init_subindices(*args, **kwargs)
def __add__(self: 'T', other: Union['Document', Iterable['Document']]) -> 'T':
v = type(self)(self)
v.extend(other)
return v
|
from typing import MutableSequence, TYPE_CHECKING, Union, Iterable
from docarray import Document
if TYPE_CHECKING:
from docarray.typing import T
class BaseDocumentArray(MutableSequence[Document]):
def __init__(self, *args, storage: str = 'memory', **kwargs):
super().__init__()
self._init_storage(*args, **kwargs)
self._init_subindices(*args, **kwargs)
def __add__(self: 'T', other: Union['Document', Iterable['Document']]) -> 'T':
v = type(self)(self)
v.extend(other)
return v
|
from typing import Any, Optional, Type, TypeVar, Union
from docarray.base_doc import BaseDoc
from docarray.documents.mesh.vertices_and_faces import VerticesAndFaces
from docarray.typing.tensor.embedding import AnyEmbedding
from docarray.typing.url.url_3d.mesh_url import Mesh3DUrl
T = TypeVar('T', bound='Mesh3D')
class Mesh3D(BaseDoc):
"""
Document for handling meshes for 3D data representation.
A mesh is a representation for 3D data and contains vertices and faces information.
Vertices are points in a 3D space, represented as a tensor of shape (n_points, 3).
Faces are triangular surfaces that can be defined by three points in 3D space,
corresponding to the three vertices of a triangle. Faces can be represented as a
tensor of shape (n_faces, 3). Each number in that tensor refers to an index of a
vertex in the tensor of vertices.
The Mesh3D Document can contain:
- an [`Mesh3DUrl`][docarray.typing.url.Mesh3DUrl] (`Mesh3D.url`)
- a [`VerticesAndFaces`][docarray.documents.mesh.vertices_and_faces.VerticesAndFaces]
object containing:
- an [`AnyTensor`](../../../../api_references/typing/tensor/tensor) of
vertices (`Mesh3D.tensors.vertices`)
- an [`AnyTensor`](../../../../api_references/typing/tensor/tensor) of faces (`Mesh3D.tensors.faces`)
- an [`AnyEmbedding`](../../../../api_references/typing/tensor/embedding) (`Mesh3D.embedding`)
- a `bytes` object (`Mesh3D.bytes_`).
You can use this Document directly:
```python
from docarray.documents import Mesh3D
# use it directly
mesh = Mesh3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
mesh.tensors = mesh.url.load()
# model = MyEmbeddingModel()
# mesh.embedding = model(mesh.tensors.vertices)
```
You can extend this Document:
```python
from docarray.documents import Mesh3D
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyMesh3D(Mesh3D):
name: Optional[str]
mesh = MyMesh3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
mesh.name = 'my first mesh'
mesh.tensors = mesh.url.load()
# model = MyEmbeddingModel()
# mesh.embedding = model(mesh.vertices)
```
You can use this Document for composition:
```python
from docarray import BaseDoc
from docarray.documents import Mesh3D, TextDoc
# compose it
class MultiModalDoc(BaseDoc):
mesh: Mesh3D
text: TextDoc
mmdoc = MultiModalDoc(
mesh=Mesh3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'),
text=TextDoc(text='hello world, how are you doing?'),
)
mmdoc.mesh.tensors = mmdoc.mesh.url.load()
# or
mmdoc.mesh.bytes_ = mmdoc.mesh.url.load_bytes()
```
You can display your 3D mesh in a notebook from either its url, or its tensors:
```python
from docarray.documents import Mesh3D
# display from url
mesh = Mesh3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
# mesh.url.display()
# display from tensors
mesh.tensors = mesh.url.load()
# mesh.tensors.display()
```
"""
url: Optional[Mesh3DUrl]
tensors: Optional[VerticesAndFaces]
embedding: Optional[AnyEmbedding]
bytes_: Optional[bytes]
@classmethod
def validate(
cls: Type[T],
value: Union[str, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
return super().validate(value)
|
from typing import Any, Optional, Type, TypeVar, Union
from docarray.base_doc import BaseDoc
from docarray.documents.mesh.vertices_and_faces import VerticesAndFaces
from docarray.typing.tensor.embedding import AnyEmbedding
from docarray.typing.url.url_3d.mesh_url import Mesh3DUrl
T = TypeVar('T', bound='Mesh3D')
class Mesh3D(BaseDoc):
"""
Document for handling meshes for 3D data representation.
A mesh is a representation for 3D data and contains vertices and faces information.
Vertices are points in a 3D space, represented as a tensor of shape (n_points, 3).
Faces are triangular surfaces that can be defined by three points in 3D space,
corresponding to the three vertices of a triangle. Faces can be represented as a
tensor of shape (n_faces, 3). Each number in that tensor refers to an index of a
vertex in the tensor of vertices.
The Mesh3D Document can contain a Mesh3DUrl (`Mesh3D.url`), a VerticesAndFaces
object containing an AnyTensor of vertices (`Mesh3D.tensors.vertices`) and an
AnyTensor of faces (`Mesh3D.tensors.faces`), and an AnyEmbedding
(`Mesh3D.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import Mesh3D
# use it directly
mesh = Mesh3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
mesh.tensors = mesh.url.load()
model = MyEmbeddingModel()
mesh.embedding = model(mesh.tensors.vertices)
You can extend this Document:
.. code-block:: python
from docarray.documents import Mesh3D
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyMesh3D(Mesh3D):
name: Optional[Text]
mesh = MyMesh3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
mesh.tensors = mesh.url.load()
model = MyEmbeddingModel()
mesh.embedding = model(mesh.vertices)
mesh.name = 'my first mesh'
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDoc
from docarray.documents import Mesh3D, Text
# compose it
class MultiModalDoc(BaseDoc):
mesh: Mesh3D
text: Text
mmdoc = MultiModalDoc(
mesh=Mesh3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.mesh.tensors = mmdoc.mesh.url.load()
# or
mmdoc.mesh.bytes_ = mmdoc.mesh.url.load_bytes()
You can display your 3D mesh in a notebook from either its url, or its tensors:
.. code-block:: python
from docarray.documents import Mesh3D
# display from url
mesh = Mesh3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
mesh.url.display()
# display from tensors
mesh.tensors = mesh.url.load()
model = MyEmbeddingModel()
mesh.embedding = model(mesh.tensors.vertices)
"""
url: Optional[Mesh3DUrl]
tensors: Optional[VerticesAndFaces]
embedding: Optional[AnyEmbedding]
bytes_: Optional[bytes]
@classmethod
def validate(
cls: Type[T],
value: Union[str, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
return super().validate(value)
|
# Copyright (c) OpenMMLab. All rights reserved.
from contextlib import contextmanager
import torch
import torch.nn as nn
from torch.cuda.amp import GradScaler
from mmengine.registry import OPTIM_WRAPPERS
from mmengine.utils import TORCH_VERSION, digit_version
from .optimizer_wrapper import OptimWrapper
@OPTIM_WRAPPERS.register_module()
class AmpOptimWrapper(OptimWrapper):
"""A subclass of :class:`OptimWrapper` that supports automatic mixed
precision training based on torch.cuda.amp.
``AmpOptimWrapper`` provides a unified interface with
``OptimWrapper``, so ``AmpOptimWrapper`` can be used in the same way
as ``OptimWrapper``.
Warnings:
``AmpOptimWrapper`` requires PyTorch >= 1.6.
Args:
loss_scale (float or str or dict): The initial configuration of
`torch.cuda.amp.GradScaler`. See the detailed argument
description at `PyTorch AMP <https://pytorch.org/docs/stable/amp.html?highlight=gradscalertorch.cuda.amp.GradScaler>`_ # noqa: E501
Defaults to ``dynamic``.
- "dynamic": Initialize GradScaler without any arguments.
- float: Initialize GradScaler with ``init_scale``.
- dict: Initialize GradScaler with a more detailed configuration.
**kwargs: Keyword arguments passed to OptimWrapper.
Note:
If you use ``IterBasedRunner`` and enable gradient accumulation,
the original `max_iters` should be multiplied by
``accumulative_counts``.
"""
def __init__(self, loss_scale='dynamic', **kwargs):
assert digit_version(TORCH_VERSION) >= digit_version('1.6.0'), (
'`torch.cuda.amp` is only available when pytorch version >= 1.6')
assert torch.cuda.is_available(), (
'``AmpOptimWrapper`` is only available when training on GPU')
super().__init__(**kwargs)
self._scale_update_param = None
if loss_scale == 'dynamic':
# If loss_scale is a string, it must be 'dynamic', then dynamic
# loss scaling will be used.
self.loss_scaler = GradScaler()
elif isinstance(loss_scale, float):
# Static loss scaling
self._scale_update_param = loss_scale
self.loss_scaler = GradScaler(init_scale=loss_scale)
elif isinstance(loss_scale, dict):
# More specific configuration.
self.loss_scaler = GradScaler(**loss_scale)
else:
raise TypeError('loss_scale must be of type float, dict, or '
f'"dynamic", but got {loss_scale}')
def backward(self, loss: torch.Tensor):
"""Perform gradient back propagation with :attr:`loss_scaler`.
Args:
loss (torch.Tensor): The loss of current iteration.
"""
self.loss_scaler.scale(loss).backward()
self._inner_count += 1
def step(self):
"""Update parameters with :attr:`loss_scaler`."""
if self.clip_grad_kwargs:
self.loss_scaler.unscale_(self.optimizer)
self._clip_grad()
self.loss_scaler.step(self.optimizer)
self.loss_scaler.update(self._scale_update_param)
def state_dict(self) -> dict:
"""Get the state dictionary of :attr:`optimizer` and
:attr:`loss_scaler`.
Based on the state dictionary of the optimizer, the returned state
dictionary will add a key named "loss_scaler".
Returns:
dict: The merged state dict of :attr:`loss_scaler` and
:attr:`optimizer`.
"""
# save state_dict of loss_scaler
state_dict = self.optimizer.state_dict()
state_dict['loss_scaler'] = self.loss_scaler.state_dict()
return state_dict
def load_state_dict(self, state_dict: dict):
"""Load and parse the state dictionary of :attr:`optimizer` and
:attr:`loss_scaler`.
If state_dict contains "loss_scaler.", the :attr:`loss_scaler` will
load the corresponding keys. Otherwise, only the :attr:`optimizer`
will load the state dictionary.
Args:
state_dict (dict): The state dict of :attr:`optimizer` and
:attr:`loss_scaler`
"""
if 'loss_scaler' in state_dict:
self.loss_scaler.load_state_dict(state_dict.pop('loss_scaler'))
self.optimizer.load_state_dict(state_dict)
@contextmanager
def optim_context(self, model: nn.Module):
"""Enables the context for mixed precision training, and enables the
context for disabling gradient synchronization during gradient
accumulation context.
Args:
model (nn.Module): The training model.
"""
from mmengine.runner.amp import autocast
with super().optim_context(model), autocast():
yield
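# Hedged usage sketch; ``model`` stands in for any ``nn.Module`` and the
# hyper-parameters are illustrative only. A CUDA-capable environment is
# required because the wrapper asserts ``torch.cuda.is_available()``.
def _example_amp_optim_wrapper(model: nn.Module) -> AmpOptimWrapper:
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    # 'dynamic' -> GradScaler() with default dynamic loss scaling
    return AmpOptimWrapper(optimizer=optimizer, loss_scale='dynamic')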
|
# Copyright (c) OpenMMLab. All rights reserved.
from contextlib import contextmanager
import torch
import torch.nn as nn
from torch.cuda.amp import GradScaler
from mmengine.registry import OPTIM_WRAPPERS
from mmengine.utils import TORCH_VERSION, digit_version
from .optimizer_wrapper import OptimWrapper
@OPTIM_WRAPPERS.register_module()
class AmpOptimWrapper(OptimWrapper):
"""A subclass of :class:`OptimWrapper` that supports automatic mixed
precision training based on torch.cuda.amp.
``AmpOptimWrapper`` provides a unified interface with
``OptimWrapper``, so ``AmpOptimWrapper`` can be used in the same way
as ``OptimWrapper``.
Warnings:
``AmpOptimWrapper`` requires PyTorch >= 1.6.
Args:
loss_scale (float or str or dict): The initial configuration of
`torch.cuda.amp.GradScaler`. See the detailed argument
description at `PyTorch AMP <https://pytorch.org/docs/stable/amp.html?highlight=gradscalertorch.cuda.amp.GradScaler>`_ # noqa: E501
Defaults to ``dynamic``.
- "dynamic": Initialize GradScaler without any arguments.
- float: Initialize GradScaler with ``init_scale``.
- dict: Initialize GradScaler with a more detailed configuration.
**kwargs: Keyword arguments passed to OptimWrapper.
Note:
If you use ``IterBasedRunner`` and enable gradient accumulation,
the original `max_iters` should be multiplied by
``accumulative_counts``.
"""
def __init__(self, loss_scale='dynamic', **kwargs):
assert digit_version(TORCH_VERSION) >= digit_version('1.6.0'), (
'`torch.cuda.amp` is only available when pytorch version >= 1.6')
assert torch.cuda.is_available(), (
'``AmpOptimWrapper`` is only available when training on GPU')
super().__init__(**kwargs)
self._scale_update_param = None
if loss_scale == 'dynamic':
# If loss_scale is a string, it must be 'dynamic', then dynamic
# loss scaling will be used.
self.loss_scaler = GradScaler()
elif isinstance(loss_scale, float):
# Static loss scaling
self._scale_update_param = loss_scale
self.loss_scaler = GradScaler(init_scale=loss_scale)
elif isinstance(loss_scale, dict):
# More specific configuration.
self.loss_scaler = GradScaler(**loss_scale)
else:
raise TypeError('loss_scale must be of type float, dict, or '
f'"dynamic", but got {loss_scale}')
def backward(self, loss: torch.Tensor):
"""Perform gradient back propagation with :attr:`loss_scaler`.
Args:
loss (torch.Tensor): The loss of current iteration.
"""
self.loss_scaler.scale(loss).backward()
self._inner_count += 1
def step(self):
"""Update parameters with :attr:`loss_scaler`."""
if self.clip_grad_kwargs:
self.loss_scaler.unscale_(self.optimizer)
self._clip_grad()
self.loss_scaler.step(self.optimizer)
self.loss_scaler.update(self._scale_update_param)
def state_dict(self) -> dict:
"""Get the state dictionary of :attr:`optimizer` and
:attr:`loss_scaler`.
Based on the state dictionary of the optimizer, the returned state
dictionary will add a key named "loss_scaler".
Returns:
dict: The merged state dict of :attr:`loss_scaler` and
:attr:`optimizer`.
"""
# save state_dict of loss_scaler
state_dict = self.optimizer.state_dict()
state_dict['loss_scaler'] = self.loss_scaler.state_dict()
return state_dict
def load_state_dict(self, state_dict: dict):
"""Load and parse the state dictionary of :attr:`optimizer` and
:attr:`loss_scaler`.
If state_dict contains "loss_scaler.", the :attr:`loss_scaler` will
load the corresponding keys. Otherwise, only the :attr:`optimizer`
will load the state dictionary.
Args:
state_dict (dict): The state dict of :attr:`optimizer` and
:attr:`loss_scaler`
"""
if 'loss_scaler' in state_dict:
self.loss_scaler.load_state_dict(state_dict.pop('loss_scaler'))
self.optimizer.load_state_dict(state_dict)
@contextmanager
def optim_context(self, model: nn.Module):
"""Enables the context for mixed precision training, and enables the
context for disabling gradient synchronization during gradient
accumulation context.
Args:
model (nn.Module): The training model.
"""
with super().optim_context(model), torch.cuda.amp.autocast():
yield
|
"""Test Anthropic API wrapper."""
from collections.abc import Generator
import pytest
from langchain_core.callbacks import CallbackManager
from langchain_core.outputs import LLMResult
from langchain_anthropic import Anthropic
from tests.unit_tests._utils import FakeCallbackHandler
@pytest.mark.requires("anthropic")
def test_anthropic_model_name_param() -> None:
llm = Anthropic(model_name="foo")
assert llm.model == "foo"
@pytest.mark.requires("anthropic")
def test_anthropic_model_param() -> None:
llm = Anthropic(model="foo") # type: ignore[call-arg]
assert llm.model == "foo"
def test_anthropic_call() -> None:
"""Test valid call to anthropic."""
llm = Anthropic(model="claude-2.1") # type: ignore[call-arg]
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_anthropic_streaming() -> None:
"""Test streaming tokens from anthropic."""
llm = Anthropic(model="claude-2.1") # type: ignore[call-arg]
generator = llm.stream("I'm Pickle Rick")
assert isinstance(generator, Generator)
for token in generator:
assert isinstance(token, str)
def test_anthropic_streaming_callback() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
llm = Anthropic(
streaming=True,
callback_manager=callback_manager,
verbose=True,
)
llm.invoke("Write me a sentence with 100 words.")
assert callback_handler.llm_streams > 1
async def test_anthropic_async_generate() -> None:
"""Test async generate."""
llm = Anthropic()
output = await llm.agenerate(["How many toes do dogs have?"])
assert isinstance(output, LLMResult)
async def test_anthropic_async_streaming_callback() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
llm = Anthropic(
streaming=True,
callback_manager=callback_manager,
verbose=True,
)
result = await llm.agenerate(["How many toes do dogs have?"])
assert callback_handler.llm_streams > 1
assert isinstance(result, LLMResult)
|
"""Test Anthropic API wrapper."""
from typing import Generator
import pytest
from langchain_core.callbacks import CallbackManager
from langchain_core.outputs import LLMResult
from langchain_anthropic import Anthropic
from tests.unit_tests._utils import FakeCallbackHandler
@pytest.mark.requires("anthropic")
def test_anthropic_model_name_param() -> None:
llm = Anthropic(model_name="foo")
assert llm.model == "foo"
@pytest.mark.requires("anthropic")
def test_anthropic_model_param() -> None:
llm = Anthropic(model="foo") # type: ignore[call-arg]
assert llm.model == "foo"
def test_anthropic_call() -> None:
"""Test valid call to anthropic."""
llm = Anthropic(model="claude-2.1") # type: ignore[call-arg]
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_anthropic_streaming() -> None:
"""Test streaming tokens from anthropic."""
llm = Anthropic(model="claude-2.1") # type: ignore[call-arg]
generator = llm.stream("I'm Pickle Rick")
assert isinstance(generator, Generator)
for token in generator:
assert isinstance(token, str)
def test_anthropic_streaming_callback() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
llm = Anthropic(
streaming=True,
callback_manager=callback_manager,
verbose=True,
)
llm.invoke("Write me a sentence with 100 words.")
assert callback_handler.llm_streams > 1
async def test_anthropic_async_generate() -> None:
"""Test async generate."""
llm = Anthropic()
output = await llm.agenerate(["How many toes do dogs have?"])
assert isinstance(output, LLMResult)
async def test_anthropic_async_streaming_callback() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
llm = Anthropic(
streaming=True,
callback_manager=callback_manager,
verbose=True,
)
result = await llm.agenerate(["How many toes do dogs have?"])
assert callback_handler.llm_streams > 1
assert isinstance(result, LLMResult)
|
import logging
from typing import Any
from backend.data.block import (
Block,
BlockCategory,
BlockInput,
BlockOutput,
BlockSchema,
BlockType,
get_block,
)
from backend.data.execution import ExecutionStatus
from backend.data.model import SchemaField
from backend.util import json
logger = logging.getLogger(__name__)
class AgentExecutorBlock(Block):
class Input(BlockSchema):
user_id: str = SchemaField(description="User ID")
graph_id: str = SchemaField(description="Graph ID")
graph_version: int = SchemaField(description="Graph Version")
data: BlockInput = SchemaField(description="Input data for the graph")
input_schema: dict = SchemaField(description="Input schema for the graph")
output_schema: dict = SchemaField(description="Output schema for the graph")
@classmethod
def get_input_schema(cls, data: BlockInput) -> dict[str, Any]:
return data.get("input_schema", {})
@classmethod
def get_input_defaults(cls, data: BlockInput) -> BlockInput:
return data.get("data", {})
@classmethod
def get_missing_input(cls, data: BlockInput) -> set[str]:
required_fields = cls.get_input_schema(data).get("required", [])
return set(required_fields) - set(data)
@classmethod
def get_mismatch_error(cls, data: BlockInput) -> str | None:
return json.validate_with_jsonschema(cls.get_input_schema(data), data)
class Output(BlockSchema):
pass
def __init__(self):
super().__init__(
id="e189baac-8c20-45a1-94a7-55177ea42565",
description="Executes an existing agent inside your agent",
input_schema=AgentExecutorBlock.Input,
output_schema=AgentExecutorBlock.Output,
block_type=BlockType.AGENT,
categories={BlockCategory.AGENT},
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
from backend.data.execution import ExecutionEventType
from backend.executor import utils as execution_utils
event_bus = execution_utils.get_execution_event_bus()
graph_exec = execution_utils.add_graph_execution(
graph_id=input_data.graph_id,
graph_version=input_data.graph_version,
user_id=input_data.user_id,
data=input_data.data,
)
log_id = f"Graph #{input_data.graph_id}-V{input_data.graph_version}, exec-id: {graph_exec.graph_exec_id}"
logger.info(f"Starting execution of {log_id}")
for event in event_bus.listen(
user_id=graph_exec.user_id,
graph_id=graph_exec.graph_id,
graph_exec_id=graph_exec.graph_exec_id,
):
if event.event_type == ExecutionEventType.GRAPH_EXEC_UPDATE:
if event.status in [
ExecutionStatus.COMPLETED,
ExecutionStatus.TERMINATED,
ExecutionStatus.FAILED,
]:
logger.info(f"Execution {log_id} ended with status {event.status}")
break
else:
continue
logger.info(
f"Execution {log_id} produced input {event.input_data} output {event.output_data}"
)
if not event.block_id:
logger.warning(f"{log_id} received event without block_id {event}")
continue
block = get_block(event.block_id)
if not block or block.block_type != BlockType.OUTPUT:
continue
output_name = event.input_data.get("name")
if not output_name:
logger.warning(f"{log_id} produced an output with no name {event}")
continue
for output_data in event.output_data.get("output", []):
logger.info(f"Execution {log_id} produced {output_name}: {output_data}")
yield output_name, output_data
|
import logging
from typing import Any
from autogpt_libs.utils.cache import thread_cached
from backend.data.block import (
Block,
BlockCategory,
BlockInput,
BlockOutput,
BlockSchema,
BlockType,
get_block,
)
from backend.data.execution import ExecutionStatus
from backend.data.model import SchemaField
from backend.util import json
logger = logging.getLogger(__name__)
@thread_cached
def get_executor_manager_client():
from backend.executor import ExecutionManager
from backend.util.service import get_service_client
return get_service_client(ExecutionManager)
@thread_cached
def get_event_bus():
from backend.data.execution import RedisExecutionEventBus
return RedisExecutionEventBus()
class AgentExecutorBlock(Block):
class Input(BlockSchema):
user_id: str = SchemaField(description="User ID")
graph_id: str = SchemaField(description="Graph ID")
graph_version: int = SchemaField(description="Graph Version")
data: BlockInput = SchemaField(description="Input data for the graph")
input_schema: dict = SchemaField(description="Input schema for the graph")
output_schema: dict = SchemaField(description="Output schema for the graph")
@classmethod
def get_input_schema(cls, data: BlockInput) -> dict[str, Any]:
return data.get("input_schema", {})
@classmethod
def get_input_defaults(cls, data: BlockInput) -> BlockInput:
return data.get("data", {})
@classmethod
def get_missing_input(cls, data: BlockInput) -> set[str]:
required_fields = cls.get_input_schema(data).get("required", [])
return set(required_fields) - set(data)
@classmethod
def get_mismatch_error(cls, data: BlockInput) -> str | None:
return json.validate_with_jsonschema(cls.get_input_schema(data), data)
class Output(BlockSchema):
pass
def __init__(self):
super().__init__(
id="e189baac-8c20-45a1-94a7-55177ea42565",
description="Executes an existing agent inside your agent",
input_schema=AgentExecutorBlock.Input,
output_schema=AgentExecutorBlock.Output,
block_type=BlockType.AGENT,
categories={BlockCategory.AGENT},
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
from backend.data.execution import ExecutionEventType
executor_manager = get_executor_manager_client()
event_bus = get_event_bus()
graph_exec = executor_manager.add_execution(
graph_id=input_data.graph_id,
graph_version=input_data.graph_version,
user_id=input_data.user_id,
data=input_data.data,
)
log_id = f"Graph #{input_data.graph_id}-V{input_data.graph_version}, exec-id: {graph_exec.graph_exec_id}"
logger.info(f"Starting execution of {log_id}")
for event in event_bus.listen(
user_id=graph_exec.user_id,
graph_id=graph_exec.graph_id,
graph_exec_id=graph_exec.graph_exec_id,
):
if event.event_type == ExecutionEventType.GRAPH_EXEC_UPDATE:
if event.status in [
ExecutionStatus.COMPLETED,
ExecutionStatus.TERMINATED,
ExecutionStatus.FAILED,
]:
logger.info(f"Execution {log_id} ended with status {event.status}")
break
else:
continue
logger.info(
f"Execution {log_id} produced input {event.input_data} output {event.output_data}"
)
if not event.block_id:
logger.warning(f"{log_id} received event without block_id {event}")
continue
block = get_block(event.block_id)
if not block or block.block_type != BlockType.OUTPUT:
continue
output_name = event.input_data.get("name")
if not output_name:
logger.warning(f"{log_id} produced an output with no name {event}")
continue
for output_data in event.output_data.get("output", []):
logger.info(f"Execution {log_id} produced {output_name}: {output_data}")
yield output_name, output_data
|
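Both AgentExecutorBlock variants above gate execution on the same two schema checks: get_missing_input (required keys absent from the payload) and get_mismatch_error (full JSON-schema validation via backend.util.json). That helper is not shown here, so the following is a minimal sketch of the same two checks using the standard jsonschema package; the schema and payload values are made up for illustration.
import jsonschema
input_schema = {
    "type": "object",
    "properties": {"user_id": {"type": "string"}, "count": {"type": "integer"}},
    "required": ["user_id", "count"],
}
payload = {"user_id": "u-123"}
# Equivalent of get_missing_input: required keys that the payload does not provide.
print(set(input_schema.get("required", [])) - set(payload))  # {'count'}
# Equivalent of get_mismatch_error: None when the payload validates, else the error text.
try:
    jsonschema.validate(payload, input_schema)
    mismatch_error = None
except jsonschema.ValidationError as exc:
    mismatch_error = exc.message
print(mismatch_error)  # "'count' is a required property"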
"""A script to generate math_impl.h.
Prerequisites:
python 3.11 or newer
functional_algorithms 0.3.1 or newer
Usage:
Running
python /path/to/generate_math_impl.py [xla | tensorflow]
will create
    /path/to/math_impl.h
"""
import os
import sys
import warnings
try:
import functional_algorithms as fa # pylint: disable=g-import-not-at-top
except ImportError as msg:
warnings.warn(f"Skipping: {msg}")
fa = None
def main():
if fa is None:
return
target = (sys.argv[1] if len(sys.argv) > 1 else "xla").lower()
assert target in {"xla", "tensorflow"}, target
header_file_define = dict(
xla="XLA_CLIENT_MATH_IMPL_H_",
tensorflow="TENSORFLOW_COMPILER_XLA_CLIENT_MATH_IMPL_H_",
)[target]
fa_version = tuple(map(int, fa.__version__.split(".", 4)[:3]))
if fa_version < (0, 3, 1):
warnings.warn(
"functional_algorithm version 0.3.1 or newer is required,"
f" got {fa.__version__}"
)
return
output_file = os.path.join(os.path.dirname(__file__), "math_impl.h")
sources = []
target = fa.targets.xla_client
for xlaname, fname, args in [
("AsinComplex", "complex_asin", ("z:complex",)),
("AsinReal", "real_asin", ("x:float",)),
]:
func = getattr(fa.algorithms, fname, None)
if func is None:
warnings.warn(
f"{fa.algorithms.__name__} does not define {fname}. Skipping."
)
continue
ctx = fa.Context(
paths=[fa.algorithms],
enable_alt=True,
default_constant_type="FloatType",
)
graph = ctx.trace(func, *args).implement_missing(target).simplify()
graph.props.update(name=xlaname)
src = graph.tostring(target)
if func.__doc__:
sources.append(target.make_comment(func.__doc__))
sources[-1] += src
source = "\n\n".join(sources) + "\n"
if os.path.isfile(output_file):
f = open(output_file, "r", encoding="UTF-8")
content = f.read()
f.close()
if content.endswith(source) and 0:
warnings.warn(f"{output_file} is up-to-date.")
return
f = open(output_file, "w", encoding="UTF-8")
f.write("""/* Copyright 2024 The OpenXLA Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
""")
f.write(target.make_comment(f"""\
This file is generated using functional_algorithms tool ({fa.__version__}), see
https://github.com/pearu/functional_algorithms
for more information.""") + "\n")
f.write(f"""\
#ifndef {header_file_define}
#define {header_file_define}
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/xla_builder.h"
namespace xla {{
namespace math_impl {{
// NOLINTBEGIN(whitespace/line_length)
// clang-format off
""")
f.write(source)
f.write(f"""
// clang-format on
// NOLINTEND(whitespace/line_length)
}} // namespace math_impl
}} // namespace xla
#endif // {header_file_define}
""")
f.close()
warnings.warn(f"Created {output_file}")
if __name__ == "__main__":
main()
|
"""A script to generate math_impl.h.
Prerequisites:
python 3.11 or newer
functional_algorithms 0.3.1 or newer
Usage:
Running
python /path/to/generate_math_impl.py [xla | tensorflow]
will create
    /path/to/math_impl.h
"""
import os
import sys
import warnings
try:
import functional_algorithms as fa # pylint: disable=g-import-not-at-top
except ImportError as msg:
warnings.warn(f"Skipping: {msg}")
fa = None
def main():
if fa is None:
return
target = (sys.argv[1] if len(sys.argv) > 1 else "xla").lower()
assert target in {"xla", "tensorflow"}, target
header_file_define = dict(
xla="XLA_CLIENT_MATH_IMPL_H_",
tensorflow="TENSORFLOW_COMPILER_XLA_CLIENT_MATH_IMPL_H_",
)[target]
fa_version = tuple(map(int, fa.__version__.split(".", 4)[:3]))
if fa_version < (0, 3, 1):
warnings.warn(
"functional_algorithm version 0.3.1 or newer is required,"
f" got {fa.__version__}"
)
return
output_file = os.path.join(os.path.dirname(__file__), "math_impl.h")
sources = []
target = fa.targets.xla_client
for xlaname, fname, args in [
("AsinComplex", "complex_asin", ("z:complex",)),
("AsinReal", "real_asin", ("x:float",)),
]:
func = getattr(fa.algorithms, fname, None)
if func is None:
warnings.warn(
f"{fa.algorithms.__name__} does not define {fname}. Skipping."
)
continue
ctx = fa.Context(
paths=[fa.algorithms],
enable_alt=True,
default_constant_type="FloatType",
)
graph = ctx.trace(func, *args).implement_missing(target).simplify()
graph.props.update(name=xlaname)
src = graph.tostring(target)
if func.__doc__:
sources.append(target.make_comment(func.__doc__))
sources[-1] += src
source = "\n\n".join(sources) + "\n"
if os.path.isfile(output_file):
f = open(output_file, "r", encoding="UTF-8")
content = f.read()
f.close()
if content.endswith(source) and 0:
warnings.warn(f"{output_file} is up-to-date.")
return
f = open(output_file, "w", encoding="UTF-8")
f.write("""/* Copyright 2024 The OpenXLA Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
""")
f.write(target.make_comment(f"""\
This file is generated using functional_algorithms tool ({fa.__version__}), see
https://github.com/pearu/functional_algorithms
for more information.""") + "\n")
f.write(f"""\
#ifndef {header_file_define}
#define {header_file_define}
#include "xla/client/lib/constants.h"
#include "xla/client/xla_builder.h"
namespace xla {{
namespace math_impl {{
// NOLINTBEGIN(whitespace/line_length)
// clang-format off
""")
f.write(source)
f.write(f"""
// clang-format on
// NOLINTEND(whitespace/line_length)
}} // namespace math_impl
}} // namespace xla
#endif // {header_file_define}
""")
f.close()
warnings.warn(f"Created {output_file}")
if __name__ == "__main__":
main()
|
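Both copies of the script gate generation on the functional_algorithms version by converting the dotted version string into an integer tuple and comparing it lexicographically against (0, 3, 1). A minimal sketch of that comparison, with illustrative version strings:
def parse_version(version: str) -> tuple:
    # "0.3.1" -> (0, 3, 1); only the first three numeric components matter here.
    return tuple(int(part) for part in version.split(".")[:3])
print(parse_version("0.3.1") < (0, 3, 1))  # False -> new enough, generation proceeds
print(parse_version("0.2.9") < (0, 3, 1))  # True  -> too old, main() warns and returns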
# Copyright (c) OpenMMLab. All rights reserved.
import time
from typing import Optional, Sequence, Union
from mmengine.data import BaseDataElement
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_module()
class IterTimerHook(Hook):
"""A hook that logs the time spent during iteration.
E.g. ``data_time`` for loading data and ``time`` for a model train step.
"""
priority = 'NORMAL'
def __init__(self):
self.time_sec_tot = 0
self.start_iter = 0
def before_run(self, runner) -> None:
"""Synchronize the number of iterations with the runner.
Args:
runner: The runner of the training, validation or testing
process.
"""
self.start_iter = runner.iter
def _before_epoch(self, runner, mode: str = 'train') -> None:
"""Record timestamp before start an epoch.
Args:
runner (Runner): The runner of the training validation and
testing process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
self.t = time.time()
def _before_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
mode: str = 'train') -> None:
"""Calculating time for loading data and updating "data_time"
``HistoryBuffer`` of ``runner.message_hub``.
Args:
runner (Runner): The runner of the training, validation and
testing process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[dict], optional): Data from dataloader.
Defaults to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
# Update data loading time in `runner.message_hub`.
runner.message_hub.update_scalar(f'{mode}/data_time',
time.time() - self.t)
def _after_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[Union[dict,
Sequence[BaseDataElement]]] = None,
mode: str = 'train') -> None:
"""Calculating time for an iteration and updating "time"
``HistoryBuffer`` of ``runner.message_hub``.
Args:
runner (Runner): The runner of the training validation and
testing process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[dict], optional): Data from dataloader.
Defaults to None.
outputs (dict or sequence, optional): Outputs from model. Defaults
to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
# Update iteration time in `runner.message_hub`.
message_hub = runner.message_hub
message_hub.update_scalar(f'{mode}/time', time.time() - self.t)
self.t = time.time()
window_size = runner.log_processor.window_size
        # Calculate eta every `window_size` iterations. Since the test and val
        # loops do not update runner.iter, use `every_n_inner_iters` to check
        # the interval.
if self.every_n_inner_iters(batch_idx, window_size):
iter_time = message_hub.get_scalar(f'{mode}/time').mean(
window_size)
if mode == 'train':
self.time_sec_tot += iter_time * window_size
# Calculate average iterative time.
time_sec_avg = self.time_sec_tot / (
runner.iter - self.start_iter + 1)
# Calculate eta.
eta_sec = time_sec_avg * (runner.max_iters - runner.iter - 1)
runner.message_hub.update_info('eta', eta_sec)
else:
if mode == 'val':
cur_dataloader = runner.val_dataloader
else:
cur_dataloader = runner.test_dataloader
eta_sec = iter_time * (len(cur_dataloader) - batch_idx - 1)
runner.message_hub.update_info('eta', eta_sec)
|
# Copyright (c) OpenMMLab. All rights reserved.
import time
from typing import Optional, Sequence, Union
from mmengine.data import BaseDataElement
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_module()
class IterTimerHook(Hook):
"""A hook that logs the time spent during iteration.
E.g. ``data_time`` for loading data and ``time`` for a model train step.
"""
priority = 'NORMAL'
def __init__(self):
self.time_sec_tot = 0
self.start_iter = 0
def before_run(self, runner) -> None:
"""Synchronize the number of iterations with the runner.
Args:
runner: The runner of the training, validation or testing
process.
"""
self.start_iter = runner.iter
def _before_epoch(self, runner, mode: str = 'train') -> None:
"""Record timestamp before start an epoch.
Args:
runner (Runner): The runner of the training validation and
testing process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
self.t = time.time()
def _before_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
mode: str = 'train') -> None:
"""Calculating time for loading data and updating "data_time"
``HistoryBuffer`` of ``runner.message_hub``.
Args:
runner (Runner): The runner of the training, validation and
testing process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[dict], optional): Data from dataloader.
Defaults to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
# Update data loading time in `runner.message_hub`.
runner.message_hub.update_scalar(f'{mode}/data_time',
time.time() - self.t)
def _after_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[Union[dict,
Sequence[BaseDataElement]]] = None,
mode: str = 'train') -> None:
"""Calculating time for an iteration and updating "time"
``HistoryBuffer`` of ``runner.message_hub``.
Args:
runner (Runner): The runner of the training validation and
testing process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[dict], optional): Data from dataloader.
Defaults to None.
outputs (dict or sequence, optional): Outputs from model. Defaults
to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
# Update iteration time in `runner.message_hub`.
message_hub = runner.message_hub
message_hub.update_scalar(f'{mode}/time', time.time() - self.t)
self.t = time.time()
window_size = runner.log_processor.window_size
        # Calculate eta every `window_size` iterations. Since the test and val
        # loops do not update runner.iter, use `every_n_inner_iters` to check
        # the interval.
if self.every_n_inner_iters(batch_idx, window_size):
iter_time = message_hub.get_scalar(f'{mode}/time').mean(
window_size)
if mode == 'train':
self.time_sec_tot += iter_time * window_size
# Calculate average iterative time.
time_sec_avg = self.time_sec_tot / (
runner.iter - self.start_iter + 1)
# Calculate eta.
eta_sec = time_sec_avg * (
runner.train_loop.max_iters - runner.iter - 1)
runner.message_hub.update_info('eta', eta_sec)
else:
if mode == 'val':
cur_dataloader = runner.val_loop.dataloader
else:
cur_dataloader = runner.test_loop.dataloader
eta_sec = iter_time * (len(cur_dataloader) - batch_idx - 1)
runner.message_hub.update_info('eta', eta_sec)
|
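The ETA bookkeeping in IterTimerHook._after_iter boils down to a running average of the iteration time multiplied by the remaining iterations (train) or remaining batches (val/test). A small worked sketch with made-up numbers:
window_size = 50
iter_time = 0.2      # mean seconds per iteration over the last window
start_iter = 0
current_iter = 999   # runner.iter after 1000 training iterations (0-based)
max_iters = 10000
time_sec_tot = 20 * iter_time * window_size                    # 200.0 s accumulated over 20 windows
time_sec_avg = time_sec_tot / (current_iter - start_iter + 1)  # 0.2 s per iteration on average
print(time_sec_avg * (max_iters - current_iter - 1))           # train ETA: 0.2 * 9000 = 1800.0 s
batch_idx, num_batches = 10, 100
print(iter_time * (num_batches - batch_idx - 1))               # val/test ETA: 0.2 * 89 = 17.8 s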
_base_ = './yolox_s_8xb8-300e_coco.py'
# model settings
model = dict(
backbone=dict(deepen_factor=0.67, widen_factor=0.75),
neck=dict(in_channels=[192, 384, 768], out_channels=192, num_csp_blocks=2),
bbox_head=dict(in_channels=192, feat_channels=192),
)
|
_base_ = './yolox_s_8x8_300e_coco.py'
# model settings
model = dict(
backbone=dict(deepen_factor=0.67, widen_factor=0.75),
neck=dict(in_channels=[192, 384, 768], out_channels=192, num_csp_blocks=2),
bbox_head=dict(in_channels=192, feat_channels=192),
)
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
# 270k iterations with batch_size 64 is roughly equivalent to 144 epochs
'../common/ssj_270k_coco-instance.py',
]
image_size = (1024, 1024)
batch_augments = [
dict(type='BatchFixedSizePad', size=image_size, pad_mask=True)
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN, which handles empty tensors in the head. It can be changed to
# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed.
# Requires MMCV-full after https://github.com/open-mmlab/mmcv/pull/1205.
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True)
model = dict(
# the model is trained from scratch, so init_cfg is None
data_preprocessor=dict(
# pad_size_divisor=32 is unnecessary in training but necessary
# in testing.
pad_size_divisor=32,
batch_augments=batch_augments),
backbone=dict(
frozen_stages=-1, norm_eval=False, norm_cfg=norm_cfg, init_cfg=None),
neck=dict(norm_cfg=norm_cfg),
rpn_head=dict(num_convs=2), # leads to 0.1+ mAP
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=head_norm_cfg),
mask_head=dict(norm_cfg=head_norm_cfg)))
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
# 270k iterations with batch_size 64 is roughly equivalent to 144 epochs
'../common/ssj_270k_coco_instance.py',
]
image_size = (1024, 1024)
batch_augments = [
dict(type='BatchFixedSizePad', size=image_size, pad_mask=True)
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN, which handles empty tensors in the head. It can be changed to
# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed.
# Requires MMCV-full after https://github.com/open-mmlab/mmcv/pull/1205.
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True)
model = dict(
# the model is trained from scratch, so init_cfg is None
data_preprocessor=dict(
# pad_size_divisor=32 is unnecessary in training but necessary
# in testing.
pad_size_divisor=32,
batch_augments=batch_augments),
backbone=dict(
frozen_stages=-1, norm_eval=False, norm_cfg=norm_cfg, init_cfg=None),
neck=dict(norm_cfg=norm_cfg),
rpn_head=dict(num_convs=2), # leads to 0.1+ mAP
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=head_norm_cfg),
mask_head=dict(norm_cfg=head_norm_cfg)))
|
import itertools
from keras.src import tree
from keras.src.trainers.data_adapters import data_adapter_utils
from keras.src.trainers.data_adapters.data_adapter import DataAdapter
class GeneratorDataAdapter(DataAdapter):
"""Adapter for Python generators."""
def __init__(self, generator):
first_batches, generator = peek_and_restore(generator)
self.generator = generator
self._first_batches = first_batches
self._output_signature = None
if not isinstance(first_batches[0], tuple):
raise ValueError(
"When passing a Python generator to a Keras model, "
"the generator must return a tuple, either "
"(input,) or (inputs, targets) or "
"(inputs, targets, sample_weights). "
f"Received: {first_batches[0]}"
)
def get_numpy_iterator(self):
return data_adapter_utils.get_numpy_iterator(self.generator)
def get_jax_iterator(self):
from keras.src.backend.jax.core import convert_to_tensor
def convert_to_jax(x):
if data_adapter_utils.is_scipy_sparse(x):
return data_adapter_utils.scipy_sparse_to_jax_sparse(x)
elif data_adapter_utils.is_tensorflow_sparse(x):
return data_adapter_utils.tf_sparse_to_jax_sparse(x)
return convert_to_tensor(x)
for batch in self.generator:
yield tree.map_structure(convert_to_jax, batch)
def get_tf_dataset(self):
from keras.src.utils.module_utils import tensorflow as tf
def convert_to_tf(x, spec):
if data_adapter_utils.is_scipy_sparse(x):
x = data_adapter_utils.scipy_sparse_to_tf_sparse(x)
elif data_adapter_utils.is_jax_sparse(x):
x = data_adapter_utils.jax_sparse_to_tf_sparse(x)
if not spec.shape.is_compatible_with(x.shape):
raise TypeError(
f"Generator yielded an element of shape {x.shape} where "
f"an element of shape {spec.shape} was expected. Your "
"generator provides tensors with variable input "
"dimensions other than the batch size. Make sure that the "
"generator's first two batches do not have the same "
"dimension value wherever there is a variable input "
"dimension."
)
return x
def get_tf_iterator():
for batch in self.generator:
batch = tree.map_structure(
convert_to_tf, batch, self._output_signature
)
yield batch
if self._output_signature is None:
self._output_signature = data_adapter_utils.get_tensor_spec(
self._first_batches
)
ds = tf.data.Dataset.from_generator(
get_tf_iterator,
output_signature=self._output_signature,
)
ds = ds.prefetch(tf.data.AUTOTUNE)
return ds
def get_torch_dataloader(self):
return data_adapter_utils.get_torch_dataloader(self.generator)
@property
def num_batches(self):
return None
@property
def batch_size(self):
return None
def peek_and_restore(generator):
batches = list(
itertools.islice(
generator, data_adapter_utils.NUM_BATCHES_FOR_TENSOR_SPEC
)
)
return batches, itertools.chain(batches, generator)
|
import itertools
from keras.src import tree
from keras.src.trainers.data_adapters import data_adapter_utils
from keras.src.trainers.data_adapters.data_adapter import DataAdapter
class GeneratorDataAdapter(DataAdapter):
"""Adapter for Python generators."""
def __init__(self, generator):
first_batches, generator = peek_and_restore(generator)
self.generator = generator
self._first_batches = first_batches
self._output_signature = None
if not isinstance(first_batches[0], tuple):
raise ValueError(
"When passing a Python generator to a Keras model, "
"the generator must return a tuple, either "
"(input,) or (inputs, targets) or "
"(inputs, targets, sample_weights). "
f"Received: {first_batches[0]}"
)
def get_numpy_iterator(self):
return data_adapter_utils.get_numpy_iterator(self.generator)
def get_jax_iterator(self):
from keras.src.backend.jax.core import convert_to_tensor
def convert_to_jax(x):
if data_adapter_utils.is_scipy_sparse(x):
return data_adapter_utils.scipy_sparse_to_jax_sparse(x)
elif data_adapter_utils.is_tensorflow_sparse(x):
return data_adapter_utils.tf_sparse_to_jax_sparse(x)
return convert_to_tensor(x)
for batch in self.generator:
yield tree.map_structure(convert_to_jax, batch)
def get_tf_dataset(self):
from keras.src.utils.module_utils import tensorflow as tf
def convert_to_tf(x):
if data_adapter_utils.is_scipy_sparse(x):
x = data_adapter_utils.scipy_sparse_to_tf_sparse(x)
elif data_adapter_utils.is_jax_sparse(x):
x = data_adapter_utils.jax_sparse_to_tf_sparse(x)
return x
def get_tf_iterator():
for batch in self.generator:
batch = tree.map_structure(convert_to_tf, batch)
yield batch
if self._output_signature is None:
self._output_signature = data_adapter_utils.get_tensor_spec(
self._first_batches
)
ds = tf.data.Dataset.from_generator(
get_tf_iterator,
output_signature=self._output_signature,
)
ds = ds.prefetch(tf.data.AUTOTUNE)
return ds
def get_torch_dataloader(self):
return data_adapter_utils.get_torch_dataloader(self.generator)
@property
def num_batches(self):
return None
@property
def batch_size(self):
return None
def peek_and_restore(generator):
batches = list(
itertools.islice(
generator, data_adapter_utils.NUM_BATCHES_FOR_TENSOR_SPEC
)
)
return batches, itertools.chain(batches, generator)
|
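peek_and_restore is the key trick in GeneratorDataAdapter: it consumes the first NUM_BATCHES_FOR_TENSOR_SPEC batches to inspect their structure, then chains them back in front of the untouched generator so nothing is lost for the actual iteration. A stdlib-only sketch of the same pattern (the toy generator is illustrative):
import itertools
def peek_and_restore(gen, n=2):
    head = list(itertools.islice(gen, n))      # consume the first n items for inspection
    return head, itertools.chain(head, gen)    # ...then stitch them back onto the remainder
gen = (i * i for i in range(5))
head, restored = peek_and_restore(gen)
print(head)            # [0, 1] -> enough to derive shapes/dtypes for the output signature
print(list(restored))  # [0, 1, 4, 9, 16] -> the full stream is still available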
from enum import Enum
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers import BaseOutputParser
from langchain_core.utils import pre_init
class EnumOutputParser(BaseOutputParser[Enum]):
"""Parse an output that is one of a set of values."""
enum: type[Enum]
"""The enum to parse. Its values must be strings."""
@pre_init
def raise_deprecation(cls, values: dict) -> dict:
enum = values["enum"]
if not all(isinstance(e.value, str) for e in enum):
msg = "Enum values must be strings"
raise ValueError(msg)
return values
@property
def _valid_values(self) -> list[str]:
return [e.value for e in self.enum]
def parse(self, response: str) -> Enum:
try:
return self.enum(response.strip())
except ValueError as e:
msg = (
f"Response '{response}' is not one of the "
f"expected values: {self._valid_values}"
)
raise OutputParserException(msg) from e
def get_format_instructions(self) -> str:
return f"Select one of the following options: {', '.join(self._valid_values)}"
@property
def OutputType(self) -> type[Enum]:
return self.enum
|
from enum import Enum
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers import BaseOutputParser
from langchain_core.utils import pre_init
class EnumOutputParser(BaseOutputParser[Enum]):
"""Parse an output that is one of a set of values."""
enum: type[Enum]
"""The enum to parse. Its values must be strings."""
@pre_init
def raise_deprecation(cls, values: dict) -> dict:
enum = values["enum"]
if not all(isinstance(e.value, str) for e in enum):
msg = "Enum values must be strings"
raise ValueError(msg)
return values
@property
def _valid_values(self) -> list[str]:
return [e.value for e in self.enum]
def parse(self, response: str) -> Enum:
try:
return self.enum(response.strip())
except ValueError:
msg = (
f"Response '{response}' is not one of the "
f"expected values: {self._valid_values}"
)
raise OutputParserException(msg)
def get_format_instructions(self) -> str:
return f"Select one of the following options: {', '.join(self._valid_values)}"
@property
def OutputType(self) -> type[Enum]:
return self.enum
|
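A short usage sketch for EnumOutputParser; the Colors enum and the parsed strings are illustrative, and the import path may differ slightly across langchain versions.
from enum import Enum
from langchain.output_parsers import EnumOutputParser  # import path may vary by version
class Colors(Enum):
    RED = "red"
    GREEN = "green"
    BLUE = "blue"
parser = EnumOutputParser(enum=Colors)
print(parser.get_format_instructions())  # Select one of the following options: red, green, blue
print(parser.parse(" red\n"))            # Colors.RED -- whitespace is stripped before matching
parser.parse("purple")                   # raises OutputParserException listing the valid values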
import pytest
from keras.src import backend
from keras.src import layers
from keras.src import ops
from keras.src import testing
from keras.src.layers.rnn.dropout_rnn_cell import DropoutRNNCell
class RNNCellWithDropout(layers.Layer, DropoutRNNCell):
def __init__(
self, units, dropout=0.5, recurrent_dropout=0.5, seed=None, **kwargs
):
super().__init__(**kwargs)
self.seed = seed
self.seed_generator = backend.random.SeedGenerator(seed)
self.units = units
self.state_size = units
self.dropout = dropout
self.recurrent_dropout = recurrent_dropout
def build(self, input_shape):
self.kernel = self.add_weight(
shape=(input_shape[-1], self.units),
initializer="ones",
name="kernel",
)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer="ones",
name="recurrent_kernel",
)
def call(self, inputs, states, training=False):
if training:
dp_mask = self.get_dropout_mask(inputs)
inputs = inputs * dp_mask
prev_output = states[0]
h = ops.matmul(inputs, self.kernel)
if training:
rdp_mask = self.get_recurrent_dropout_mask(prev_output)
prev_output = prev_output * rdp_mask
output = h + ops.matmul(prev_output, self.recurrent_kernel)
return output, [output]
class DropoutRNNCellTest(testing.TestCase):
def test_seed_tracking(self):
cell = RNNCellWithDropout(3, seed=1337)
self.assertEqual(len(cell.non_trainable_variables), 1)
layer = layers.RNN(cell)
self.assertEqual(len(layer.non_trainable_variables), 1)
@pytest.mark.requires_trainable_backend
def test_basics(self):
self.run_layer_test(
layers.RNN,
init_kwargs={"cell": RNNCellWithDropout(5, seed=1337)},
input_shape=(3, 2, 4),
call_kwargs={"training": True},
expected_output_shape=(3, 5),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_non_trainable_variables=1,
supports_masking=True,
run_mixed_precision_check=False,
)
# manually set dtype to mixed_float16 to run mixed precision check
run_mixed_precision_check = True
if backend.backend() == "torch":
import torch
run_mixed_precision_check = torch.cuda.is_available()
if run_mixed_precision_check:
self.run_layer_test(
layers.RNN,
init_kwargs={
"cell": RNNCellWithDropout(
5, seed=1337, dtype="mixed_float16"
),
"dtype": "mixed_float16",
},
input_shape=(3, 2, 4),
call_kwargs={"training": True},
expected_output_shape=(3, 5),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_non_trainable_variables=1,
supports_masking=True,
run_mixed_precision_check=False,
)
|
import pytest
from keras.src import backend
from keras.src import layers
from keras.src import ops
from keras.src import testing
from keras.src.layers.rnn.dropout_rnn_cell import DropoutRNNCell
class RNNCellWithDropout(layers.Layer, DropoutRNNCell):
def __init__(
self, units, dropout=0.5, recurrent_dropout=0.5, seed=None, **kwargs
):
super().__init__(**kwargs)
self.seed = seed
self.seed_generator = backend.random.SeedGenerator(seed)
self.units = units
self.state_size = units
self.dropout = dropout
self.recurrent_dropout = recurrent_dropout
def build(self, input_shape):
self.kernel = self.add_weight(
shape=(input_shape[-1], self.units),
initializer="ones",
name="kernel",
)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer="ones",
name="recurrent_kernel",
)
self.built = True
def call(self, inputs, states, training=False):
if training:
dp_mask = self.get_dropout_mask(inputs)
inputs = inputs * dp_mask
prev_output = states[0]
h = ops.matmul(inputs, self.kernel)
if training:
rdp_mask = self.get_recurrent_dropout_mask(prev_output)
prev_output = prev_output * rdp_mask
output = h + ops.matmul(prev_output, self.recurrent_kernel)
return output, [output]
class DropoutRNNCellTest(testing.TestCase):
def test_seed_tracking(self):
cell = RNNCellWithDropout(3, seed=1337)
self.assertEqual(len(cell.non_trainable_variables), 1)
layer = layers.RNN(cell)
self.assertEqual(len(layer.non_trainable_variables), 1)
@pytest.mark.requires_trainable_backend
def test_basics(self):
self.run_layer_test(
layers.RNN,
init_kwargs={"cell": RNNCellWithDropout(5, seed=1337)},
input_shape=(3, 2, 4),
call_kwargs={"training": True},
expected_output_shape=(3, 5),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_non_trainable_variables=1,
supports_masking=True,
run_mixed_precision_check=False,
)
# manually set dtype to mixed_float16 to run mixed precision check
run_mixed_precision_check = True
if backend.backend() == "torch":
import torch
run_mixed_precision_check = torch.cuda.is_available()
if run_mixed_precision_check:
self.run_layer_test(
layers.RNN,
init_kwargs={
"cell": RNNCellWithDropout(
5, seed=1337, dtype="mixed_float16"
),
"dtype": "mixed_float16",
},
input_shape=(3, 2, 4),
call_kwargs={"training": True},
expected_output_shape=(3, 5),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_non_trainable_variables=1,
supports_masking=True,
run_mixed_precision_check=False,
)
|
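What DropoutRNNCell.get_dropout_mask and get_recurrent_dropout_mask provide is a mask that is sampled once and reused at every timestep, which is exactly what the test's call() exercises. The snippet below is a NumPy-only conceptual sketch of that reuse, not the Keras API:
import numpy as np
rng = np.random.default_rng(1337)
rate = 0.5
batch, timesteps, features = 2, 3, 4
x = np.ones((batch, timesteps, features))
# Sample one inverted-dropout mask per batch, scaled so the expected activation is unchanged.
mask = rng.binomial(1, 1.0 - rate, size=(batch, features)) / (1.0 - rate)
for t in range(timesteps):
    x_t = x[:, t, :] * mask  # the identical mask is applied at every timestep
print(mask)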
"""
This script runs the evaluation of an SBERT msmarco model on the
MS MARCO dev dataset and reports different performance metrics for cosine similarity & dot-product.
Usage:
python eval_msmarco.py model_name [max_corpus_size_in_thousands]
"""
import logging
import os
import sys
import tarfile
from sentence_transformers import LoggingHandler, SentenceTransformer, evaluation, util
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Name of the SBERT model
model_name = sys.argv[1]
# You can limit the approx. max size of the corpus. Pass 100 as the second parameter to cap the corpus at roughly 100k docs
corpus_max_size = int(sys.argv[2]) * 1000 if len(sys.argv) >= 3 else 0
#### Load model
model = SentenceTransformer(model_name)
### Data files
data_folder = "msmarco-data"
os.makedirs(data_folder, exist_ok=True)
collection_filepath = os.path.join(data_folder, "collection.tsv")
dev_queries_file = os.path.join(data_folder, "queries.dev.small.tsv")
qrels_filepath = os.path.join(data_folder, "qrels.dev.tsv")
### Download files if needed
if not os.path.exists(collection_filepath) or not os.path.exists(dev_queries_file):
tar_filepath = os.path.join(data_folder, "collectionandqueries.tar.gz")
if not os.path.exists(tar_filepath):
logging.info("Download: " + tar_filepath)
util.http_get(
"https://msmarco.z22.web.core.windows.net/msmarcoranking/collectionandqueries.tar.gz", tar_filepath
)
with tarfile.open(tar_filepath, "r:gz") as tar:
tar.extractall(path=data_folder)
if not os.path.exists(qrels_filepath):
util.http_get("https://msmarco.z22.web.core.windows.net/msmarcoranking/qrels.dev.tsv", qrels_filepath)
### Load data
corpus = {} # Our corpus pid => passage
dev_queries = {} # Our dev queries. qid => query
dev_rel_docs = {} # Mapping qid => set with relevant pids
needed_pids = set() # Passage IDs we need
needed_qids = set() # Query IDs we need
# Load the 6980 dev queries
with open(dev_queries_file, encoding="utf8") as fIn:
for line in fIn:
qid, query = line.strip().split("\t")
dev_queries[qid] = query.strip()
# Load which passages are relevant for which queries
with open(qrels_filepath) as fIn:
for line in fIn:
qid, _, pid, _ = line.strip().split("\t")
if qid not in dev_queries:
continue
if qid not in dev_rel_docs:
dev_rel_docs[qid] = set()
dev_rel_docs[qid].add(pid)
needed_pids.add(pid)
needed_qids.add(qid)
# Read passages
with open(collection_filepath, encoding="utf8") as fIn:
for line in fIn:
pid, passage = line.strip().split("\t")
        if pid in needed_pids or corpus_max_size <= 0 or len(corpus) <= corpus_max_size:
corpus[pid] = passage.strip()
## Run evaluator
logging.info("Queries: {}".format(len(dev_queries)))
logging.info("Corpus: {}".format(len(corpus)))
ir_evaluator = evaluation.InformationRetrievalEvaluator(
dev_queries,
corpus,
dev_rel_docs,
show_progress_bar=True,
corpus_chunk_size=100000,
precision_recall_at_k=[10, 100],
name="msmarco dev",
)
ir_evaluator(model)
|
"""
This script runs the evaluation of an SBERT msmarco model on the
MS MARCO dev dataset and reports different performance metrics for cosine similarity & dot-product.
Usage:
python eval_msmarco.py model_name [max_corpus_size_in_thousands]
"""
from sentence_transformers import LoggingHandler, SentenceTransformer, evaluation, util
import logging
import sys
import os
import tarfile
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Name of the SBERT model
model_name = sys.argv[1]
# You can limit the approx. max size of the corpus. Pass 100 as the second parameter to cap the corpus at roughly 100k docs
corpus_max_size = int(sys.argv[2]) * 1000 if len(sys.argv) >= 3 else 0
#### Load model
model = SentenceTransformer(model_name)
### Data files
data_folder = "msmarco-data"
os.makedirs(data_folder, exist_ok=True)
collection_filepath = os.path.join(data_folder, "collection.tsv")
dev_queries_file = os.path.join(data_folder, "queries.dev.small.tsv")
qrels_filepath = os.path.join(data_folder, "qrels.dev.tsv")
### Download files if needed
if not os.path.exists(collection_filepath) or not os.path.exists(dev_queries_file):
tar_filepath = os.path.join(data_folder, "collectionandqueries.tar.gz")
if not os.path.exists(tar_filepath):
logging.info("Download: " + tar_filepath)
util.http_get(
"https://msmarco.z22.web.core.windows.net/msmarcoranking/collectionandqueries.tar.gz", tar_filepath
)
with tarfile.open(tar_filepath, "r:gz") as tar:
tar.extractall(path=data_folder)
if not os.path.exists(qrels_filepath):
util.http_get("https://msmarco.z22.web.core.windows.net/msmarcoranking/qrels.dev.tsv", qrels_filepath)
### Load data
corpus = {} # Our corpus pid => passage
dev_queries = {} # Our dev queries. qid => query
dev_rel_docs = {} # Mapping qid => set with relevant pids
needed_pids = set() # Passage IDs we need
needed_qids = set() # Query IDs we need
# Load the 6980 dev queries
with open(dev_queries_file, encoding="utf8") as fIn:
for line in fIn:
qid, query = line.strip().split("\t")
dev_queries[qid] = query.strip()
# Load which passages are relevant for which queries
with open(qrels_filepath) as fIn:
for line in fIn:
qid, _, pid, _ = line.strip().split("\t")
if qid not in dev_queries:
continue
if qid not in dev_rel_docs:
dev_rel_docs[qid] = set()
dev_rel_docs[qid].add(pid)
needed_pids.add(pid)
needed_qids.add(qid)
# Read passages
with open(collection_filepath, encoding="utf8") as fIn:
for line in fIn:
pid, passage = line.strip().split("\t")
        if pid in needed_pids or corpus_max_size <= 0 or len(corpus) <= corpus_max_size:
corpus[pid] = passage.strip()
## Run evaluator
logging.info("Queries: {}".format(len(dev_queries)))
logging.info("Corpus: {}".format(len(corpus)))
ir_evaluator = evaluation.InformationRetrievalEvaluator(
dev_queries,
corpus,
dev_rel_docs,
show_progress_bar=True,
corpus_chunk_size=100000,
precision_recall_at_k=[10, 100],
name="msmarco dev",
)
ir_evaluator(model)
|
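InformationRetrievalEvaluator only needs the three mappings the script builds (qid -> query, pid -> passage, qid -> set of relevant pids). A toy-sized sketch with made-up data and an illustrative model name:
from sentence_transformers import SentenceTransformer, evaluation
model = SentenceTransformer("sentence-transformers/msmarco-distilbert-base-v4")  # illustrative
dev_queries = {"q1": "what is the capital of france"}
corpus = {"p1": "Paris is the capital of France.", "p2": "Berlin is the capital of Germany."}
dev_rel_docs = {"q1": {"p1"}}
ir_evaluator = evaluation.InformationRetrievalEvaluator(
    dev_queries, corpus, dev_rel_docs, precision_recall_at_k=[1, 2], name="toy-dev"
)
ir_evaluator(model)  # logs MRR/NDCG/precision/recall on the toy corpus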
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning)
# fix fork error on macOS; this seems to have no effect, so the EXPORT must still be done manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
_set_start_method('fork')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.13.2'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.13'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096, which is useful when running matplotlib/seaborn with
    parallel plot generators, compared to the Ubuntu default of ulimit -n 1024 or the OS X
    El Capitan default of 256; the temporary setting expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from docarray import Document, DocumentArray
# Client
from jina.clients import Client
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Deployment
from jina.orchestrate.deployments import Deployment
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.gateway import BaseGateway as Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning)
# fix fork error on macOS; this seems to have no effect, so the EXPORT must still be done manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
_set_start_method('fork')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.13.2'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.13'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096, which is useful when running matplotlib/seaborn with
    parallel plot generators, compared to the Ubuntu default of ulimit -n 1024 or the OS X
    El Capitan default of 256; the temporary setting expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from docarray import Document, DocumentArray
# Client
from jina.clients import Client
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.gateway import BaseGateway as Gateway
|
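_set_nofile is the standard POSIX rlimit adjustment. Below is a simplified sketch that only raises the soft limit toward a floor and leaves the hard cap untouched (the Jina helper additionally tries to raise the hard limit); it assumes a POSIX platform where the resource module is available.
import resource
def raise_nofile(nofile_atleast: int = 4096):
    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    if soft < nofile_atleast:
        # An unprivileged process may raise its soft limit up to (but not beyond) the hard limit.
        new_soft = nofile_atleast if hard == resource.RLIM_INFINITY else min(nofile_atleast, hard)
        resource.setrlimit(resource.RLIMIT_NOFILE, (new_soft, hard))
    return resource.getrlimit(resource.RLIMIT_NOFILE)
print(raise_nofile())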
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.models.roi_heads import StandardRoIHead # noqa
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
class TestCascadeRoIHead(TestCase):
@parameterized.expand(
['cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py'])
def test_init(self, cfg_file):
"""Test init standard RoI head."""
# Normal Cascade Mask R-CNN RoI head
roi_head_cfg = get_roi_head_cfg(cfg_file)
roi_head = MODELS.build(roi_head_cfg)
assert roi_head.with_bbox
assert roi_head.with_mask
@parameterized.expand(
['cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py'])
def test_cascade_roi_head_loss(self, cfg_file):
"""Tests standard roi head loss when truth is empty and non-empty."""
if not torch.cuda.is_available():
            # RoI pooling is only supported on GPU
return unittest.skip('test requires GPU and torch+cuda')
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
}]
roi_head_cfg = get_roi_head_cfg(cfg_file)
roi_head = MODELS.build(roi_head_cfg)
roi_head = roi_head.cuda()
feats = []
for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):
feats.append(
torch.rand(1, 1, s // (2**(i + 2)),
s // (2**(i + 2))).to(device='cuda'))
feats = tuple(feats)
# When truth is non-empty then both cls, box, and mask loss
# should be nonzero for random inputs
img_shape_list = [(3, s, s) for _ in img_metas]
proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
packed_inputs = demo_mm_inputs(
batch_size=1,
image_shapes=[(3, s, s)],
num_items=[1],
num_classes=4,
with_mask=True)
batch_data_samples = []
for i in range(len(packed_inputs)):
batch_data_samples.append(
packed_inputs[i]['data_sample'].to(device='cuda'))
out = roi_head.loss(feats, proposal_list, batch_data_samples)
for name, value in out.items():
if 'loss' in name:
self.assertGreaterEqual(
value.sum(), 0, msg='loss should be non-zero')
# When there is no truth, the cls loss should be nonzero but
# there should be no box and mask loss.
proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
packed_inputs = demo_mm_inputs(
batch_size=1,
image_shapes=[(3, s, s)],
num_items=[0],
num_classes=4,
with_mask=True)
batch_data_samples = []
for i in range(len(packed_inputs)):
batch_data_samples.append(
packed_inputs[i]['data_sample'].to(device='cuda'))
out = roi_head.loss(feats, proposal_list, batch_data_samples)
for name, value in out.items():
if 'loss_cls' in name:
self.assertGreaterEqual(
value.sum(), 0, msg='loss should be non-zero')
elif 'loss_bbox' in name or 'loss_mask' in name:
self.assertEqual(value.sum(), 0)
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.models.roi_heads import StandardRoIHead # noqa
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
class TestCascadeRoIHead(TestCase):
@parameterized.expand(
['cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py'])
def test_init(self, cfg_file):
"""Test init standard RoI head."""
# Normal Cascade Mask R-CNN RoI head
roi_head_cfg = get_roi_head_cfg(cfg_file)
roi_head = MODELS.build(roi_head_cfg)
assert roi_head.with_bbox
assert roi_head.with_mask
@parameterized.expand(
['cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py'])
def test_cascade_roi_head_loss(self, cfg_file):
"""Tests standard roi head loss when truth is empty and non-empty."""
if not torch.cuda.is_available():
            # RoI pooling is only supported on GPU
return unittest.skip('test requires GPU and torch+cuda')
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
}]
roi_head_cfg = get_roi_head_cfg(cfg_file)
roi_head = MODELS.build(roi_head_cfg)
roi_head = roi_head.cuda()
feats = []
for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):
feats.append(
torch.rand(1, 1, s // (2**(i + 2)),
s // (2**(i + 2))).to(device='cuda'))
feats = tuple(feats)
# When truth is non-empty then both cls, box, and mask loss
# should be nonzero for random inputs
img_shape_list = [img_meta['img_shape'] for img_meta in img_metas]
proposal_list = demo_mm_proposals(img_shape_list, 100)
packed_inputs = demo_mm_inputs(
batch_size=1,
image_shapes=[(s, s, 3)],
num_items=[1],
num_classes=4,
with_mask=True)
batch_data_samples = []
for i in range(len(packed_inputs)):
batch_data_samples.append(
packed_inputs[i]['data_sample'].to(device='cuda'))
out = roi_head.loss(feats, proposal_list, batch_data_samples)
for name, value in out.items():
if 'loss' in name:
self.assertGreaterEqual(
value.sum(), 0, msg='loss should be non-zero')
# When there is no truth, the cls loss should be nonzero but
# there should be no box and mask loss.
proposal_list = demo_mm_proposals(img_shape_list, 100)
packed_inputs = demo_mm_inputs(
batch_size=1,
image_shapes=[(s, s, 3)],
num_items=[0],
num_classes=4,
with_mask=True)
batch_data_samples = []
for i in range(len(packed_inputs)):
batch_data_samples.append(
packed_inputs[i]['data_sample'].to(device='cuda'))
out = roi_head.loss(feats, proposal_list, batch_data_samples)
for name, value in out.items():
if 'loss_cls' in name:
self.assertGreaterEqual(
value.sum(), 0, msg='loss should be non-zero')
elif 'loss_bbox' in name or 'loss_mask' in name:
self.assertEqual(value.sum(), 0)
|
from typing import Any
from langchain_community.adapters import openai as lcopenai
def _test_no_stream(**kwargs: Any) -> None:
import openai
result = openai.ChatCompletion.create(**kwargs)
lc_result = lcopenai.ChatCompletion.create(**kwargs)
if isinstance(lc_result, dict):
if isinstance(result, dict):
result_dict = result["choices"][0]["message"].to_dict_recursive()
lc_result_dict = lc_result["choices"][0]["message"]
assert result_dict == lc_result_dict
return
def _test_stream(**kwargs: Any) -> None:
import openai
result = []
for c in openai.ChatCompletion.create(**kwargs):
result.append(c["choices"][0]["delta"].to_dict_recursive())
lc_result = []
for c in lcopenai.ChatCompletion.create(**kwargs):
lc_result.append(c["choices"][0]["delta"])
assert result == lc_result
async def _test_async(**kwargs: Any) -> None:
import openai
result = await openai.ChatCompletion.acreate(**kwargs)
lc_result = await lcopenai.ChatCompletion.acreate(**kwargs)
if isinstance(lc_result, dict):
if isinstance(result, dict):
result_dict = result["choices"][0]["message"].to_dict_recursive()
lc_result_dict = lc_result["choices"][0]["message"]
assert result_dict == lc_result_dict
return
async def _test_astream(**kwargs: Any) -> None:
import openai
result = []
async for c in await openai.ChatCompletion.acreate(**kwargs):
result.append(c["choices"][0]["delta"].to_dict_recursive())
lc_result = []
async for c in await lcopenai.ChatCompletion.acreate(**kwargs):
lc_result.append(c["choices"][0]["delta"])
assert result == lc_result
FUNCTIONS = [
{
"name": "get_current_weather",
"description": "Get the current weather in a given location",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city and state, e.g. San Francisco, CA",
},
"unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
},
"required": ["location"],
},
}
]
async def _test_module(**kwargs: Any) -> None:
_test_no_stream(**kwargs)
await _test_async(**kwargs)
_test_stream(stream=True, **kwargs)
await _test_astream(stream=True, **kwargs)
async def test_normal_call() -> None:
await _test_module(
messages=[{"role": "user", "content": "hi"}],
model="gpt-3.5-turbo",
temperature=0,
)
async def test_function_calling() -> None:
await _test_module(
messages=[{"role": "user", "content": "whats the weather in boston"}],
model="gpt-3.5-turbo",
functions=FUNCTIONS,
temperature=0,
)
async def test_answer_with_function_calling() -> None:
await _test_module(
messages=[
{"role": "user", "content": "say hi, then whats the weather in boston"}
],
model="gpt-3.5-turbo",
functions=FUNCTIONS,
temperature=0,
)
|
from typing import Any
from langchain_community.adapters import openai as lcopenai
def _test_no_stream(**kwargs: Any) -> None:
import openai
result = openai.ChatCompletion.create(**kwargs) # type: ignore[attr-defined]
lc_result = lcopenai.ChatCompletion.create(**kwargs)
if isinstance(lc_result, dict):
if isinstance(result, dict):
result_dict = result["choices"][0]["message"].to_dict_recursive()
lc_result_dict = lc_result["choices"][0]["message"]
assert result_dict == lc_result_dict
return
def _test_stream(**kwargs: Any) -> None:
import openai
result = []
for c in openai.ChatCompletion.create(**kwargs): # type: ignore[attr-defined]
result.append(c["choices"][0]["delta"].to_dict_recursive())
lc_result = []
for c in lcopenai.ChatCompletion.create(**kwargs):
lc_result.append(c["choices"][0]["delta"])
assert result == lc_result
async def _test_async(**kwargs: Any) -> None:
import openai
result = await openai.ChatCompletion.acreate(**kwargs) # type: ignore[attr-defined]
lc_result = await lcopenai.ChatCompletion.acreate(**kwargs)
if isinstance(lc_result, dict):
if isinstance(result, dict):
result_dict = result["choices"][0]["message"].to_dict_recursive()
lc_result_dict = lc_result["choices"][0]["message"]
assert result_dict == lc_result_dict
return
async def _test_astream(**kwargs: Any) -> None:
import openai
result = []
async for c in await openai.ChatCompletion.acreate(**kwargs): # type: ignore[attr-defined]
result.append(c["choices"][0]["delta"].to_dict_recursive())
lc_result = []
async for c in await lcopenai.ChatCompletion.acreate(**kwargs):
lc_result.append(c["choices"][0]["delta"])
assert result == lc_result
FUNCTIONS = [
{
"name": "get_current_weather",
"description": "Get the current weather in a given location",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city and state, e.g. San Francisco, CA",
},
"unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
},
"required": ["location"],
},
}
]
async def _test_module(**kwargs: Any) -> None:
_test_no_stream(**kwargs)
await _test_async(**kwargs)
_test_stream(stream=True, **kwargs)
await _test_astream(stream=True, **kwargs)
async def test_normal_call() -> None:
await _test_module(
messages=[{"role": "user", "content": "hi"}],
model="gpt-3.5-turbo",
temperature=0,
)
async def test_function_calling() -> None:
await _test_module(
messages=[{"role": "user", "content": "whats the weather in boston"}],
model="gpt-3.5-turbo",
functions=FUNCTIONS,
temperature=0,
)
async def test_answer_with_function_calling() -> None:
await _test_module(
messages=[
{"role": "user", "content": "say hi, then whats the weather in boston"}
],
model="gpt-3.5-turbo",
functions=FUNCTIONS,
temperature=0,
)
|
__version__ = '0.20.1'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
from docarray.helper import login, logout
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.20.0'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
from docarray.helper import login, logout
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
# dataset settings
dataset_type = 'CocoPanopticDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadPanopticAnnotations', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadPanopticAnnotations', file_client_args=file_client_args),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/panoptic_train2017.json',
data_prefix=dict(
img='train2017/', seg='annotations/panoptic_train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/panoptic_val2017.json',
data_prefix=dict(img='val2017/', seg='annotations/panoptic_val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoPanopticMetric',
ann_file=data_root + 'annotations/panoptic_val2017.json',
seg_prefix=data_root + 'annotations/panoptic_val2017/',
file_client_args=file_client_args,
)
test_evaluator = val_evaluator
# inference on test dataset and
# format the output results for submission.
# test_dataloader = dict(
# batch_size=1,
# num_workers=1,
# persistent_workers=True,
# drop_last=False,
# sampler=dict(type='DefaultSampler', shuffle=False),
# dataset=dict(
# type=dataset_type,
# data_root=data_root,
# ann_file='annotations/panoptic_image_info_test-dev2017.json',
# data_prefix=dict(img='test2017/', seg=None),
# test_mode=True,
# pipeline=test_pipeline))
# test_evaluator = dict(
# type='CocoPanopticMetric',
# format_only=True,
# outfile_prefix='./work_dirs/coco_panoptic/test')
|
# dataset settings
dataset_type = 'CocoPanopticDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadPanopticAnnotations', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/panoptic_train2017.json',
data_prefix=dict(
img='train2017/', seg='annotations/panoptic_train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/panoptic_val2017.json',
data_prefix=dict(img='val2017/', seg='annotations/panoptic_val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoPanopticMetric',
ann_file=data_root + 'annotations/panoptic_val2017.json',
seg_prefix=data_root + 'annotations/panoptic_val2017/',
file_client_args=file_client_args,
)
test_evaluator = val_evaluator
# inference on test dataset and
# format the output results for submission.
# test_dataloader = dict(
# batch_size=1,
# num_workers=1,
# persistent_workers=True,
# drop_last=False,
# sampler=dict(type='DefaultSampler', shuffle=False),
# dataset=dict(
# type=dataset_type,
# data_root=data_root,
# ann_file='annotations/panoptic_image_info_test-dev2017.json',
# data_prefix=dict(img='test2017/', seg=None),
# test_mode=True,
# pipeline=test_pipeline))
# test_evaluator = dict(
# type='CocoPanopticMetric',
# format_only=True,
# outfile_prefix='./work_dirs/coco_panoptic/test')
|
"""
This example measures the inference speed of a given model
Usage:
python evaluation_inference_speed.py
OR
python evaluation_inference_speed.py model_name
"""
import sys
import time
import torch
from datasets import load_dataset
from sentence_transformers import SentenceTransformer
# Limit torch to 4 threads
torch.set_num_threads(4)
model_name = sys.argv[1] if len(sys.argv) > 1 else "bert-base-nli-mean-tokens"
# Load a sentence transformer model
model = SentenceTransformer(model_name)
max_sentences = 100_000
all_nli_dataset = load_dataset("sentence-transformers/all-nli", "pair", split="train")
sentences = list(set(all_nli_dataset["anchor"]))[:max_sentences]
print("Model Name:", model_name)
print("Number of sentences:", len(sentences))
for i in range(3):
print("Run", i)
start_time = time.time()
emb = model.encode(sentences, batch_size=32)
end_time = time.time()
diff_time = end_time - start_time
print("Done after {:.2f} seconds".format(diff_time))
print("Speed: {:.2f} sentences / second".format(len(sentences) / diff_time))
print("=====")
|
"""
This example measures the inference speed of a given model
Usage:
python evaluation_inference_speed.py
OR
python evaluation_inference_speed.py model_name
"""
from sentence_transformers import SentenceTransformer, util
import sys
import os
import time
import torch
import gzip
import csv
# Limit torch to 4 threads
torch.set_num_threads(4)
model_name = sys.argv[1] if len(sys.argv) > 1 else "bert-base-nli-mean-tokens"
# Load a named sentence model (based on BERT). This will download the model from our server.
# Alternatively, you can also pass a filepath to SentenceTransformer()
model = SentenceTransformer(model_name)
nli_dataset_path = "datasets/AllNLI.tsv.gz"
sentences = set()
max_sentences = 100000
# Download datasets if needed
if not os.path.exists(nli_dataset_path):
util.http_get("https://sbert.net/datasets/AllNLI.tsv.gz", nli_dataset_path)
with gzip.open(nli_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
sentences.add(row["sentence1"])
if len(sentences) >= max_sentences:
break
sentences = list(sentences)
print("Model Name:", model_name)
print("Number of sentences:", len(sentences))
for i in range(3):
print("Run", i)
start_time = time.time()
emb = model.encode(sentences, batch_size=32)
end_time = time.time()
diff_time = end_time - start_time
print("Done after {:.2f} seconds".format(diff_time))
print("Speed: {:.2f} sentences / second".format(len(sentences) / diff_time))
print("=====")
|
"""Pydantic v1 compatibility shim."""
from langchain_core._api import warn_deprecated
try:
from pydantic.v1.main import * # noqa: F403
except ImportError:
from pydantic.main import * # noqa: F403
warn_deprecated(
"0.3.0",
removal="1.0.0",
alternative="pydantic.v1 or pydantic",
message=(
"As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. "
"The langchain_core.pydantic_v1 module was a "
"compatibility shim for pydantic v1, and should no longer be used. "
"Please update the code to import from Pydantic directly.\n\n"
"For example, replace imports like: "
"`from langchain_core.pydantic_v1 import BaseModel`\n"
"with: `from pydantic import BaseModel`\n"
"or the v1 compatibility namespace if you are working in a code base "
"that has not been fully upgraded to pydantic 2 yet. "
"\tfrom pydantic.v1 import BaseModel\n"
),
)
|
"""Pydantic v1 compatibility shim."""
from langchain_core._api import warn_deprecated
try:
from pydantic.v1.main import * # noqa: F403
except ImportError:
from pydantic.main import * # type: ignore # noqa: F403
warn_deprecated(
"0.3.0",
removal="1.0.0",
alternative="pydantic.v1 or pydantic",
message=(
"As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. "
"The langchain_core.pydantic_v1 module was a "
"compatibility shim for pydantic v1, and should no longer be used. "
"Please update the code to import from Pydantic directly.\n\n"
"For example, replace imports like: "
"`from langchain_core.pydantic_v1 import BaseModel`\n"
"with: `from pydantic import BaseModel`\n"
"or the v1 compatibility namespace if you are working in a code base "
"that has not been fully upgraded to pydantic 2 yet. "
"\tfrom pydantic.v1 import BaseModel\n"
),
)
|
import asyncio
import random
import pytest
from jina import Document, DocumentArray
from jina.helper import Namespace, random_identity
from jina.serve.stream import RequestStreamer
from jina.types.request.data import DataRequest
@pytest.mark.asyncio
@pytest.mark.parametrize('prefetch', [0, 5])
@pytest.mark.parametrize('num_requests', [1, 5, 13])
@pytest.mark.parametrize('async_iterator', [False, True])
@pytest.mark.parametrize('results_in_order', [False, True])
async def test_request_streamer(
prefetch, num_requests, async_iterator, results_in_order
):
requests_handled = []
results_handled = []
request_ids = [random_identity() for _ in range(num_requests)]
response_ids = []
def request_handler_fn(request):
requests_handled.append(request)
async def task():
rand_sleep = random.uniform(0.1, 0.6)
await asyncio.sleep(rand_sleep)
docs = request.docs
docs[0].tags['request_handled'] = True
request.data.docs = docs
return request
future = asyncio.ensure_future(task())
return future, None
def result_handle_fn(result):
results_handled.append(result)
assert isinstance(result, DataRequest)
docs = result.docs
docs[0].tags['result_handled'] = True
result.data.docs = docs
return result
def end_of_iter_fn():
        # with a sync generator, all requests have been handled by the time iteration ends,
        # while some results may still be pending
assert len(requests_handled) == num_requests
assert len(results_handled) <= num_requests
def _yield_data_request(i):
req = DataRequest()
req.header.request_id = request_ids[i]
da = DocumentArray()
da.append(Document())
req.data.docs = da
return req
def _get_sync_requests_iterator(num_requests):
for i in range(num_requests):
yield _yield_data_request(i)
async def _get_async_requests_iterator(num_requests):
for i in range(num_requests):
yield _yield_data_request(i)
await asyncio.sleep(0.1)
args = Namespace()
args.prefetch = prefetch
streamer = RequestStreamer(
request_handler=request_handler_fn,
result_handler=result_handle_fn,
end_of_iter_handler=end_of_iter_fn,
prefetch=getattr(args, 'prefetch', 0),
)
it = (
_get_async_requests_iterator(num_requests)
if async_iterator
else _get_sync_requests_iterator(num_requests)
)
response = streamer.stream(request_iterator=it, results_in_order=results_in_order)
num_responses = 0
async for r in response:
response_ids.append(r.header.request_id)
num_responses += 1
assert r.docs[0].tags['request_handled']
assert r.docs[0].tags['result_handled']
assert num_responses == num_requests
assert len(request_ids) == len(response_ids)
if results_in_order:
for req_id, resp_id in zip(request_ids, response_ids):
assert req_id == resp_id
|
import asyncio
import pytest
import random
from jina import Document, DocumentArray
from jina.helper import Namespace, random_identity
from jina.serve.stream import RequestStreamer
from jina.types.request.data import DataRequest
@pytest.mark.asyncio
@pytest.mark.parametrize('prefetch', [0, 5])
@pytest.mark.parametrize('num_requests', [1, 5, 13])
@pytest.mark.parametrize('async_iterator', [False, True])
@pytest.mark.parametrize('results_in_order', [False, True])
async def test_request_streamer(prefetch, num_requests, async_iterator, results_in_order):
requests_handled = []
results_handled = []
request_ids = [random_identity() for _ in range(num_requests)]
response_ids = []
def request_handler_fn(request):
requests_handled.append(request)
async def task():
rand_sleep = random.uniform(0.1, 0.6)
await asyncio.sleep(rand_sleep)
docs = request.docs
docs[0].tags['request_handled'] = True
request.data.docs = docs
return request
future = asyncio.ensure_future(task())
return future, None
def result_handle_fn(result):
results_handled.append(result)
assert isinstance(result, DataRequest)
docs = result.docs
docs[0].tags['result_handled'] = True
result.data.docs = docs
return result
def end_of_iter_fn():
        # with a sync generator, all requests have been handled by the time iteration ends,
        # while some results may still be pending
assert len(requests_handled) == num_requests
assert len(results_handled) <= num_requests
def _yield_data_request(i):
req = DataRequest()
req.header.request_id = request_ids[i]
da = DocumentArray()
da.append(Document())
req.data.docs = da
return req
def _get_sync_requests_iterator(num_requests):
for i in range(num_requests):
yield _yield_data_request(i)
async def _get_async_requests_iterator(num_requests):
for i in range(num_requests):
yield _yield_data_request(i)
await asyncio.sleep(0.1)
args = Namespace()
args.prefetch = prefetch
streamer = RequestStreamer(
request_handler=request_handler_fn,
result_handler=result_handle_fn,
end_of_iter_handler=end_of_iter_fn,
prefetch=getattr(args, 'prefetch', 0),
)
it = (
_get_async_requests_iterator(num_requests)
if async_iterator
else _get_sync_requests_iterator(num_requests)
)
response = streamer.stream(request_iterator=it, results_in_order=results_in_order)
num_responses = 0
async for r in response:
response_ids.append(r.header.request_id)
num_responses += 1
assert r.docs[0].tags['request_handled']
assert r.docs[0].tags['result_handled']
assert num_responses == num_requests
assert len(request_ids) == len(response_ids)
if results_in_order:
for req_id, resp_id in zip(request_ids, response_ids):
assert req_id == resp_id
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# flake8: noqa
# Lint as: python3
"""Util import."""
__all__ = [
"VerificationMode",
"Version",
"disable_progress_bar",
"enable_progress_bar",
"is_progress_bar_enabled",
"experimental",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# flake8: noqa
# Lint as: python3
"""Util import."""
__all__ = [
"VerificationMode",
"Version",
"disable_progress_bar",
"enable_progress_bar",
"is_progress_bar_enabled",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
|
import pytest
from whisper.normalizers import EnglishTextNormalizer
from whisper.normalizers.english import EnglishNumberNormalizer, EnglishSpellingNormalizer
@pytest.mark.parametrize("std", [EnglishNumberNormalizer(), EnglishTextNormalizer()])
def test_number_normalizer(std):
assert std("two") == "2"
assert std("thirty one") == "31"
assert std("five twenty four") == "524"
assert std("nineteen ninety nine") == "1999"
assert std("twenty nineteen") == "2019"
assert std("two point five million") == "2500000"
assert std("four point two billions") == "4200000000s"
assert std("200 thousand") == "200000"
assert std("200 thousand dollars") == "$200000"
assert std("$20 million") == "$20000000"
assert std("€52.4 million") == "€52400000"
assert std("£77 thousands") == "£77000s"
assert std("two double o eight") == "2008"
assert std("three thousand twenty nine") == "3029"
assert std("forty three thousand two hundred sixty") == "43260"
assert std("forty three thousand two hundred and sixty") == "43260"
assert std("nineteen fifties") == "1950s"
assert std("thirty first") == "31st"
assert std("thirty three thousand and three hundred and thirty third") == "33333rd"
assert std("three billion") == "3000000000"
assert std("millions") == "1000000s"
assert std("july third twenty twenty") == "july 3rd 2020"
assert std("august twenty sixth twenty twenty one") == "august 26th 2021"
assert std("3 14") == "3 14"
assert std("3.14") == "3.14"
assert std("3 point 2") == "3.2"
assert std("3 point 14") == "3.14"
assert std("fourteen point 4") == "14.4"
assert std("two point two five dollars") == "$2.25"
assert std("two hundred million dollars") == "$200000000"
assert std("$20.1 million") == "$20100000"
assert std("ninety percent") == "90%"
assert std("seventy six per cent") == "76%"
assert std("double oh seven") == "007"
assert std("double zero seven") == "007"
assert std("nine one one") == "911"
assert std("nine double one") == "911"
assert std("one triple oh one") == "10001"
assert std("two thousandth") == "2000th"
assert std("thirty two thousandth") == "32000th"
assert std("minus 500") == "-500"
assert std("positive twenty thousand") == "+20000"
assert std("two dollars and seventy cents") == "$2.70"
assert std("3 cents") == "¢3"
assert std("$0.36") == "¢36"
assert std("three euros and sixty five cents") == "€3.65"
assert std("three and a half million") == "3500000"
assert std("forty eight and a half dollars") == "$48.5"
assert std("b747") == "b 747"
assert std("10 th") == "10th"
assert std("10th") == "10th"
def test_spelling_normalizer():
std = EnglishSpellingNormalizer()
assert std("mobilisation") == "mobilization"
assert std("cancelation") == "cancellation"
def test_text_normalizer():
std = EnglishTextNormalizer()
assert std("Let's") == "let us"
assert std("he's like") == "he is like"
assert std("she's been like") == "she has been like"
assert std("10km") == "10 km"
assert std("10mm") == "10 mm"
assert std("RC232") == "rc 232"
assert (
std("Mr. Park visited Assoc. Prof. Kim Jr.")
== "mister park visited associate professor kim junior"
)
|
import pytest
from whisper.normalizers import EnglishTextNormalizer
from whisper.normalizers.english import EnglishNumberNormalizer, EnglishSpellingNormalizer
@pytest.mark.parametrize("std", [EnglishNumberNormalizer(), EnglishTextNormalizer()])
def test_number_normalizer(std):
assert std("two") == "2"
assert std("thirty one") == "31"
assert std("five twenty four") == "524"
assert std("nineteen ninety nine") == "1999"
assert std("twenty nineteen") == "2019"
assert std("two point five million") == "2500000"
assert std("four point two billions") == "4200000000s"
assert std("200 thousand") == "200000"
assert std("200 thousand dollars") == "$200000"
assert std("$20 million") == "$20000000"
assert std("€52.4 million") == "€52400000"
assert std("£77 thousands") == "£77000s"
assert std("two double o eight") == "2008"
assert std("three thousand twenty nine") == "3029"
assert std("forty three thousand two hundred sixty") == "43260"
assert std("forty three thousand two hundred and sixty") == "43260"
assert std("nineteen fifties") == "1950s"
assert std("thirty first") == "31st"
assert std("thirty three thousand and three hundred and thirty third") == "33333rd"
assert std("three billion") == "3000000000"
assert std("millions") == "1000000s"
assert std("july third twenty twenty") == "july 3rd 2020"
assert std("august twenty sixth twenty twenty one") == "august 26th 2021"
assert std("3 14") == "3 14"
assert std("3.14") == "3.14"
assert std("3 point 2") == "3.2"
assert std("3 point 14") == "3.14"
assert std("fourteen point 4") == "14.4"
assert std("two point two five dollars") == "$2.25"
assert std("two hundred million dollars") == "$200000000"
assert std("$20.1 million") == "$20100000"
assert std("ninety percent") == "90%"
assert std("seventy six per cent") == "76%"
assert std("double oh seven") == "007"
assert std("double zero seven") == "007"
assert std("nine one one") == "911"
assert std("nine double one") == "911"
assert std("one triple oh one") == "10001"
assert std("two thousandth") == "2000th"
assert std("thirty two thousandth") == "32000th"
assert std("minus 500") == "-500"
assert std("positive twenty thousand") == "+20000"
assert std("two dollars and seventy cents") == "$2.70"
assert std("3 cents") == "¢3"
assert std("$0.36") == "¢36"
assert std("three euros and sixty five cents") == "€3.65"
assert std("three and a half million") == "3500000"
assert std("forty eight and a half dollars") == "$48.5"
assert std("b747") == "b 747"
assert std("10 th") == "10th"
assert std("10th") == "10th"
def test_spelling_normalizer():
std = EnglishSpellingNormalizer()
assert std("mobilisation") == "mobilization"
assert std("cancelation") == "cancellation"
def test_text_normalizer():
std = EnglishTextNormalizer()
assert std("Let's") == "let us"
assert std("he's like") == "he is like"
assert std("she's been like") == "she has been like"
assert std("10km") == "10 km"
assert std("RC232") == "rc 232"
assert (
std("Mr. Park visited Assoc. Prof. Kim Jr.")
== "mister park visited associate professor kim junior"
)
|
from __future__ import annotations
__version__ = "3.2.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from sentence_transformers.backend import export_dynamic_quantized_onnx_model, export_optimized_onnx_model
from sentence_transformers.cross_encoder.CrossEncoder import CrossEncoder
from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset
from sentence_transformers.LoggingHandler import LoggingHandler
from sentence_transformers.model_card import SentenceTransformerModelCardData
from sentence_transformers.quantization import quantize_embeddings
from sentence_transformers.readers import InputExample
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# If codecarbon is installed and the log level is not defined,
# automatically overwrite the default to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"SimilarityFunction",
"InputExample",
"CrossEncoder",
"SentenceTransformerTrainer",
"SentenceTransformerTrainingArguments",
"SentenceTransformerModelCardData",
"quantize_embeddings",
"export_optimized_onnx_model",
"export_dynamic_quantized_onnx_model",
]
|
from __future__ import annotations
__version__ = "3.2.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from sentence_transformers.cross_encoder.CrossEncoder import CrossEncoder
from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset
from sentence_transformers.LoggingHandler import LoggingHandler
from sentence_transformers.model_card import SentenceTransformerModelCardData
from sentence_transformers.quantization import quantize_embeddings
from sentence_transformers.readers import InputExample
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# If codecarbon is installed and the log level is not defined,
# automatically overwrite the default to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"SimilarityFunction",
"InputExample",
"CrossEncoder",
"SentenceTransformerTrainer",
"SentenceTransformerTrainingArguments",
"SentenceTransformerModelCardData",
"quantize_embeddings",
]
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='FOVEA',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
num_outs=5,
add_extra_convs='on_input'),
bbox_head=dict(
type='FoveaHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
base_edge_list=[16, 32, 64, 128, 256],
scale_ranges=((1, 64), (32, 128), (64, 256), (128, 512), (256, 2048)),
sigma=0.4,
with_deform=False,
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=1.50,
alpha=0.4,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0)),
# training and testing settings
train_cfg=dict(),
test_cfg=dict(
nms_pre=1000,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
train_dataloader = dict(batch_size=4, num_workers=4)
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='FOVEA',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
num_outs=5,
add_extra_convs='on_input'),
bbox_head=dict(
type='FoveaHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
base_edge_list=[16, 32, 64, 128, 256],
scale_ranges=((1, 64), (32, 128), (64, 256), (128, 512), (256, 2048)),
sigma=0.4,
with_deform=False,
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=1.50,
alpha=0.4,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0)),
# training and testing settings
train_cfg=dict(),
test_cfg=dict(
nms_pre=1000,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
data = dict(samples_per_gpu=4, workers_per_gpu=4)
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
|
from typing import Any, Dict, Iterable
import torch
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.util import fullname
class CosineSimilarityLoss(nn.Module):
def __init__(
self,
model: SentenceTransformer,
loss_fct: nn.Module = nn.MSELoss(),
cos_score_transformation: nn.Module = nn.Identity(),
) -> None:
"""
        CosineSimilarityLoss expects that the InputExamples consist of two texts and a float label. It computes the
vectors ``u = model(sentence_A)`` and ``v = model(sentence_B)`` and measures the cosine-similarity between the two.
By default, it minimizes the following loss: ``||input_label - cos_score_transformation(cosine_sim(u,v))||_2``.
Args:
model: SentenceTransformer model
loss_fct: Which pytorch loss function should be used to
compare the ``cosine_similarity(u, v)`` with the
input_label? By default, MSE is used: ``||input_label -
cosine_sim(u, v)||_2``
cos_score_transformation: The cos_score_transformation
function is applied on top of cosine_similarity. By
                default, the identity function is used (i.e. no change).
References:
- `Training Examples > Semantic Textual Similarity <../../examples/training/sts/README.html>`_
Requirements:
1. Sentence pairs with corresponding similarity scores in range `[0, 1]`
Relations:
- :class:`CoSENTLoss` seems to produce a stronger training signal than CosineSimilarityLoss. In our experiments, CoSENTLoss is recommended.
- :class:`AnglELoss` is :class:`CoSENTLoss` with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``. It also produces a stronger training signal than CosineSimilarityLoss.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.CosineSimilarityLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(CosineSimilarityLoss, self).__init__()
self.model = model
self.loss_fct = loss_fct
self.cos_score_transformation = cos_score_transformation
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
output = self.cos_score_transformation(torch.cosine_similarity(embeddings[0], embeddings[1]))
return self.loss_fct(output, labels.float().view(-1))
def get_config_dict(self) -> Dict[str, Any]:
return {"loss_fct": fullname(self.loss_fct)}
|
from typing import Any, Dict, Iterable
import torch
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.util import fullname
class CosineSimilarityLoss(nn.Module):
def __init__(self, model: SentenceTransformer, loss_fct=nn.MSELoss(), cos_score_transformation=nn.Identity()):
"""
        CosineSimilarityLoss expects that the InputExamples consist of two texts and a float label. It computes the
vectors ``u = model(sentence_A)`` and ``v = model(sentence_B)`` and measures the cosine-similarity between the two.
By default, it minimizes the following loss: ``||input_label - cos_score_transformation(cosine_sim(u,v))||_2``.
Args:
model: SentenceTransformer model
loss_fct: Which pytorch loss function should be used to
compare the ``cosine_similarity(u, v)`` with the
input_label? By default, MSE is used: ``||input_label -
cosine_sim(u, v)||_2``
cos_score_transformation: The cos_score_transformation
function is applied on top of cosine_similarity. By
                default, the identity function is used (i.e. no change).
References:
- `Training Examples > Semantic Textual Similarity <../../examples/training/sts/README.html>`_
Requirements:
1. Sentence pairs with corresponding similarity scores in range `[0, 1]`
Relations:
- :class:`CoSENTLoss` seems to produce a stronger training signal than CosineSimilarityLoss. In our experiments, CoSENTLoss is recommended.
- :class:`AnglELoss` is :class:`CoSENTLoss` with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``. It also produces a stronger training signal than CosineSimilarityLoss.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.CosineSimilarityLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(CosineSimilarityLoss, self).__init__()
self.model = model
self.loss_fct = loss_fct
self.cos_score_transformation = cos_score_transformation
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
output = self.cos_score_transformation(torch.cosine_similarity(embeddings[0], embeddings[1]))
return self.loss_fct(output, labels.float().view(-1))
def get_config_dict(self) -> Dict[str, Any]:
return {"loss_fct": fullname(self.loss_fct)}
|
from typing import Any, Optional, Sequence
from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult
from llama_index.core.prompts.mixin import PromptDictType, PromptMixinType
from tonic_validate.metrics.answer_consistency_binary_metric import (
AnswerConsistencyBinaryMetric,
)
from tonic_validate.services.openai_service import OpenAIService
class AnswerConsistencyBinaryEvaluator(BaseEvaluator):
"""
Tonic Validate's answer consistency binary metric.
The output score is a float that is either 0.0 or 1.0.
See https://docs.tonic.ai/validate/ for more details.
Args:
openai_service(OpenAIService): The OpenAI service to use. Specifies the chat
completion model to use as the LLM evaluator. Defaults to "gpt-4".
"""
def __init__(self, openai_service: Optional[Any] = None):
if openai_service is None:
openai_service = OpenAIService("gpt-4")
self.openai_service = openai_service
self.metric = AnswerConsistencyBinaryMetric()
async def aevaluate(
self,
query: Optional[str] = None,
response: Optional[str] = None,
contexts: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> EvaluationResult:
from tonic_validate.classes.benchmark import BenchmarkItem
from tonic_validate.classes.llm_response import LLMResponse
benchmark_item = BenchmarkItem(question=query)
llm_response = LLMResponse(
llm_answer=response,
llm_context_list=contexts,
benchmark_item=benchmark_item,
)
score = self.metric.score(llm_response, self.openai_service)
return EvaluationResult(
query=query, contexts=contexts, response=response, score=score
)
def _get_prompts(self) -> PromptDictType:
return {}
def _get_prompt_modules(self) -> PromptMixinType:
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
return
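# Illustrative usage sketch (not part of the original module), assuming tonic_validate
# and llama_index are installed and valid OpenAI credentials are configured. When no
# service is passed, the evaluator falls back to an OpenAIService backed by "gpt-4",
# and ``aevaluate`` returns an EvaluationResult whose score is either 0.0 or 1.0.
async def _example_consistency_check() -> None:
    evaluator = AnswerConsistencyBinaryEvaluator()
    result = await evaluator.aevaluate(
        query="What color is the sky?",  # hypothetical query/response/contexts
        response="The sky is blue.",
        contexts=["On a clear day the sky appears blue."],
    )
    print(result.score)
# Running it (e.g. ``import asyncio; asyncio.run(_example_consistency_check())``)
# makes a real LLM call, so no call is issued at import time.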
|
from typing import Any, Optional, Sequence
from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult
from llama_index.core.prompts.mixin import PromptDictType, PromptMixinType
from tonic_validate.metrics.answer_consistency_binary_metric import (
AnswerConsistencyBinaryMetric,
)
from tonic_validate.services.openai_service import OpenAIService
class AnswerConsistencyBinaryEvaluator(BaseEvaluator):
"""
Tonic Validate's answer consistency binary metric.
The output score is a float that is either 0.0 or 1.0.
See https://docs.tonic.ai/validate/ for more details.
Args:
openai_service(OpenAIService): The OpenAI service to use. Specifies the chat
completion model to use as the LLM evaluator. Defaults to "gpt-4".
"""
def __init__(self, openai_service: Optional[Any] = None):
if openai_service is None:
openai_service = OpenAIService("gpt-4")
self.openai_service = openai_service
self.metric = AnswerConsistencyBinaryMetric()
async def aevaluate(
self,
query: Optional[str] = None,
response: Optional[str] = None,
contexts: Optional[Sequence[str]] = None,
**kwargs: Any
) -> EvaluationResult:
from tonic_validate.classes.benchmark import BenchmarkItem
from tonic_validate.classes.llm_response import LLMResponse
benchmark_item = BenchmarkItem(question=query)
llm_response = LLMResponse(
llm_answer=response,
llm_context_list=contexts,
benchmark_item=benchmark_item,
)
score = self.metric.score(llm_response, self.openai_service)
return EvaluationResult(
query=query, contexts=contexts, response=response, score=score
)
def _get_prompts(self) -> PromptDictType:
return {}
def _get_prompt_modules(self) -> PromptMixinType:
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
return
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import (
AddedToken,
AutoConfig,
AutoTokenizer,
CLIPImageProcessor,
LlavaProcessor,
VipLlavaConfig,
VipLlavaForConditionalGeneration,
)
KEYS_TO_MODIFY_MAPPING = {
"model.vision_tower.": "",
"model.mm_projector": "multi_modal_projector",
"model": "model.model",
"vision_model.model": "vision_model",
"lm_head": "language_model.lm_head",
"model.model": "language_model.model",
"multi_modal_projector.0": "multi_modal_projector.linear_1",
"multi_modal_projector.2": "multi_modal_projector.linear_2",
"final_linear.0": "linear_1",
"final_linear.2": "linear_2",
"multi_modal_projector.clip_layernorm": "multi_modal_projector.projector_layernorm",
}
# Copied from transformers.models.llava.convert_llava_weights_to_hf.convert_state_dict_to_hf
def convert_state_dict_to_hf(state_dict):
new_state_dict = {}
for key, value in state_dict.items():
if key.endswith(".inv_freq"):
continue
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
key = key.replace(key_to_modify, new_key)
new_state_dict[key] = value
return new_state_dict
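# Illustrative sketch (not part of the original script): how KEYS_TO_MODIFY_MAPPING
# rewrites a single checkpoint key. For the hypothetical key
# "model.mm_projector.0.weight", the loop above first applies
# "model.mm_projector" -> "multi_modal_projector" and then
# "multi_modal_projector.0" -> "multi_modal_projector.linear_1".
def _demo_key_conversion() -> None:
    toy_state_dict = {"model.mm_projector.0.weight": None}
    converted = convert_state_dict_to_hf(toy_state_dict)
    print(converted)  # {'multi_modal_projector.linear_1.weight': None}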
def convert_vipllava_llama_to_hf(text_model_id, vision_model_id, output_hub_path, old_state_dict_id):
torch.set_default_dtype(torch.float16)
text_config = AutoConfig.from_pretrained(text_model_id)
tokenizer = AutoTokenizer.from_pretrained(text_model_id)
tokenizer.add_tokens(AddedToken("<image>", special=True, normalized=False), special_tokens=True)
tokenizer.add_special_tokens({"pad_token": "<pad>"})
image_processor = CLIPImageProcessor.from_pretrained(vision_model_id)
processor = LlavaProcessor(tokenizer=tokenizer, image_processor=image_processor)
config = VipLlavaConfig(text_config=text_config)
config.pad_token_id = 32001
with torch.device("meta"):
model = VipLlavaForConditionalGeneration(config)
# Pad to 64 for performance reasons
pad_shape = 64
state_dict_path = hf_hub_download(old_state_dict_id, "model_state_dict_7b.bin")
state_dict = torch.load(state_dict_path, map_location="cpu", weights_only=True)
state_dict = convert_state_dict_to_hf(state_dict)
model.load_state_dict(state_dict, strict=True, assign=True)
pre_expansion_embeddings = model.language_model.model.embed_tokens.weight.data
mu = torch.mean(pre_expansion_embeddings, dim=0).float()
n = pre_expansion_embeddings.size()[0]
sigma = ((pre_expansion_embeddings - mu).T @ (pre_expansion_embeddings - mu)) / n
dist = torch.distributions.multivariate_normal.MultivariateNormal(mu, covariance_matrix=1e-5 * sigma)
# We add an image token so we resize the model
model.resize_token_embeddings(config.text_config.vocab_size + 2, pad_shape)
model.language_model.model.embed_tokens.weight.data[32000:] = torch.stack(
tuple(dist.sample() for _ in range(model.language_model.model.embed_tokens.weight.data[32000:].shape[0])),
dim=0,
)
model.language_model.lm_head.weight.data[32000:] = torch.stack(
tuple(dist.sample() for _ in range(model.language_model.lm_head.weight.data[32000:].shape[0])),
dim=0,
)
model.push_to_hub(output_hub_path)
processor.push_to_hub(output_hub_path)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--text_model_id",
help="Hub location of the text model",
)
parser.add_argument(
"--vision_model_id",
help="Hub location of the vision model",
)
parser.add_argument(
"--output_hub_path",
help="Location on the hub of the converted model",
)
parser.add_argument(
"--old_state_dict_id",
help="Location on the hub of the raw state dict of the original model. The filename needs to be `model_state_dict.bin`",
)
args = parser.parse_args()
convert_vipllava_llama_to_hf(
args.text_model_id, args.vision_model_id, args.output_hub_path, args.old_state_dict_id
)
if __name__ == "__main__":
main()
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import (
AddedToken,
AutoConfig,
AutoTokenizer,
CLIPImageProcessor,
LlavaProcessor,
VipLlavaConfig,
VipLlavaForConditionalGeneration,
)
KEYS_TO_MODIFY_MAPPING = {
"model.vision_tower.": "",
"model.mm_projector": "multi_modal_projector",
"model": "model.model",
"vision_model.model": "vision_model",
"lm_head": "language_model.lm_head",
"model.model": "language_model.model",
"multi_modal_projector.0": "multi_modal_projector.linear_1",
"multi_modal_projector.2": "multi_modal_projector.linear_2",
"final_linear.0": "linear_1",
"final_linear.2": "linear_2",
"multi_modal_projector.clip_layernorm": "multi_modal_projector.projector_layernorm",
}
# Copied from transformers.models.llava.convert_llava_weights_to_hf.convert_state_dict_to_hf
def convert_state_dict_to_hf(state_dict):
new_state_dict = {}
for key, value in state_dict.items():
if key.endswith(".inv_freq"):
continue
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
key = key.replace(key_to_modify, new_key)
new_state_dict[key] = value
return new_state_dict
def convert_vipllava_llama_to_hf(text_model_id, vision_model_id, output_hub_path, old_state_dict_id):
torch.set_default_dtype(torch.float16)
text_config = AutoConfig.from_pretrained(text_model_id)
tokenizer = AutoTokenizer.from_pretrained(text_model_id)
tokenizer.add_tokens(AddedToken("<image>", special=True, normalized=False), special_tokens=True)
tokenizer.add_special_tokens({"pad_token": "<pad>"})
image_processor = CLIPImageProcessor.from_pretrained(vision_model_id)
processor = LlavaProcessor(tokenizer=tokenizer, image_processor=image_processor)
config = VipLlavaConfig(text_config=text_config)
config.pad_token_id = 32001
with torch.device("meta"):
model = VipLlavaForConditionalGeneration(config)
# Pad to 64 for performance reasons
pad_shape = 64
state_dict_path = hf_hub_download(old_state_dict_id, "model_state_dict_7b.bin")
state_dict = torch.load(state_dict_path, map_location="cpu", weights_only=True)
state_dict = convert_state_dict_to_hf(state_dict)
model.load_state_dict(state_dict, strict=True, assign=True)
pre_expansion_embeddings = model.language_model.model.embed_tokens.weight.data
mu = torch.mean(pre_expansion_embeddings, dim=0).float()
n = pre_expansion_embeddings.size()[0]
sigma = ((pre_expansion_embeddings - mu).T @ (pre_expansion_embeddings - mu)) / n
dist = torch.distributions.multivariate_normal.MultivariateNormal(mu, covariance_matrix=1e-5 * sigma)
# We add an image token so we resize the model
model.resize_token_embeddings(config.text_config.vocab_size + 2, pad_shape)
model.language_model.model.embed_tokens.weight.data[32000:] = torch.stack(
tuple((dist.sample() for _ in range(model.language_model.model.embed_tokens.weight.data[32000:].shape[0]))),
dim=0,
)
model.language_model.lm_head.weight.data[32000:] = torch.stack(
tuple((dist.sample() for _ in range(model.language_model.lm_head.weight.data[32000:].shape[0]))),
dim=0,
)
model.push_to_hub(output_hub_path)
processor.push_to_hub(output_hub_path)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--text_model_id",
help="Hub location of the text model",
)
parser.add_argument(
"--vision_model_id",
help="Hub location of the vision model",
)
parser.add_argument(
"--output_hub_path",
help="Location on the hub of the converted model",
)
parser.add_argument(
"--old_state_dict_id",
help="Location on the hub of the raw state dict of the original model. The filename needs to be `model_state_dict.bin`",
)
args = parser.parse_args()
convert_vipllava_llama_to_hf(
args.text_model_id, args.vision_model_id, args.output_hub_path, args.old_state_dict_id
)
if __name__ == "__main__":
main()
|
from docarray import BaseDocument, DocumentArray
from docarray.base_document import AnyDocument
def test_generic_init():
class Text(BaseDocument):
text: str
da = DocumentArray[Text]([])
    assert da.document_type == Text
assert isinstance(da, DocumentArray)
def test_normal_access_init():
da = DocumentArray([])
    assert da.document_type == AnyDocument
assert isinstance(da, DocumentArray)
|
from docarray import BaseDocument, DocumentArray
from docarray.document import AnyDocument
def test_generic_init():
class Text(BaseDocument):
text: str
da = DocumentArray[Text]([])
    assert da.document_type == Text
assert isinstance(da, DocumentArray)
def test_normal_access_init():
da = DocumentArray([])
    assert da.document_type == AnyDocument
assert isinstance(da, DocumentArray)
|
_base_ = ['co_dino_5scale_swin_l_lsj_16xb1_1x_coco.py']
model = dict(backbone=dict(drop_path_rate=0.5))
param_scheduler = [dict(type='MultiStepLR', milestones=[30])]
train_cfg = dict(max_epochs=36)
|
_base_ = ['co_dino_5scale_swin_l_lsj_16xb1_1x_coco.py']
model = dict(backbone=dict(drop_path_rate=0.5))
param_scheduler = [dict(milestones=[30])]
train_cfg = dict(max_epochs=36)
|
"""Test ChatDeepSeek chat model."""
from typing import Optional, Type
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessageChunk, BaseMessageChunk
from langchain_core.tools import BaseTool
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_deepseek.chat_models import ChatDeepSeek
class TestChatDeepSeek(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> Type[ChatDeepSeek]:
return ChatDeepSeek
@property
def chat_model_params(self) -> dict:
# These should be parameters used to initialize your integration for testing
return {
"model": "deepseek-chat",
"temperature": 0,
}
@property
def supports_json_mode(self) -> bool:
"""(bool) whether the chat model supports JSON mode."""
return True
@pytest.mark.xfail(reason="Not yet supported.")
def test_tool_message_histories_list_content(
self, model: BaseChatModel, my_adder_tool: BaseTool
) -> None:
super().test_tool_message_histories_list_content(model, my_adder_tool)
@pytest.mark.xfail(reason="Takes > 30s to run.")
def test_reasoning_content() -> None:
"""Test reasoning content."""
chat_model = ChatDeepSeek(model="deepseek-reasoner")
response = chat_model.invoke("What is the square root of 256256?")
assert response.content
assert response.additional_kwargs["reasoning_content"]
raise ValueError()
@pytest.mark.xfail(reason="Takes > 30s to run.")
def test_reasoning_content_streaming() -> None:
chat_model = ChatDeepSeek(model="deepseek-reasoner")
full: Optional[BaseMessageChunk] = None
for chunk in chat_model.stream("What is the square root of 256256?"):
full = chunk if full is None else full + chunk
assert isinstance(full, AIMessageChunk)
assert full.additional_kwargs["reasoning_content"]
|
"""Test ChatDeepSeek chat model."""
from typing import Optional, Type
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessageChunk, BaseMessageChunk
from langchain_core.tools import BaseTool
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_deepseek.chat_models import ChatDeepSeek
class TestChatDeepSeek(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> Type[ChatDeepSeek]:
return ChatDeepSeek
@property
def chat_model_params(self) -> dict:
# These should be parameters used to initialize your integration for testing
return {
"model": "deepseek-chat",
"temperature": 0,
}
@pytest.mark.xfail(reason="Not yet supported.")
def test_tool_message_histories_list_content(
self, model: BaseChatModel, my_adder_tool: BaseTool
) -> None:
super().test_tool_message_histories_list_content(model, my_adder_tool)
@pytest.mark.xfail(reason="Takes > 30s to run.")
def test_reasoning_content() -> None:
"""Test reasoning content."""
chat_model = ChatDeepSeek(model="deepseek-reasoner")
response = chat_model.invoke("What is the square root of 256256?")
assert response.content
assert response.additional_kwargs["reasoning_content"]
raise ValueError()
@pytest.mark.xfail(reason="Takes > 30s to run.")
def test_reasoning_content_streaming() -> None:
chat_model = ChatDeepSeek(model="deepseek-reasoner")
full: Optional[BaseMessageChunk] = None
for chunk in chat_model.stream("What is the square root of 256256?"):
full = chunk if full is None else full + chunk
assert isinstance(full, AIMessageChunk)
assert full.additional_kwargs["reasoning_content"]
|
# Copyright (c) OpenMMLab. All rights reserved.
import random
from typing import Sequence
import numpy as np
import torch
DATA_BATCH = Sequence[dict]
def worker_init_fn(worker_id: int, num_workers: int, rank: int,
seed: int) -> None:
"""This function will be called on each worker subprocess after seeding and
before data loading.
Args:
worker_id (int): Worker id in [0, num_workers - 1].
num_workers (int): How many subprocesses to use for data loading.
rank (int): Rank of process in distributed environment. If in
non-distributed environment, it is a constant number `0`.
seed (int): Random seed.
"""
    # The seed of each worker equals
    # num_workers * rank + worker_id + seed
worker_seed = num_workers * rank + worker_id + seed
np.random.seed(worker_seed)
random.seed(worker_seed)
torch.manual_seed(worker_seed)
def pseudo_collate(data_batch: DATA_BATCH) -> DATA_BATCH:
"""The default behavior of dataloader is to merge a list of samples to form
a mini-batch of Tensor(s). However, in MMEngine, ``pseudo_collate`` does
nothing just returns ``data_batch``.
Args:
data_batch (Sequence[dict]): Batch of data from
dataloader.
Returns:
Sequence[dict]: Return input ``data_batch``.
"""
return data_batch
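# Illustrative sketch (not part of the original module): the per-worker seed rule
# used in ``worker_init_fn`` above, evaluated for a hypothetical setup with
# 2 ranks, 4 dataloader workers per rank and user seed 42. Every (rank, worker)
# pair gets a distinct seed.
def _demo_worker_seeds() -> None:
    num_workers, seed = 4, 42
    for rank in range(2):
        for worker_id in range(num_workers):
            worker_seed = num_workers * rank + worker_id + seed
            print(f"rank={rank} worker_id={worker_id} -> seed={worker_seed}")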
|
# Copyright (c) OpenMMLab. All rights reserved.
import random
from typing import Any, Sequence, Tuple
import numpy as np
import torch
from .base_data_element import BaseDataElement
DATA_BATCH = Sequence[Tuple[Any, BaseDataElement]]
def worker_init_fn(worker_id: int, num_workers: int, rank: int,
seed: int) -> None:
"""This function will be called on each worker subprocess after seeding and
before data loading.
Args:
worker_id (int): Worker id in [0, num_workers - 1].
num_workers (int): How many subprocesses to use for data loading.
rank (int): Rank of process in distributed environment. If in
non-distributed environment, it is a constant number `0`.
seed (int): Random seed.
"""
    # The seed of each worker equals
    # num_workers * rank + worker_id + seed
worker_seed = num_workers * rank + worker_id + seed
np.random.seed(worker_seed)
random.seed(worker_seed)
torch.manual_seed(worker_seed)
def pseudo_collate(data_batch: DATA_BATCH) -> DATA_BATCH:
"""The default behavior of dataloader is to merge a list of samples to form
a mini-batch of Tensor(s). However, in MMEngine, ``pseudo_collate`` does
nothing just returns ``data_batch``.
Args:
data_batch (Sequence[Tuple[Any, BaseDataElement]]): Batch of data from
dataloader.
Returns:
Sequence[Tuple[Any, BaseDataElement]]: Return input ``data_batch``.
"""
return data_batch
|
import json
from typing import Any, Dict, List, Optional, Tuple
import pytest
from jina import Executor, Flow, requests
from jina.clients.base.grpc import client_grpc_options
from jina.clients.base.helper import HTTPClientlet, WebsocketClientlet
from jina.clients.request.helper import _new_data_request
from jina.excepts import BadServer
from jina.logging.logger import JinaLogger
from jina.serve.helper import get_default_grpc_options
from jina.types.request.data import DataRequest
logger = JinaLogger('clientlet')
class ClientTestExecutor(Executor):
@requests
def error(self, **kwargs):
raise NotImplementedError
@pytest.fixture
def flow_with_exception_request():
return Flow().add(uses=ClientTestExecutor).add()
@pytest.mark.asyncio
async def test_http_clientlet():
from jina.helper import random_port
port = random_port()
with Flow(port=port, protocol='http').add():
async with HTTPClientlet(
logger=logger
) as iolet:
request = _new_data_request('/', None, {'a': 'b'})
assert request.header.target_executor == ''
r_status, r_json = await iolet.send_message(url=f'http://localhost:{port}/post', request=request)
response = DataRequest(r_json)
assert response.header.exec_endpoint == '/'
assert response.parameters == {'a': 'b'}
@pytest.mark.asyncio
async def test_http_clientlet_target():
from jina.helper import random_port
port = random_port()
with Flow(port=port, protocol='http').add():
async with HTTPClientlet(
logger=logger
) as iolet:
request = _new_data_request('/', 'nothing', {'a': 'b'})
assert request.header.target_executor == 'nothing'
r = await iolet.send_message(url=f'http://localhost:{port}/post', request=request)
r_status, r_json = r
response = DataRequest(r_json)
assert response.header.exec_endpoint == '/'
assert response.parameters == {'a': 'b'}
@pytest.mark.asyncio
async def test_websocket_clientlet():
with pytest.raises(ConnectionError):
async with WebsocketClientlet(url='ws://localhost:12345', logger=logger):
pass
def test_client_behaviour(flow_with_exception_request, mocker):
on_done_mock = mocker.Mock()
on_always_mock = mocker.Mock()
on_error_mock = None
with pytest.raises(BadServer):
with flow_with_exception_request as f:
f.post(
'',
on_done=on_done_mock,
on_error=on_error_mock,
on_always=on_always_mock,
)
on_always_mock.assert_called_once()
on_done_mock.assert_not_called()
def _get_grpc_service_config_json(
options: List[Tuple[str, Any]]
) -> Optional[Dict[str, Any]]:
for tup in options:
if tup[0] == 'grpc.service_config':
return json.loads(tup[1])
return None
@pytest.mark.parametrize('max_attempts', [-1, 1, 2])
@pytest.mark.parametrize('grpc_options', [None, {"grpc.keepalive_time_ms": 9999}])
def test_client_grpc_options(max_attempts, grpc_options):
default_options = get_default_grpc_options()
backoff_multiplier = 1.5
initial_backoff = 0.5
max_backoff = 5
options = client_grpc_options(
backoff_multiplier=backoff_multiplier,
initial_backoff=initial_backoff,
max_attempts=max_attempts,
max_backoff=max_backoff,
args_channel_options=grpc_options,
)
assert len(options) >= len(default_options)
if grpc_options and max_attempts <= 1:
assert len(default_options) + 1 == len(options)
elif grpc_options and max_attempts > 1:
assert len(default_options) + 3 == len(options)
elif not grpc_options and max_attempts <= 1:
assert len(options) == len(default_options)
elif not grpc_options and max_attempts > 1:
assert len(default_options) + 2 == len(options)
if max_attempts <= 1:
assert not _get_grpc_service_config_json(options)
else:
service_config_json = _get_grpc_service_config_json(options)
retry_config = service_config_json['methodConfig'][0]
assert retry_config['name'] == [{}]
assert retry_config['retryPolicy'] == {
'maxAttempts': max_attempts,
'initialBackoff': f'{initial_backoff}s',
'backoffMultiplier': backoff_multiplier,
'maxBackoff': f'{max_backoff}s',
'retryableStatusCodes': ['UNAVAILABLE', 'DEADLINE_EXCEEDED', 'INTERNAL'],
}
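# For reference only (values mirror the assertions above): with max_attempts=2 the extra
# 'grpc.service_config' option carries JSON equivalent to
# {
#     "methodConfig": [{
#         "name": [{}],
#         "retryPolicy": {
#             "maxAttempts": 2,
#             "initialBackoff": "0.5s",
#             "backoffMultiplier": 1.5,
#             "maxBackoff": "5s",
#             "retryableStatusCodes": ["UNAVAILABLE", "DEADLINE_EXCEEDED", "INTERNAL"]
#         }
#     }]
# }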
|
import json
from typing import Any, Dict, List, Optional, Tuple
import pytest
from jina import Executor, Flow, requests
from jina.clients.base.grpc import client_grpc_options
from jina.clients.base.helper import HTTPClientlet, WebsocketClientlet
from jina.clients.request.helper import _new_data_request
from jina.excepts import BadServer
from jina.logging.logger import JinaLogger
from jina.serve.helper import get_default_grpc_options
from jina.types.request.data import DataRequest
logger = JinaLogger('clientlet')
class ClientTestExecutor(Executor):
@requests
def error(self, **kwargs):
raise NotImplementedError
@pytest.fixture
def flow_with_exception_request():
return Flow().add(uses=ClientTestExecutor).add()
@pytest.mark.asyncio
async def test_http_clientlet():
from jina.helper import random_port
port = random_port()
with Flow(port=port, protocol='http').add():
async with HTTPClientlet(
url=f'http://localhost:{port}/post', logger=logger
) as iolet:
request = _new_data_request('/', None, {'a': 'b'})
assert request.header.target_executor == ''
r_status, r_json = await iolet.send_message(request)
response = DataRequest(r_json)
assert response.header.exec_endpoint == '/'
assert response.parameters == {'a': 'b'}
@pytest.mark.asyncio
async def test_http_clientlet_target():
from jina.helper import random_port
port = random_port()
with Flow(port=port, protocol='http').add():
async with HTTPClientlet(
url=f'http://localhost:{port}/post', logger=logger
) as iolet:
request = _new_data_request('/', 'nothing', {'a': 'b'})
assert request.header.target_executor == 'nothing'
r = await iolet.send_message(request)
r_status, r_json = r
response = DataRequest(r_json)
assert response.header.exec_endpoint == '/'
assert response.parameters == {'a': 'b'}
@pytest.mark.asyncio
async def test_websocket_clientlet():
with pytest.raises(ConnectionError):
async with WebsocketClientlet(url='ws://localhost:12345', logger=logger):
pass
def test_client_behaviour(flow_with_exception_request, mocker):
on_done_mock = mocker.Mock()
on_always_mock = mocker.Mock()
on_error_mock = None
with pytest.raises(BadServer):
with flow_with_exception_request as f:
f.post(
'',
on_done=on_done_mock,
on_error=on_error_mock,
on_always=on_always_mock,
)
on_always_mock.assert_called_once()
on_done_mock.assert_not_called()
def _get_grpc_service_config_json(
options: List[Tuple[str, Any]]
) -> Optional[Dict[str, Any]]:
for tup in options:
if tup[0] == 'grpc.service_config':
return json.loads(tup[1])
return None
@pytest.mark.parametrize('max_attempts', [-1, 1, 2])
@pytest.mark.parametrize('grpc_options', [None, {"grpc.keepalive_time_ms": 9999}])
def test_client_grpc_options(max_attempts, grpc_options):
default_options = get_default_grpc_options()
backoff_multiplier = 1.5
initial_backoff = 0.5
max_backoff = 5
options = client_grpc_options(
backoff_multiplier=backoff_multiplier,
initial_backoff=initial_backoff,
max_attempts=max_attempts,
max_backoff=max_backoff,
args_channel_options=grpc_options,
)
assert len(options) >= len(default_options)
if grpc_options and max_attempts <= 1:
assert len(default_options) + 1 == len(options)
elif grpc_options and max_attempts > 1:
assert len(default_options) + 3 == len(options)
elif not grpc_options and max_attempts <= 1:
assert len(options) == len(default_options)
elif not grpc_options and max_attempts > 1:
assert len(default_options) + 2 == len(options)
if max_attempts <= 1:
assert not _get_grpc_service_config_json(options)
else:
service_config_json = _get_grpc_service_config_json(options)
retry_config = service_config_json['methodConfig'][0]
assert retry_config['name'] == [{}]
assert retry_config['retryPolicy'] == {
'maxAttempts': max_attempts,
'initialBackoff': f'{initial_backoff}s',
'backoffMultiplier': backoff_multiplier,
'maxBackoff': f'{max_backoff}s',
'retryableStatusCodes': ['UNAVAILABLE', 'DEADLINE_EXCEEDED', 'INTERNAL'],
}
|
import pytest
from langchain.evaluation.parsing.json_distance import JsonEditDistanceEvaluator
@pytest.fixture
def json_distance_evaluator() -> JsonEditDistanceEvaluator:
return JsonEditDistanceEvaluator()
@pytest.mark.requires("rapidfuzz")
def test_json_distance_evaluator_requires_input(
json_distance_evaluator: JsonEditDistanceEvaluator,
) -> None:
assert json_distance_evaluator.requires_input is False
@pytest.mark.requires("rapidfuzz")
def test_json_distance_evaluator_requires_reference(
json_distance_evaluator: JsonEditDistanceEvaluator,
) -> None:
assert json_distance_evaluator.requires_reference is True
@pytest.mark.requires("rapidfuzz")
def test_json_distance_evaluator_evaluation_name(
json_distance_evaluator: JsonEditDistanceEvaluator,
) -> None:
assert json_distance_evaluator.evaluation_name == "json_edit_distance"
@pytest.mark.requires("rapidfuzz")
def test_json_distance_evaluator_parse_json(
json_distance_evaluator: JsonEditDistanceEvaluator,
) -> None:
string = '{"a": 1}'
result = json_distance_evaluator._parse_json(string)
assert result == {"a": 1}
@pytest.mark.requires("rapidfuzz")
def test_json_distance_evaluator_evaluate_strings_simple_diff(
json_distance_evaluator: JsonEditDistanceEvaluator,
) -> None:
prediction = '{"a": 1}'
reference = '{"a": 2}'
result = json_distance_evaluator._evaluate_strings(
prediction=prediction,
reference=reference,
)
# Only 1 character flipped
    assert result["score"] == pytest.approx(1 / 7)
@pytest.mark.requires("rapidfuzz")
def test_json_distance_evaluator_evaluate_strings_complex_diff(
json_distance_evaluator: JsonEditDistanceEvaluator,
) -> None:
prediction = '{"a":1, "b": {"c": 2, "d": 3}}'
reference = '{"a": 1, "b": {"c": 2, "d": 4}}'
result = json_distance_evaluator._evaluate_strings(
prediction=prediction,
reference=reference,
)
# Only 1 character flipped
    assert result["score"] == pytest.approx(1 / len(reference.replace(" ", "")))
@pytest.mark.requires("rapidfuzz")
def test_json_distance_evaluator_evaluate_strings_list_diff(
json_distance_evaluator: JsonEditDistanceEvaluator,
) -> None:
prediction = '[{"a": 1, "b": 2}, {"a": 2, "b": 3}]'
reference = '[{"a": 1, "b": 2}, {"a": 2, "b": 4}]'
result = json_distance_evaluator._evaluate_strings(
prediction=prediction,
reference=reference,
)
# Again only 1 character flipped
    assert result["score"] == pytest.approx(1 / len(reference.replace(" ", "")))
@pytest.mark.requires("rapidfuzz")
def test_json_distance_evaluator_evaluate_strings_list_same(
json_distance_evaluator: JsonEditDistanceEvaluator,
) -> None:
prediction = '[{"a": 1, "b": 2}, {"a": 2, "b": 3}]'
reference = '[{"b": 2, "a": 1}, {"b": 3, "a": 2}]'
result = json_distance_evaluator._evaluate_strings(
prediction=prediction,
reference=reference,
)
assert result["score"] == 0
@pytest.mark.requires("rapidfuzz")
def test_json_distance_evaluator_evaluate_strings_list_diff_length(
json_distance_evaluator: JsonEditDistanceEvaluator,
) -> None:
prediction = '[{"a": 1, "b": 2}, {"a": 2, "b": 3}]'
reference = '[{"a": 1, "b": 2}]'
result = json_distance_evaluator._evaluate_strings(
prediction=prediction,
reference=reference,
)
    assert result["score"] == pytest.approx(
        len('{"a":2,"b":3}') / len(reference.replace(" ", ""))
    )
@pytest.mark.requires("rapidfuzz")
def test_json_distance_evaluator_evaluate_strings_custom_operator_equal() -> None:
"""Custom operator that returns 0.5 if strings are different."""
def custom_distance(a: str, b: str) -> float:
return 0.5 if a != b else 0.0
evaluator = JsonEditDistanceEvaluator(string_distance=custom_distance)
prediction = '{"a": "apple", "b": "banana"}'
reference = '{"a": "apple", "b": "berries"}'
result = evaluator._evaluate_strings(prediction=prediction, reference=reference)
assert result["score"] == 0.5
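# Taken together, these tests characterize the default score: both sides are parsed and
# re-serialized without whitespace (so key order and spacing do not matter), and the score
# is approximately the number of differing characters divided by the length of the
# canonicalized reference; a score of 0 means the parsed objects are equivalent.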
|
import pytest
from langchain.evaluation.parsing.json_distance import JsonEditDistanceEvaluator
@pytest.fixture
def json_distance_evaluator() -> JsonEditDistanceEvaluator:
return JsonEditDistanceEvaluator()
@pytest.mark.requires("rapidfuzz")
def test_json_distance_evaluator_requires_input(
json_distance_evaluator: JsonEditDistanceEvaluator,
) -> None:
assert json_distance_evaluator.requires_input is False
@pytest.mark.requires("rapidfuzz")
def test_json_distance_evaluator_requires_reference(
json_distance_evaluator: JsonEditDistanceEvaluator,
) -> None:
assert json_distance_evaluator.requires_reference is True
@pytest.mark.requires("rapidfuzz")
def test_json_distance_evaluator_evaluation_name(
json_distance_evaluator: JsonEditDistanceEvaluator,
) -> None:
assert json_distance_evaluator.evaluation_name == "json_edit_distance"
@pytest.mark.requires("rapidfuzz")
def test_json_distance_evaluator_parse_json(
json_distance_evaluator: JsonEditDistanceEvaluator,
) -> None:
string = '{"a": 1}'
result = json_distance_evaluator._parse_json(string)
assert result == {"a": 1}
@pytest.mark.requires("rapidfuzz")
def test_json_distance_evaluator_evaluate_strings_simple_diff(
json_distance_evaluator: JsonEditDistanceEvaluator,
) -> None:
prediction = '{"a": 1}'
reference = '{"a": 2}'
result = json_distance_evaluator._evaluate_strings(
prediction=prediction, reference=reference
)
# Only 1 character flipped
    assert result["score"] == pytest.approx(1 / 7)
@pytest.mark.requires("rapidfuzz")
def test_json_distance_evaluator_evaluate_strings_complex_diff(
json_distance_evaluator: JsonEditDistanceEvaluator,
) -> None:
prediction = '{"a":1, "b": {"c": 2, "d": 3}}'
reference = '{"a": 1, "b": {"c": 2, "d": 4}}'
result = json_distance_evaluator._evaluate_strings(
prediction=prediction, reference=reference
)
# Only 1 character flipped
    assert result["score"] == pytest.approx(1 / len(reference.replace(" ", "")))
@pytest.mark.requires("rapidfuzz")
def test_json_distance_evaluator_evaluate_strings_list_diff(
json_distance_evaluator: JsonEditDistanceEvaluator,
) -> None:
prediction = '[{"a": 1, "b": 2}, {"a": 2, "b": 3}]'
reference = '[{"a": 1, "b": 2}, {"a": 2, "b": 4}]'
result = json_distance_evaluator._evaluate_strings(
prediction=prediction, reference=reference
)
# Again only 1 character flipped
    assert result["score"] == pytest.approx(1 / len(reference.replace(" ", "")))
@pytest.mark.requires("rapidfuzz")
def test_json_distance_evaluator_evaluate_strings_list_same(
json_distance_evaluator: JsonEditDistanceEvaluator,
) -> None:
prediction = '[{"a": 1, "b": 2}, {"a": 2, "b": 3}]'
reference = '[{"b": 2, "a": 1}, {"b": 3, "a": 2}]'
result = json_distance_evaluator._evaluate_strings(
prediction=prediction, reference=reference
)
assert result["score"] == 0
@pytest.mark.requires("rapidfuzz")
def test_json_distance_evaluator_evaluate_strings_list_diff_length(
json_distance_evaluator: JsonEditDistanceEvaluator,
) -> None:
prediction = '[{"a": 1, "b": 2}, {"a": 2, "b": 3}]'
reference = '[{"a": 1, "b": 2}]'
result = json_distance_evaluator._evaluate_strings(
prediction=prediction, reference=reference
)
    assert result["score"] == pytest.approx(
        len('{"a":2,"b":3}') / len(reference.replace(" ", ""))
    )
@pytest.mark.requires("rapidfuzz")
def test_json_distance_evaluator_evaluate_strings_custom_operator_equal() -> None:
"""Custom operator that returns 0.5 if strings are different."""
def custom_distance(a: str, b: str) -> float:
return 0.5 if a != b else 0.0
evaluator = JsonEditDistanceEvaluator(string_distance=custom_distance)
prediction = '{"a": "apple", "b": "banana"}'
reference = '{"a": "apple", "b": "berries"}'
result = evaluator._evaluate_strings(prediction=prediction, reference=reference)
assert result["score"] == 0.5
|
import os
import pathlib
from typing import Any, Callable, Optional, Tuple, Union
from .folder import default_loader
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class DTD(VisionDataset):
"""`Describable Textures Dataset (DTD) <https://www.robots.ox.ac.uk/~vgg/data/dtd/>`_.
Args:
root (str or ``pathlib.Path``): Root directory of the dataset.
split (string, optional): The dataset split, supports ``"train"`` (default), ``"val"``, or ``"test"``.
partition (int, optional): The dataset partition. Should be ``1 <= partition <= 10``. Defaults to ``1``.
.. note::
The partition only changes which split each image belongs to. Thus, regardless of the selected
partition, combining all splits will result in all images.
        transform (callable, optional): A function/transform that takes in a PIL image or ``torch.Tensor``
            (depending on the given loader) and returns a transformed version, e.g., ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again. Default is False.
loader (callable, optional): A function to load an image given its path.
By default, it uses PIL as its image loader, but users could also pass in
``torchvision.io.decode_image`` for decoding image data into tensors directly.
"""
_URL = "https://www.robots.ox.ac.uk/~vgg/data/dtd/download/dtd-r1.0.1.tar.gz"
_MD5 = "fff73e5086ae6bdbea199a49dfb8a4c1"
def __init__(
self,
root: Union[str, pathlib.Path],
split: str = "train",
partition: int = 1,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
loader: Callable[[Union[str, pathlib.Path]], Any] = default_loader,
) -> None:
self._split = verify_str_arg(split, "split", ("train", "val", "test"))
        if not isinstance(partition, int) or not (1 <= partition <= 10):
raise ValueError(
f"Parameter 'partition' should be an integer with `1 <= partition <= 10`, "
f"but got {partition} instead"
)
self._partition = partition
super().__init__(root, transform=transform, target_transform=target_transform)
self._base_folder = pathlib.Path(self.root) / type(self).__name__.lower()
self._data_folder = self._base_folder / "dtd"
self._meta_folder = self._data_folder / "labels"
self._images_folder = self._data_folder / "images"
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
self._image_files = []
classes = []
with open(self._meta_folder / f"{self._split}{self._partition}.txt") as file:
for line in file:
cls, name = line.strip().split("/")
self._image_files.append(self._images_folder.joinpath(cls, name))
classes.append(cls)
self.classes = sorted(set(classes))
self.class_to_idx = dict(zip(self.classes, range(len(self.classes))))
self._labels = [self.class_to_idx[cls] for cls in classes]
self.loader = loader
def __len__(self) -> int:
return len(self._image_files)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
image_file, label = self._image_files[idx], self._labels[idx]
image = self.loader(image_file)
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
def extra_repr(self) -> str:
return f"split={self._split}, partition={self._partition}"
def _check_exists(self) -> bool:
return os.path.exists(self._data_folder) and os.path.isdir(self._data_folder)
def _download(self) -> None:
if self._check_exists():
return
download_and_extract_archive(self._URL, download_root=str(self._base_folder), md5=self._MD5)
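# Hedged usage sketch (not part of the original module): exercises the dataset end to end.
# Assumes torchvision is installed and the DTD archive can be downloaded into ./data.
if __name__ == "__main__":
    dtd = DTD(root="data", split="train", partition=1, download=True)
    image, label = dtd[0]  # PIL image (or tensor, depending on the loader) and class index
    print(len(dtd), dtd.classes[label])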
|
import os
import pathlib
from typing import Any, Callable, Optional, Tuple, Union
import PIL.Image
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class DTD(VisionDataset):
"""`Describable Textures Dataset (DTD) <https://www.robots.ox.ac.uk/~vgg/data/dtd/>`_.
Args:
root (str or ``pathlib.Path``): Root directory of the dataset.
split (string, optional): The dataset split, supports ``"train"`` (default), ``"val"``, or ``"test"``.
partition (int, optional): The dataset partition. Should be ``1 <= partition <= 10``. Defaults to ``1``.
.. note::
The partition only changes which split each image belongs to. Thus, regardless of the selected
partition, combining all splits will result in all images.
        transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
            version, e.g., ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again. Default is False.
"""
_URL = "https://www.robots.ox.ac.uk/~vgg/data/dtd/download/dtd-r1.0.1.tar.gz"
_MD5 = "fff73e5086ae6bdbea199a49dfb8a4c1"
def __init__(
self,
root: Union[str, pathlib.Path],
split: str = "train",
partition: int = 1,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
self._split = verify_str_arg(split, "split", ("train", "val", "test"))
        if not isinstance(partition, int) or not (1 <= partition <= 10):
raise ValueError(
f"Parameter 'partition' should be an integer with `1 <= partition <= 10`, "
f"but got {partition} instead"
)
self._partition = partition
super().__init__(root, transform=transform, target_transform=target_transform)
self._base_folder = pathlib.Path(self.root) / type(self).__name__.lower()
self._data_folder = self._base_folder / "dtd"
self._meta_folder = self._data_folder / "labels"
self._images_folder = self._data_folder / "images"
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
self._image_files = []
classes = []
with open(self._meta_folder / f"{self._split}{self._partition}.txt") as file:
for line in file:
cls, name = line.strip().split("/")
self._image_files.append(self._images_folder.joinpath(cls, name))
classes.append(cls)
self.classes = sorted(set(classes))
self.class_to_idx = dict(zip(self.classes, range(len(self.classes))))
self._labels = [self.class_to_idx[cls] for cls in classes]
def __len__(self) -> int:
return len(self._image_files)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
image_file, label = self._image_files[idx], self._labels[idx]
image = PIL.Image.open(image_file).convert("RGB")
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
def extra_repr(self) -> str:
return f"split={self._split}, partition={self._partition}"
def _check_exists(self) -> bool:
return os.path.exists(self._data_folder) and os.path.isdir(self._data_folder)
def _download(self) -> None:
if self._check_exists():
return
download_and_extract_archive(self._URL, download_root=str(self._base_folder), md5=self._MD5)
|
"""Copyright 2024, XGBoost contributors"""
import pytest
from distributed import Client, Scheduler, Worker
from distributed.utils_test import gen_cluster
import xgboost as xgb
from xgboost import dask as dxgb
from xgboost import testing as tm
from xgboost.testing.dask import check_external_memory
@pytest.mark.parametrize("is_qdm", [True, False])
@gen_cluster(client=True)
async def test_external_memory(
client: Client, s: Scheduler, a: Worker, b: Worker, is_qdm: bool
) -> None:
workers = tm.get_client_workers(client)
args = await client.sync(
dxgb._get_rabit_args,
len(workers),
None,
client,
)
n_workers = len(workers)
futs = client.map(
check_external_memory,
range(n_workers),
n_workers=n_workers,
device="cpu",
comm_args=args,
is_qdm=is_qdm,
)
await client.gather(futs)
|
"""Copyright 2024, XGBoost contributors"""
import pytest
from distributed import Client, Scheduler, Worker
from distributed.utils_test import gen_cluster
import xgboost as xgb
from xgboost import testing as tm
from xgboost.testing.dask import check_external_memory
@pytest.mark.parametrize("is_qdm", [True, False])
@gen_cluster(client=True)
async def test_external_memory(
client: Client, s: Scheduler, a: Worker, b: Worker, is_qdm: bool
) -> None:
workers = tm.get_client_workers(client)
args = await client.sync(
xgb.dask._get_rabit_args,
len(workers),
None,
client,
)
n_workers = len(workers)
futs = client.map(
check_external_memory,
range(n_workers),
n_workers=n_workers,
device="cpu",
comm_args=args,
is_qdm=is_qdm,
)
await client.gather(futs)
|
# coding: utf-8
"""LightGBM, Light Gradient Boosting Machine.
Contributors: https://github.com/microsoft/LightGBM/graphs/contributors.
"""
from pathlib import Path
from .basic import Booster, Dataset, Sequence, register_logger
from .callback import EarlyStopException, early_stopping, log_evaluation, record_evaluation, reset_parameter
from .engine import CVBooster, cv, train
try:
from .sklearn import LGBMClassifier, LGBMModel, LGBMRanker, LGBMRegressor
except ImportError:
pass
try:
from .plotting import create_tree_digraph, plot_importance, plot_metric, plot_split_value_histogram, plot_tree
except ImportError:
pass
try:
from .dask import DaskLGBMClassifier, DaskLGBMRanker, DaskLGBMRegressor
except ImportError:
pass
_version_path = Path(__file__).absolute().parent / "VERSION.txt"
if _version_path.is_file():
__version__ = _version_path.read_text(encoding="utf-8").strip()
__all__ = [
"Dataset",
"Booster",
"CVBooster",
"Sequence",
"register_logger",
"train",
"cv",
"LGBMModel",
"LGBMRegressor",
"LGBMClassifier",
"LGBMRanker",
"DaskLGBMRegressor",
"DaskLGBMClassifier",
"DaskLGBMRanker",
"log_evaluation",
"record_evaluation",
"reset_parameter",
"early_stopping",
"EarlyStopException",
"plot_importance",
"plot_split_value_histogram",
"plot_metric",
"plot_tree",
"create_tree_digraph",
]
|
# coding: utf-8
"""LightGBM, Light Gradient Boosting Machine.
Contributors: https://github.com/microsoft/LightGBM/graphs/contributors.
"""
from pathlib import Path
from .basic import Booster, Dataset, Sequence, register_logger
from .callback import EarlyStopException, early_stopping, log_evaluation, record_evaluation, reset_parameter
from .engine import CVBooster, cv, train
try:
from .sklearn import LGBMClassifier, LGBMModel, LGBMRanker, LGBMRegressor
except ImportError:
pass
try:
from .plotting import create_tree_digraph, plot_importance, plot_metric, plot_split_value_histogram, plot_tree
except ImportError:
pass
try:
from .dask import DaskLGBMClassifier, DaskLGBMRanker, DaskLGBMRegressor
except ImportError:
pass
_version_path = Path(__file__).absolute().parent / 'VERSION.txt'
if _version_path.is_file():
__version__ = _version_path.read_text(encoding='utf-8').strip()
__all__ = ['Dataset', 'Booster', 'CVBooster', 'Sequence',
'register_logger',
'train', 'cv',
'LGBMModel', 'LGBMRegressor', 'LGBMClassifier', 'LGBMRanker',
'DaskLGBMRegressor', 'DaskLGBMClassifier', 'DaskLGBMRanker',
'log_evaluation', 'record_evaluation', 'reset_parameter', 'early_stopping', 'EarlyStopException',
'plot_importance', 'plot_split_value_histogram', 'plot_metric', 'plot_tree', 'create_tree_digraph']
|
from docarray import BaseDoc, DocArray
def test_instance_and_equivalence():
class MyDoc(BaseDoc):
text: str
docs = DocArray[MyDoc]([MyDoc(text='hello')])
assert issubclass(DocArray[MyDoc], DocArray[MyDoc])
assert issubclass(docs.__class__, DocArray[MyDoc])
assert isinstance(docs, DocArray[MyDoc])
def test_subclassing():
class MyDoc(BaseDoc):
text: str
class MyDocArray(DocArray[MyDoc]):
pass
docs = MyDocArray([MyDoc(text='hello')])
assert issubclass(MyDocArray, DocArray[MyDoc])
assert issubclass(docs.__class__, DocArray[MyDoc])
assert isinstance(docs, MyDocArray)
assert isinstance(docs, DocArray[MyDoc])
assert issubclass(MyDoc, BaseDoc)
assert not issubclass(DocArray[MyDoc], DocArray[BaseDoc])
assert not issubclass(MyDocArray, DocArray[BaseDoc])
|
from docarray import BaseDocument, DocumentArray
def test_instance_and_equivalence():
class MyDoc(BaseDocument):
text: str
docs = DocumentArray[MyDoc]([MyDoc(text='hello')])
assert issubclass(DocumentArray[MyDoc], DocumentArray[MyDoc])
assert issubclass(docs.__class__, DocumentArray[MyDoc])
assert isinstance(docs, DocumentArray[MyDoc])
def test_subclassing():
class MyDoc(BaseDocument):
text: str
class MyDocArray(DocumentArray[MyDoc]):
pass
docs = MyDocArray([MyDoc(text='hello')])
assert issubclass(MyDocArray, DocumentArray[MyDoc])
assert issubclass(docs.__class__, DocumentArray[MyDoc])
assert isinstance(docs, MyDocArray)
assert isinstance(docs, DocumentArray[MyDoc])
assert issubclass(MyDoc, BaseDocument)
assert not issubclass(DocumentArray[MyDoc], DocumentArray[BaseDocument])
assert not issubclass(MyDocArray, DocumentArray[BaseDocument])
|
# Copyright (c) OpenMMLab. All rights reserved.
from .augment_wrappers import AutoAugment, RandAugment
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
Contrast, Equalize, Invert, Posterize, Sharpness,
Solarize, SolarizeAdd)
from .formatting import (ImageToTensor, PackDetInputs, PackReIDInputs,
PackTrackInputs, ToTensor, Transpose)
from .frame_sampling import BaseFrameSample, UniformRefFrameSample
from .geometric import (GeomTransform, Rotate, ShearX, ShearY, TranslateX,
TranslateY)
from .instaboost import InstaBoost
from .loading import (FilterAnnotations, InferencerLoader, LoadAnnotations,
LoadEmptyAnnotations, LoadImageFromNDArray,
LoadMultiChannelImageFromFiles, LoadPanopticAnnotations,
LoadProposals, LoadTrackAnnotations)
from .transforms import (Albu, CachedMixUp, CachedMosaic, CopyPaste, CutOut,
Expand, FixScaleResize, FixShapeResize,
MinIoURandomCrop, MixUp, Mosaic, Pad,
PhotoMetricDistortion, RandomAffine,
RandomCenterCropPad, RandomCrop, RandomErasing,
RandomFlip, RandomShift, Resize, ResizeShortestEdge,
SegRescale, YOLOXHSVRandomAug)
from .wrappers import MultiBranch, ProposalBroadcaster, RandomOrder
__all__ = [
'PackDetInputs', 'ToTensor', 'ImageToTensor', 'Transpose',
'LoadImageFromNDArray', 'LoadAnnotations', 'LoadPanopticAnnotations',
'LoadMultiChannelImageFromFiles', 'LoadProposals', 'Resize', 'RandomFlip',
'RandomCrop', 'SegRescale', 'MinIoURandomCrop', 'Expand',
'PhotoMetricDistortion', 'Albu', 'InstaBoost', 'RandomCenterCropPad',
'AutoAugment', 'CutOut', 'ShearX', 'ShearY', 'Rotate', 'Color', 'Equalize',
'Brightness', 'Contrast', 'TranslateX', 'TranslateY', 'RandomShift',
'Mosaic', 'MixUp', 'RandomAffine', 'YOLOXHSVRandomAug', 'CopyPaste',
'FilterAnnotations', 'Pad', 'GeomTransform', 'ColorTransform',
'RandAugment', 'Sharpness', 'Solarize', 'SolarizeAdd', 'Posterize',
'AutoContrast', 'Invert', 'MultiBranch', 'RandomErasing',
'LoadEmptyAnnotations', 'RandomOrder', 'CachedMosaic', 'CachedMixUp',
'FixShapeResize', 'ProposalBroadcaster', 'InferencerLoader',
'LoadTrackAnnotations', 'BaseFrameSample', 'UniformRefFrameSample',
'PackTrackInputs', 'PackReIDInputs', 'FixScaleResize', 'ResizeShortestEdge'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .augment_wrappers import AutoAugment, RandAugment
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
Contrast, Equalize, Invert, Posterize, Sharpness,
Solarize, SolarizeAdd)
from .formatting import (ImageToTensor, PackDetInputs, PackReIDInputs,
PackTrackInputs, ToTensor, Transpose)
from .frame_sampling import BaseFrameSample, UniformRefFrameSample
from .geometric import (GeomTransform, Rotate, ShearX, ShearY, TranslateX,
TranslateY)
from .instaboost import InstaBoost
from .loading import (FilterAnnotations, InferencerLoader, LoadAnnotations,
LoadEmptyAnnotations, LoadImageFromNDArray,
LoadMultiChannelImageFromFiles, LoadPanopticAnnotations,
LoadProposals, LoadSemSegAnnotations,
LoadTrackAnnotations)
from .transforms import (Albu, CachedMixUp, CachedMosaic, CopyPaste, CutOut,
Expand, FixScaleResize, FixShapeResize,
MinIoURandomCrop, MixUp, Mosaic, Pad,
PhotoMetricDistortion, RandomAffine,
RandomCenterCropPad, RandomCrop, RandomErasing,
RandomFlip, RandomShift, Resize, SegRescale,
YOLOXHSVRandomAug)
from .wrappers import MultiBranch, ProposalBroadcaster, RandomOrder
__all__ = [
'PackDetInputs', 'ToTensor', 'ImageToTensor', 'Transpose',
'LoadImageFromNDArray', 'LoadAnnotations', 'LoadPanopticAnnotations',
'LoadMultiChannelImageFromFiles', 'LoadProposals', 'Resize', 'RandomFlip',
'RandomCrop', 'SegRescale', 'MinIoURandomCrop', 'Expand',
'PhotoMetricDistortion', 'Albu', 'InstaBoost', 'RandomCenterCropPad',
'AutoAugment', 'CutOut', 'ShearX', 'ShearY', 'Rotate', 'Color', 'Equalize',
'Brightness', 'Contrast', 'TranslateX', 'TranslateY', 'RandomShift',
'Mosaic', 'MixUp', 'RandomAffine', 'YOLOXHSVRandomAug', 'CopyPaste',
'FilterAnnotations', 'Pad', 'GeomTransform', 'ColorTransform',
'RandAugment', 'Sharpness', 'Solarize', 'SolarizeAdd', 'Posterize',
'AutoContrast', 'Invert', 'MultiBranch', 'RandomErasing',
'LoadEmptyAnnotations', 'RandomOrder', 'CachedMosaic', 'CachedMixUp',
'FixShapeResize', 'ProposalBroadcaster', 'InferencerLoader',
'LoadTrackAnnotations', 'BaseFrameSample', 'UniformRefFrameSample',
'PackTrackInputs', 'PackReIDInputs', 'FixScaleResize',
'LoadSemSegAnnotations'
]
|
from typing import List, Sequence
from llama_index.core.agent.workflow.base_agent import BaseWorkflowAgent
from llama_index.core.agent.workflow.workflow_events import (
AgentInput,
AgentOutput,
AgentStream,
ToolCallResult,
)
from llama_index.core.base.llms.types import ChatResponse
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.llms import ChatMessage
from llama_index.core.memory import BaseMemory
from llama_index.core.tools import AsyncBaseTool
from llama_index.core.workflow import Context
class FunctionAgent(BaseWorkflowAgent):
"""Function calling agent implementation."""
scratchpad_key: str = "scratchpad"
async def take_step(
self,
ctx: Context,
llm_input: List[ChatMessage],
tools: Sequence[AsyncBaseTool],
memory: BaseMemory,
) -> AgentOutput:
"""Take a single step with the function calling agent."""
if not self.llm.metadata.is_function_calling_model:
raise ValueError("LLM must be a FunctionCallingLLM")
scratchpad: List[ChatMessage] = await ctx.get(self.scratchpad_key, default=[])
current_llm_input = [*llm_input, *scratchpad]
ctx.write_event_to_stream(
AgentInput(input=current_llm_input, current_agent_name=self.name)
)
response = await self.llm.astream_chat_with_tools( # type: ignore
tools, chat_history=current_llm_input, allow_parallel_tool_calls=True
)
# last_chat_response will be used later, after the loop.
# We initialize it so it's valid even when 'response' is empty
last_chat_response = ChatResponse(message=ChatMessage())
async for last_chat_response in response:
tool_calls = self.llm.get_tool_calls_from_response( # type: ignore
last_chat_response, error_on_no_tool_call=False
)
raw = (
last_chat_response.raw.model_dump()
if isinstance(last_chat_response.raw, BaseModel)
else last_chat_response.raw
)
ctx.write_event_to_stream(
AgentStream(
delta=last_chat_response.delta or "",
response=last_chat_response.message.content or "",
tool_calls=tool_calls or [],
raw=raw,
current_agent_name=self.name,
)
)
tool_calls = self.llm.get_tool_calls_from_response( # type: ignore
last_chat_response, error_on_no_tool_call=False
)
# only add to scratchpad if we didn't select the handoff tool
scratchpad.append(last_chat_response.message)
await ctx.set(self.scratchpad_key, scratchpad)
raw = (
last_chat_response.raw.model_dump()
if isinstance(last_chat_response.raw, BaseModel)
else last_chat_response.raw
)
return AgentOutput(
response=last_chat_response.message,
tool_calls=tool_calls or [],
raw=raw,
current_agent_name=self.name,
)
async def handle_tool_call_results(
self, ctx: Context, results: List[ToolCallResult], memory: BaseMemory
) -> None:
"""Handle tool call results for function calling agent."""
scratchpad: List[ChatMessage] = await ctx.get(self.scratchpad_key, default=[])
for tool_call_result in results:
scratchpad.append(
ChatMessage(
role="tool",
content=str(tool_call_result.tool_output.content),
additional_kwargs={"tool_call_id": tool_call_result.tool_id},
)
)
if (
tool_call_result.return_direct
and tool_call_result.tool_name != "handoff"
):
scratchpad.append(
ChatMessage(
role="assistant",
content=str(tool_call_result.tool_output.content),
additional_kwargs={"tool_call_id": tool_call_result.tool_id},
)
)
break
await ctx.set(self.scratchpad_key, scratchpad)
async def finalize(
self, ctx: Context, output: AgentOutput, memory: BaseMemory
) -> AgentOutput:
"""Finalize the function calling agent.
Adds all in-progress messages to memory.
"""
scratchpad: List[ChatMessage] = await ctx.get(self.scratchpad_key, default=[])
for msg in scratchpad:
await memory.aput(msg)
# reset scratchpad
await ctx.set(self.scratchpad_key, [])
return output
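# Flow of this agent, as implemented above: take_step streams the function-calling LLM,
# collecting tool calls and appending the final assistant message to a per-context
# "scratchpad"; handle_tool_call_results appends the corresponding tool messages (plus an
# assistant echo for return_direct tools other than "handoff"); finalize flushes the
# scratchpad into memory and clears it.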
|
from typing import List, Sequence
from llama_index.core.agent.workflow.base_agent import BaseWorkflowAgent
from llama_index.core.agent.workflow.workflow_events import (
AgentInput,
AgentOutput,
AgentStream,
ToolCallResult,
)
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.llms import ChatMessage
from llama_index.core.memory import BaseMemory
from llama_index.core.tools import AsyncBaseTool
from llama_index.core.workflow import Context
class FunctionAgent(BaseWorkflowAgent):
"""Function calling agent implementation."""
scratchpad_key: str = "scratchpad"
async def take_step(
self,
ctx: Context,
llm_input: List[ChatMessage],
tools: Sequence[AsyncBaseTool],
memory: BaseMemory,
) -> AgentOutput:
"""Take a single step with the function calling agent."""
if not self.llm.metadata.is_function_calling_model:
raise ValueError("LLM must be a FunctionCallingLLM")
scratchpad: List[ChatMessage] = await ctx.get(self.scratchpad_key, default=[])
current_llm_input = [*llm_input, *scratchpad]
ctx.write_event_to_stream(
AgentInput(input=current_llm_input, current_agent_name=self.name)
)
response = await self.llm.astream_chat_with_tools( # type: ignore
tools, chat_history=current_llm_input, allow_parallel_tool_calls=True
)
async for r in response:
tool_calls = self.llm.get_tool_calls_from_response( # type: ignore
r, error_on_no_tool_call=False
)
raw = r.raw.model_dump() if isinstance(r.raw, BaseModel) else r.raw
ctx.write_event_to_stream(
AgentStream(
delta=r.delta or "",
response=r.message.content or "",
tool_calls=tool_calls or [],
raw=raw,
current_agent_name=self.name,
)
)
tool_calls = self.llm.get_tool_calls_from_response( # type: ignore
r, error_on_no_tool_call=False
)
# only add to scratchpad if we didn't select the handoff tool
scratchpad.append(r.message)
await ctx.set(self.scratchpad_key, scratchpad)
raw = r.raw.model_dump() if isinstance(r.raw, BaseModel) else r.raw
return AgentOutput(
response=r.message,
tool_calls=tool_calls or [],
raw=raw,
current_agent_name=self.name,
)
async def handle_tool_call_results(
self, ctx: Context, results: List[ToolCallResult], memory: BaseMemory
) -> None:
"""Handle tool call results for function calling agent."""
scratchpad: List[ChatMessage] = await ctx.get(self.scratchpad_key, default=[])
for tool_call_result in results:
scratchpad.append(
ChatMessage(
role="tool",
content=str(tool_call_result.tool_output.content),
additional_kwargs={"tool_call_id": tool_call_result.tool_id},
)
)
if (
tool_call_result.return_direct
and tool_call_result.tool_name != "handoff"
):
scratchpad.append(
ChatMessage(
role="assistant",
content=str(tool_call_result.tool_output.content),
additional_kwargs={"tool_call_id": tool_call_result.tool_id},
)
)
break
await ctx.set(self.scratchpad_key, scratchpad)
async def finalize(
self, ctx: Context, output: AgentOutput, memory: BaseMemory
) -> AgentOutput:
"""Finalize the function calling agent.
Adds all in-progress messages to memory.
"""
scratchpad: List[ChatMessage] = await ctx.get(self.scratchpad_key, default=[])
for msg in scratchpad:
await memory.aput(msg)
# reset scratchpad
await ctx.set(self.scratchpad_key, [])
return output
|
"""Torch backend APIs.
# Note on device placement
Torch has a different device placement style compared to TF and JAX.
In short, variables/tensors are not created on GPU by default,
and the GPU cannot directly communicate with the CPU.
To bring Torch behavior in line with the automated device placement of TF and JAX,
we do the following when a GPU is available:
- Variables are created on GPU.
- Input data will be placed on GPU at the first `keras.layers.Layer` call.
- Tensor creation happens on GPU, e.g., `zeros()` will create a tensor on GPU.
- `convert_to_numpy` will bring the tensor to CPU before converting it to NumPy.
"""
from keras.src.backend.common.name_scope import name_scope
from keras.src.backend.torch import core
from keras.src.backend.torch import image
from keras.src.backend.torch import linalg
from keras.src.backend.torch import math
from keras.src.backend.torch import nn
from keras.src.backend.torch import numpy
from keras.src.backend.torch import random
from keras.src.backend.torch.core import IS_THREAD_SAFE
from keras.src.backend.torch.core import SUPPORTS_RAGGED_TENSORS
from keras.src.backend.torch.core import SUPPORTS_SPARSE_TENSORS
from keras.src.backend.torch.core import Variable
from keras.src.backend.torch.core import cast
from keras.src.backend.torch.core import compute_output_spec
from keras.src.backend.torch.core import cond
from keras.src.backend.torch.core import convert_to_numpy
from keras.src.backend.torch.core import convert_to_tensor
from keras.src.backend.torch.core import device_scope
from keras.src.backend.torch.core import is_tensor
from keras.src.backend.torch.core import random_seed_dtype
from keras.src.backend.torch.core import scatter
from keras.src.backend.torch.core import shape
from keras.src.backend.torch.core import stop_gradient
from keras.src.backend.torch.core import to_torch_dtype
from keras.src.backend.torch.core import vectorized_map
from keras.src.backend.torch.rnn import cudnn_ok
from keras.src.backend.torch.rnn import gru
from keras.src.backend.torch.rnn import lstm
from keras.src.backend.torch.rnn import rnn
|
"""Torch backend APIs.
# Note on device placement
Torch has a different device placement style compared to TF and JAX.
In short, variables/tensors are not created on GPU by default,
and the GPU cannot directly communicate with the CPU.
To bring Torch behavior in line with the automated device placement of TF and JAX,
we do the following when a GPU is available:
- Variables are created on GPU.
- Input data will be placed on GPU at the first `keras.layers.Layer` call.
- Tensor creation happens on GPU, e.g., `zeros()` will create a tensor on GPU.
- `convert_to_numpy` will bring the tensor to CPU before converting it to NumPy.
"""
from keras.src.backend.common.name_scope import name_scope
from keras.src.backend.torch import core
from keras.src.backend.torch import image
from keras.src.backend.torch import linalg
from keras.src.backend.torch import math
from keras.src.backend.torch import nn
from keras.src.backend.torch import numpy
from keras.src.backend.torch import random
from keras.src.backend.torch.core import IS_THREAD_SAFE
from keras.src.backend.torch.core import SUPPORTS_SPARSE_TENSORS
from keras.src.backend.torch.core import Variable
from keras.src.backend.torch.core import cast
from keras.src.backend.torch.core import compute_output_spec
from keras.src.backend.torch.core import cond
from keras.src.backend.torch.core import convert_to_numpy
from keras.src.backend.torch.core import convert_to_tensor
from keras.src.backend.torch.core import device_scope
from keras.src.backend.torch.core import is_tensor
from keras.src.backend.torch.core import random_seed_dtype
from keras.src.backend.torch.core import scatter
from keras.src.backend.torch.core import shape
from keras.src.backend.torch.core import stop_gradient
from keras.src.backend.torch.core import to_torch_dtype
from keras.src.backend.torch.core import vectorized_map
from keras.src.backend.torch.rnn import cudnn_ok
from keras.src.backend.torch.rnn import gru
from keras.src.backend.torch.rnn import lstm
from keras.src.backend.torch.rnn import rnn
|
# Copyright (c) OpenMMLab. All rights reserved.
_base_ = 'mmdet::faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py'
|
# Copyright (c) OpenMMLab. All rights reserved.
_base_ = 'mmdet::faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '3.2.0'
short_version = __version__
def parse_version_info(version_str):
"""Parse a version string into a tuple.
Args:
version_str (str): The version string.
Returns:
tuple[int | str]: The version info, e.g., "1.3.0" is parsed into
(1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1').
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
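# Examples, consistent with the docstring above:
#     parse_version_info('3.2.0')    -> (3, 2, 0)
#     parse_version_info('2.0.0rc1') -> (2, 0, 0, 'rc1')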
version_info = parse_version_info(__version__)
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '3.1.0'
short_version = __version__
def parse_version_info(version_str):
"""Parse a version string into a tuple.
Args:
version_str (str): The version string.
Returns:
tuple[int | str]: The version info, e.g., "1.3.0" is parsed into
(1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1').
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
"""DocumentFilter that uses an LLM chain to extract the relevant parts of documents."""
from __future__ import annotations
from collections.abc import Sequence
from typing import Any, Callable, Optional, cast
from langchain_core.callbacks.manager import Callbacks
from langchain_core.documents import Document
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import BaseOutputParser, StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import Runnable
from pydantic import ConfigDict
from langchain.chains.llm import LLMChain
from langchain.retrievers.document_compressors.base import BaseDocumentCompressor
from langchain.retrievers.document_compressors.chain_extract_prompt import (
prompt_template,
)
def default_get_input(query: str, doc: Document) -> dict[str, Any]:
"""Return the compression chain input."""
return {"question": query, "context": doc.page_content}
class NoOutputParser(BaseOutputParser[str]):
"""Parse outputs that could return a null string of some sort."""
no_output_str: str = "NO_OUTPUT"
def parse(self, text: str) -> str:
cleaned_text = text.strip()
if cleaned_text == self.no_output_str:
return ""
return cleaned_text
def _get_default_chain_prompt() -> PromptTemplate:
output_parser = NoOutputParser()
template = prompt_template.format(no_output_str=output_parser.no_output_str)
return PromptTemplate(
template=template,
input_variables=["question", "context"],
output_parser=output_parser,
)
class LLMChainExtractor(BaseDocumentCompressor):
"""Document compressor that uses an LLM chain to extract
the relevant parts of documents."""
llm_chain: Runnable
"""LLM wrapper to use for compressing documents."""
get_input: Callable[[str, Document], dict] = default_get_input
"""Callable for constructing the chain input from the query and a Document."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Compress page content of raw documents."""
compressed_docs = []
for doc in documents:
_input = self.get_input(query, doc)
output_ = self.llm_chain.invoke(_input, config={"callbacks": callbacks})
if isinstance(self.llm_chain, LLMChain):
output = output_[self.llm_chain.output_key]
if self.llm_chain.prompt.output_parser is not None:
output = self.llm_chain.prompt.output_parser.parse(output)
else:
output = output_
if len(output) == 0:
continue
compressed_docs.append(
Document(page_content=cast(str, output), metadata=doc.metadata)
)
return compressed_docs
async def acompress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Compress page content of raw documents asynchronously."""
inputs = [self.get_input(query, doc) for doc in documents]
outputs = await self.llm_chain.abatch(inputs, {"callbacks": callbacks})
compressed_docs = []
for i, doc in enumerate(documents):
if len(outputs[i]) == 0:
continue
compressed_docs.append(
Document(page_content=outputs[i], metadata=doc.metadata)
)
return compressed_docs
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: Optional[PromptTemplate] = None,
get_input: Optional[Callable[[str, Document], str]] = None,
llm_chain_kwargs: Optional[dict] = None,
) -> LLMChainExtractor:
"""Initialize from LLM."""
_prompt = prompt if prompt is not None else _get_default_chain_prompt()
_get_input = get_input if get_input is not None else default_get_input
if _prompt.output_parser is not None:
parser = _prompt.output_parser
else:
parser = StrOutputParser()
llm_chain = _prompt | llm | parser
return cls(llm_chain=llm_chain, get_input=_get_input) # type: ignore[arg-type]
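# Hedged usage sketch (not part of the original module; `llm` stands for any LangChain
# chat model or LLM you have already configured):
#     compressor = LLMChainExtractor.from_llm(llm)
#     shortened = compressor.compress_documents(
#         [Document(page_content="a long passage ...")], query="what is X?"
#     )
# Documents whose extraction comes back empty (the NO_OUTPUT sentinel) are dropped.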
|
"""DocumentFilter that uses an LLM chain to extract the relevant parts of documents."""
from __future__ import annotations
from collections.abc import Sequence
from typing import Any, Callable, Optional, cast
from langchain_core.callbacks.manager import Callbacks
from langchain_core.documents import Document
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import BaseOutputParser, StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import Runnable
from pydantic import ConfigDict
from langchain.chains.llm import LLMChain
from langchain.retrievers.document_compressors.base import BaseDocumentCompressor
from langchain.retrievers.document_compressors.chain_extract_prompt import (
prompt_template,
)
def default_get_input(query: str, doc: Document) -> dict[str, Any]:
"""Return the compression chain input."""
return {"question": query, "context": doc.page_content}
class NoOutputParser(BaseOutputParser[str]):
"""Parse outputs that could return a null string of some sort."""
no_output_str: str = "NO_OUTPUT"
def parse(self, text: str) -> str:
cleaned_text = text.strip()
if cleaned_text == self.no_output_str:
return ""
return cleaned_text
def _get_default_chain_prompt() -> PromptTemplate:
output_parser = NoOutputParser()
template = prompt_template.format(no_output_str=output_parser.no_output_str)
return PromptTemplate(
template=template,
input_variables=["question", "context"],
output_parser=output_parser,
)
class LLMChainExtractor(BaseDocumentCompressor):
"""Document compressor that uses an LLM chain to extract
the relevant parts of documents."""
llm_chain: Runnable
"""LLM wrapper to use for compressing documents."""
get_input: Callable[[str, Document], dict] = default_get_input
"""Callable for constructing the chain input from the query and a Document."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Compress page content of raw documents."""
compressed_docs = []
for doc in documents:
_input = self.get_input(query, doc)
output_ = self.llm_chain.invoke(_input, config={"callbacks": callbacks})
if isinstance(self.llm_chain, LLMChain):
output = output_[self.llm_chain.output_key]
if self.llm_chain.prompt.output_parser is not None:
output = self.llm_chain.prompt.output_parser.parse(output)
else:
output = output_
if len(output) == 0:
continue
compressed_docs.append(
Document(page_content=cast(str, output), metadata=doc.metadata)
)
return compressed_docs
async def acompress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Compress page content of raw documents asynchronously."""
inputs = [self.get_input(query, doc) for doc in documents]
outputs = await self.llm_chain.abatch(inputs, {"callbacks": callbacks})
compressed_docs = []
for i, doc in enumerate(documents):
if len(outputs[i]) == 0:
continue
compressed_docs.append(
Document(page_content=outputs[i], metadata=doc.metadata) # type: ignore[arg-type]
)
return compressed_docs
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: Optional[PromptTemplate] = None,
get_input: Optional[Callable[[str, Document], str]] = None,
llm_chain_kwargs: Optional[dict] = None,
) -> LLMChainExtractor:
"""Initialize from LLM."""
_prompt = prompt if prompt is not None else _get_default_chain_prompt()
_get_input = get_input if get_input is not None else default_get_input
if _prompt.output_parser is not None:
parser = _prompt.output_parser
else:
parser = StrOutputParser()
llm_chain = _prompt | llm | parser
return cls(llm_chain=llm_chain, get_input=_get_input) # type: ignore[arg-type]
|
from typing import BinaryIO, Dict, Optional, Tuple
import torch
import torchaudio
from torchaudio.backend.common import AudioMetaData
# Note: must comply with TorchScript syntax -- annotations required, no f-strings or globals
def _info_audio(
s: torch.classes.torchaudio.ffmpeg_StreamReader,
):
i = s.find_best_audio_stream()
sinfo = s.get_src_stream_info(i)
if sinfo[5] == 0:
waveform, _ = _load_audio(s)
num_frames = waveform.size(1)
else:
num_frames = sinfo[5]
return AudioMetaData(
int(sinfo[8]),
num_frames,
sinfo[9],
sinfo[6],
sinfo[1].upper(),
)
def info_audio(
src: str,
format: Optional[str],
) -> AudioMetaData:
s = torch.classes.torchaudio.ffmpeg_StreamReader(src, format, None)
return _info_audio(s)
def info_audio_fileobj(
src,
format: Optional[str],
buffer_size: int = 4096,
) -> AudioMetaData:
s = torchaudio.lib._torchaudio_ffmpeg.StreamReaderFileObj(src, format, None, buffer_size)
return _info_audio(s)
def _get_load_filter(
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
) -> Optional[str]:
if frame_offset < 0:
raise RuntimeError("Invalid argument: frame_offset must be non-negative. Found: {}".format(frame_offset))
if num_frames == 0 or num_frames < -1:
raise RuntimeError("Invalid argument: num_frames must be -1 or greater than 0. Found: {}".format(num_frames))
# All default values -> no filter
if frame_offset == 0 and num_frames == -1 and not convert:
return None
# Only convert
aformat = "aformat=sample_fmts=fltp"
if frame_offset == 0 and num_frames == -1 and convert:
return aformat
# At least one of frame_offset or num_frames has non-default value
if num_frames > 0:
atrim = "atrim=start_sample={}:end_sample={}".format(frame_offset, frame_offset + num_frames)
else:
atrim = "atrim=start_sample={}".format(frame_offset)
if not convert:
return atrim
return "{},{}".format(atrim, aformat)
# Note: must comply with TorchScript syntax -- annotations required, no f-strings or globals
def _load_audio(
s: torch.classes.torchaudio.ffmpeg_StreamReader,
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
channels_first: bool = True,
) -> Tuple[torch.Tensor, int]:
i = s.find_best_audio_stream()
sinfo = s.get_src_stream_info(i)
sample_rate = int(sinfo[8])
option: Dict[str, str] = {}
s.add_audio_stream(i, -1, -1, _get_load_filter(frame_offset, num_frames, convert), None, option)
s.process_all_packets()
waveform = s.pop_chunks()[0]
if waveform is None:
raise RuntimeError("Failed to decode audio.")
assert waveform is not None
if channels_first:
waveform = waveform.T
return waveform, sample_rate
def load_audio(
src: str,
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
channels_first: bool = True,
format: Optional[str] = None,
) -> Tuple[torch.Tensor, int]:
s = torch.classes.torchaudio.ffmpeg_StreamReader(src, format, None)
return _load_audio(s, frame_offset, num_frames, convert, channels_first)
def load_audio_fileobj(
src: BinaryIO,
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
channels_first: bool = True,
format: Optional[str] = None,
buffer_size: int = 4096,
) -> Tuple[torch.Tensor, int]:
s = torchaudio.lib._torchaudio_ffmpeg.StreamReaderFileObj(src, format, None, buffer_size)
return _load_audio(s, frame_offset, num_frames, convert, channels_first)
|
from typing import BinaryIO, Dict, Optional, Tuple
import torch
import torchaudio
from torchaudio.backend.common import AudioMetaData
# Note: must comply with TorchScript syntax -- annotations required, no f-strings or globals
def _info_audio(
s: torch.classes.torchaudio.ffmpeg_StreamReader,
):
i = s.find_best_audio_stream()
sinfo = s.get_src_stream_info(i)
if sinfo[5] == 0:
waveform, _ = _load_audio(s)
num_frames = waveform.size(1)
else:
num_frames = sinfo[5]
return AudioMetaData(
int(sinfo[8]),
num_frames,
sinfo[9],
sinfo[6],
sinfo[1].upper(),
)
def info_audio(
src: str,
format: Optional[str],
) -> AudioMetaData:
s = torch.classes.torchaudio.ffmpeg_StreamReader(src, format, None)
return _info_audio(s)
def info_audio_fileobj(
src,
format: Optional[str],
buffer_size: int = 4096,
) -> AudioMetaData:
s = torchaudio._torchaudio_ffmpeg.StreamReaderFileObj(src, format, None, buffer_size)
return _info_audio(s)
def _get_load_filter(
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
) -> Optional[str]:
if frame_offset < 0:
raise RuntimeError("Invalid argument: frame_offset must be non-negative. Found: {}".format(frame_offset))
if num_frames == 0 or num_frames < -1:
raise RuntimeError("Invalid argument: num_frames must be -1 or greater than 0. Found: {}".format(num_frames))
# All default values -> no filter
if frame_offset == 0 and num_frames == -1 and not convert:
return None
# Only convert
aformat = "aformat=sample_fmts=fltp"
if frame_offset == 0 and num_frames == -1 and convert:
return aformat
# At least one of frame_offset or num_frames has non-default value
if num_frames > 0:
atrim = "atrim=start_sample={}:end_sample={}".format(frame_offset, frame_offset + num_frames)
else:
atrim = "atrim=start_sample={}".format(frame_offset)
if not convert:
return atrim
return "{},{}".format(atrim, aformat)
# Note: must comply with TorchScript syntax -- annotations required, no f-strings or globals
def _load_audio(
s: torch.classes.torchaudio.ffmpeg_StreamReader,
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
channels_first: bool = True,
) -> Tuple[torch.Tensor, int]:
i = s.find_best_audio_stream()
sinfo = s.get_src_stream_info(i)
sample_rate = int(sinfo[8])
option: Dict[str, str] = {}
s.add_audio_stream(i, -1, -1, _get_load_filter(frame_offset, num_frames, convert), None, option)
s.process_all_packets()
waveform = s.pop_chunks()[0]
if waveform is None:
raise RuntimeError("Failed to decode audio.")
assert waveform is not None
if channels_first:
waveform = waveform.T
return waveform, sample_rate
def load_audio(
src: str,
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
channels_first: bool = True,
format: Optional[str] = None,
) -> Tuple[torch.Tensor, int]:
s = torch.classes.torchaudio.ffmpeg_StreamReader(src, format, None)
return _load_audio(s, frame_offset, num_frames, convert, channels_first)
def load_audio_fileobj(
src: BinaryIO,
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
channels_first: bool = True,
format: Optional[str] = None,
buffer_size: int = 4096,
) -> Tuple[torch.Tensor, int]:
s = torchaudio._torchaudio_ffmpeg.StreamReaderFileObj(src, format, None, buffer_size)
return _load_audio(s, frame_offset, num_frames, convert, channels_first)
|
import os
import urllib
import pytest
from pydantic import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import TextUrl
REMOTE_TXT = 'https://de.wikipedia.org/wiki/Brixen'
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
LOCAL_TXT = os.path.join(CUR_DIR, '..', '..', '..', 'toydata', 'penal_colony.txt')
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'url,expected_beginning',
[(REMOTE_TXT, '<!DOCTYPE html>'), (LOCAL_TXT, '“It’s a peculiar apparatus,”')],
)
def test_load(url, expected_beginning):
uri = parse_obj_as(TextUrl, url)
txt = uri.load()
assert txt.startswith(expected_beginning)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('url', [REMOTE_TXT, LOCAL_TXT])
def test_load_to_bytes(url):
uri = parse_obj_as(TextUrl, url)
txt_bytes = uri.load_to_bytes()
assert isinstance(txt_bytes, bytes)
def test_proto_text_url():
uri = parse_obj_as(TextUrl, LOCAL_TXT)
uri._to_node_protobuf()
@pytest.mark.internet
def test_load_timeout():
url = parse_obj_as(TextUrl, REMOTE_TXT)
with pytest.raises(urllib.error.URLError):
_ = url.load(timeout=0.001)
with pytest.raises(urllib.error.URLError):
_ = url.load_to_bytes(timeout=0.001)
def test_json_schema():
schema_json_of(TextUrl)
def test_dump_json():
url = parse_obj_as(TextUrl, REMOTE_TXT)
orjson_dumps(url)
|
import os
import urllib
import pytest
from pydantic import parse_obj_as, schema_json_of
from docarray.document.io.json import orjson_dumps
from docarray.typing import TextUrl
REMOTE_TXT = 'https://de.wikipedia.org/wiki/Brixen'
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
LOCAL_TXT = os.path.join(CUR_DIR, '..', '..', '..', 'toydata', 'penal_colony.txt')
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'url,expected_beginning',
[(REMOTE_TXT, '<!DOCTYPE html>'), (LOCAL_TXT, '“It’s a peculiar apparatus,”')],
)
def test_load(url, expected_beginning):
uri = parse_obj_as(TextUrl, url)
txt = uri.load()
assert txt.startswith(expected_beginning)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('url', [REMOTE_TXT, LOCAL_TXT])
def test_load_to_bytes(url):
uri = parse_obj_as(TextUrl, url)
txt_bytes = uri.load_to_bytes()
assert isinstance(txt_bytes, bytes)
def test_proto_text_url():
uri = parse_obj_as(TextUrl, LOCAL_TXT)
uri._to_node_protobuf()
@pytest.mark.internet
def test_load_timeout():
url = parse_obj_as(TextUrl, REMOTE_TXT)
with pytest.raises(urllib.error.URLError):
_ = url.load(timeout=0.001)
with pytest.raises(urllib.error.URLError):
_ = url.load_to_bytes(timeout=0.001)
def test_json_schema():
schema_json_of(TextUrl)
def test_dump_json():
url = parse_obj_as(TextUrl, REMOTE_TXT)
orjson_dumps(url)
|
_base_ = './mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[20, 23],
gamma=0.1)
]
|
_base_ = './mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py'
# learning policy
lr_config = dict(step=[20, 23])
runner = dict(type='EpochBasedRunner', max_epochs=24)
|
_base_ = './tood_r50_fpn_1x_coco.py'
max_epochs = 24
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
# training schedule for 2x
train_cfg = dict(max_epochs=max_epochs)
# multi-scale training
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=[(1333, 480), (1333, 800)],
resize_cfg=dict(type='Resize', keep_ratio=True)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = './tood_r50_fpn_1x_coco.py'
max_epochs = 24
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
# training schedule for 2x
train_cfg = dict(by_epoch=True, max_epochs=max_epochs)
# multi-scale training
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=[(1333, 480), (1333, 800)],
resize_cfg=dict(type='Resize', keep_ratio=True)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = [
'../_base_/models/faster-rcnn_r50_fpn.py',
'../_base_/datasets/cityscapes_detection.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_1x.py'
]
model = dict(
backbone=dict(init_cfg=None),
roi_head=dict(
bbox_head=dict(
num_classes=8,
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))))
# optimizer
# lr is set for a batch size of 8
optim_wrapper = dict(optimizer=dict(lr=0.01))
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=8,
by_epoch=True,
# [7] yields higher performance than [6]
milestones=[7],
gamma=0.1)
]
# actual epoch = 8 * 8 = 64
train_cfg = dict(max_epochs=8)
# For better, more stable performance initialize from COCO
load_from = 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth' # noqa
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (1 samples per GPU)
# TODO: support auto scaling lr
# auto_scale_lr = dict(base_batch_size=8)
|
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py',
'../_base_/datasets/cityscapes_detection.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_1x.py'
]
model = dict(
backbone=dict(init_cfg=None),
roi_head=dict(
bbox_head=dict(
num_classes=8,
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))))
# optimizer
# lr is set for a batch size of 8
optim_wrapper = dict(optimizer=dict(lr=0.01))
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=8,
by_epoch=True,
# [7] yields higher performance than [6]
milestones=[7],
gamma=0.1)
]
# actual epoch = 8 * 8 = 64
train_cfg = dict(max_epochs=8)
# For better, more stable performance initialize from COCO
load_from = 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth' # noqa
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (1 samples per GPU)
# TODO: support auto scaling lr
# auto_scale_lr = dict(base_batch_size=8)
|
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_pooling import BasePooling
@keras_export(["keras.layers.MaxPooling2D", "keras.layers.MaxPool2D"])
class MaxPooling2D(BasePooling):
"""Max pooling operation for 2D spatial data.
Downsamples the input along its spatial dimensions (height and width)
by taking the maximum value over an input window
(of size defined by `pool_size`) for each channel of the input.
The window is shifted by `strides` along each dimension.
The resulting output when using the `"valid"` padding option has a spatial
shape (number of rows or columns) of:
`output_shape = math.floor((input_shape - pool_size) / strides) + 1`
(when `input_shape >= pool_size`)
The resulting output shape when using the `"same"` padding option is:
`output_shape = math.floor((input_shape - 1) / strides) + 1`
Args:
pool_size: int or tuple of 2 integers, factors by which to downscale
(dim1, dim2). If only one integer is specified, the same
window length will be used for all dimensions.
strides: int or tuple of 2 integers, or None. Strides values. If None,
it will default to `pool_size`. If only one int is specified, the
same stride size will be used for all dimensions.
padding: string, either `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, height, width, channels)`
while `"channels_first"` corresponds to inputs with shape
`(batch, channels, height, width)`. It defaults to the
`image_data_format` value found in your Keras config file at
`~/.keras/keras.json`. If you never set it, then it will be
`"channels_last"`.
Input shape:
- If `data_format="channels_last"`:
4D tensor with shape `(batch_size, height, width, channels)`.
- If `data_format="channels_first"`:
4D tensor with shape `(batch_size, channels, height, width)`.
Output shape:
- If `data_format="channels_last"`:
4D tensor with shape
`(batch_size, pooled_height, pooled_width, channels)`.
- If `data_format="channels_first"`:
4D tensor with shape
`(batch_size, channels, pooled_height, pooled_width)`.
Examples:
`strides=(1, 1)` and `padding="valid"`:
>>> x = np.array([[1., 2., 3.],
... [4., 5., 6.],
... [7., 8., 9.]])
>>> x = np.reshape(x, [1, 3, 3, 1])
>>> max_pool_2d = keras.layers.MaxPooling2D(pool_size=(2, 2),
... strides=(1, 1), padding="valid")
>>> max_pool_2d(x)
`strides=(2, 2)` and `padding="valid"`:
>>> x = np.array([[1., 2., 3., 4.],
... [5., 6., 7., 8.],
... [9., 10., 11., 12.]])
>>> x = np.reshape(x, [1, 3, 4, 1])
>>> max_pool_2d = keras.layers.MaxPooling2D(pool_size=(2, 2),
... strides=(2, 2), padding="valid")
>>> max_pool_2d(x)
    `strides=(1, 1)` and `padding="same"`:
>>> x = np.array([[1., 2., 3.],
... [4., 5., 6.],
... [7., 8., 9.]])
>>> x = np.reshape(x, [1, 3, 3, 1])
>>> max_pool_2d = keras.layers.MaxPooling2D(pool_size=(2, 2),
... strides=(1, 1), padding="same")
>>> max_pool_2d(x)
"""
def __init__(
self,
pool_size=(2, 2),
strides=None,
padding="valid",
data_format=None,
name=None,
**kwargs
):
super().__init__(
pool_size,
strides,
pool_dimensions=2,
pool_mode="max",
padding=padding,
data_format=data_format,
name=name,
**kwargs,
)
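# Worked example of the "valid" output-shape formula in the docstring above
# (illustrative): a 3x3 input with pool_size=(2, 2) and strides=(1, 1) gives
# floor((3 - 2) / 1) + 1 = 2, i.e. a 2x2 pooled map per channel.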
|
from .image_url import ImageUrl
__all__ = ['ImageUrl']
|
from .image_url import ImageUrl
|
__copyright__ = 'Copyright (c) 2020-2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
import os
import subprocess
import librosa
import pytest
from executor.audio_clip_encoder import AudioCLIPEncoder
from jina import Document, DocumentArray, Flow
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_flow_from_yml():
doc = DocumentArray([Document()])
with Flow(return_results=True).add(uses=AudioCLIPEncoder) as f:
resp = f.post(on='/test', inputs=doc, return_results=True)
assert resp is not None
def test_embedding_exists():
x_audio, sr = librosa.load(os.path.join(cur_dir, '../test_data/sample.mp3'))
doc = DocumentArray([Document(blob=x_audio, tags={'sample_rate': sr})])
with Flow().add(uses=AudioCLIPEncoder) as f:
responses = f.post(on='index', inputs=doc, return_results=True)
assert responses[0].docs[0].embedding is not None
assert responses[0].docs[0].embedding.shape == (1024,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
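    # The executor is expected to keep serving until it is killed, so reaching the
    # 30s timeout (instead of exiting early with an error) is treated as success.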
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:cuda',
'download_model:True',
],
timeout=30,
check=True,
)
|
__copyright__ = 'Copyright (c) 2020-2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
import os
import subprocess
import librosa
import pytest
from executor.audio_clip_encoder import AudioCLIPEncoder
from jina import Document, DocumentArray, Flow
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_flow_from_yml():
doc = DocumentArray([Document()])
with Flow(return_results=True).add(uses=AudioCLIPEncoder) as f:
resp = f.post(on='/test', inputs=doc, return_results=True)
assert resp is not None
def test_embedding_exists():
x_audio, sr = librosa.load(os.path.join(cur_dir, '../test_data/sample.mp3'))
doc = DocumentArray([Document(blob=x_audio, tags={'sample_rate': sr})])
with Flow().add(uses=AudioCLIPEncoder) as f:
responses = f.post(on='index', inputs=doc, return_results=True)
assert responses[0].docs[0].embedding is not None
assert responses[0].docs[0].embedding.shape == (1024,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:cuda',
],
timeout=30,
check=True,
)
|
__version__ = '0.13.9'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.13.8'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
|
from datasets import load_dataset
from sentence_transformers import SentenceTransformer
from sentence_transformers.quantization import quantize_embeddings, semantic_search_faiss
# 1. Load the quora corpus with questions
dataset = load_dataset("quora", split="train").map(
lambda batch: {"text": [text for sample in batch["questions"] for text in sample["text"]]},
batched=True,
remove_columns=["questions", "is_duplicate"],
)
max_corpus_size = 100_000
corpus = dataset["text"][:max_corpus_size]
num_queries = 1_000
queries = corpus[:num_queries]
# 2. Load the model
model = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")
# 3. Encode the corpus
full_corpus_embeddings = model.encode(corpus, normalize_embeddings=True, show_progress_bar=True)
# 4. Encode the queries using the full precision
query_embeddings = model.encode(queries, normalize_embeddings=True)
for exact in (True, False):
for corpus_precision in ("float32", "uint8", "ubinary"):
corpus_embeddings = quantize_embeddings(full_corpus_embeddings, precision=corpus_precision)
# NOTE: We can also pass "precision=..." to the encode method to quantize the embeddings directly,
# but we want to keep the full precision embeddings to act as a calibration dataset for quantizing
# the query embeddings. This is important only if you are using uint8 or int8 precision
# 5. Perform semantic search using FAISS
rescore_multiplier = 4
results, search_time = semantic_search_faiss(
query_embeddings,
corpus_embeddings=corpus_embeddings,
corpus_precision=corpus_precision,
top_k=10,
calibration_embeddings=full_corpus_embeddings,
rescore=corpus_precision != "float32",
rescore_multiplier=rescore_multiplier,
exact=exact,
)
print(
f"{'Exact' if exact else 'Approximate'} search time using {corpus_precision} corpus: {search_time:.6f} seconds"
+ (f" (rescore_multiplier: {rescore_multiplier})" if corpus_precision != "float32" else "")
)
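# Sketch (not part of the original script): quantize_embeddings can also be applied
# to the query side, using the full-precision corpus embeddings as calibration data
# for the integer ranges, e.g.:
#
#     uint8_query_embeddings = quantize_embeddings(
#         query_embeddings, precision="uint8", calibration_embeddings=full_corpus_embeddings
#     )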
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import shutil
import time
from unittest import TestCase
from unittest.mock import Mock
import torch
from mmengine.data import InstanceData
from mmdet.engine.hooks import DetVisualizationHook
from mmdet.structures import DetDataSample
from mmdet.visualization import DetLocalVisualizer
def _rand_bboxes(num_boxes, h, w):
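    # Sample random boxes as (cx, cy, w, h) fractions and convert them to corner
    # format (tl_x, tl_y, br_x, br_y), clipped to the w x h image.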
cx, cy, bw, bh = torch.rand(num_boxes, 4).T
tl_x = ((cx * w) - (w * bw / 2)).clip(0, w)
tl_y = ((cy * h) - (h * bh / 2)).clip(0, h)
br_x = ((cx * w) + (w * bw / 2)).clip(0, w)
br_y = ((cy * h) + (h * bh / 2)).clip(0, h)
bboxes = torch.vstack([tl_x, tl_y, br_x, br_y]).T
return bboxes
class TestVisualizationHook(TestCase):
def setUp(self) -> None:
DetLocalVisualizer.get_instance('visualizer')
data_sample = DetDataSample()
data_sample.set_metainfo({
'img_path':
osp.join(osp.dirname(__file__), '../../data/color.jpg')
})
self.data_batch = [{'data_sample': data_sample}] * 2
pred_instances = InstanceData()
pred_instances.bboxes = _rand_bboxes(5, 10, 12)
pred_instances.labels = torch.randint(0, 2, (5, ))
pred_instances.scores = torch.rand((5, ))
pred_det_data_sample = DetDataSample()
pred_det_data_sample.pred_instances = pred_instances
self.outputs = [pred_det_data_sample] * 2
def test_after_val_iter(self):
runner = Mock()
runner.iter = 1
hook = DetVisualizationHook()
hook.after_val_iter(runner, 1, self.data_batch, self.outputs)
def test_after_test_iter(self):
runner = Mock()
runner.iter = 1
hook = DetVisualizationHook(draw=True)
hook.after_test_iter(runner, 1, self.data_batch, self.outputs)
self.assertEqual(hook._test_index, 2)
# test
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
test_out_dir = timestamp + '1'
runner.work_dir = timestamp
runner.timestamp = '1'
hook = DetVisualizationHook(draw=False, test_out_dir=test_out_dir)
hook.after_test_iter(runner, 1, self.data_batch, self.outputs)
self.assertTrue(not osp.exists(f'{timestamp}/1/{test_out_dir}'))
hook = DetVisualizationHook(draw=True, test_out_dir=test_out_dir)
hook.after_test_iter(runner, 1, self.data_batch, self.outputs)
self.assertTrue(osp.exists(f'{timestamp}/1/{test_out_dir}'))
shutil.rmtree(f'{timestamp}')
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import shutil
import time
from unittest import TestCase
from unittest.mock import Mock
import torch
from mmengine.data import InstanceData
from mmdet.data_elements import DetDataSample
from mmdet.engine.hooks import DetVisualizationHook
from mmdet.visualization import DetLocalVisualizer
def _rand_bboxes(num_boxes, h, w):
cx, cy, bw, bh = torch.rand(num_boxes, 4).T
tl_x = ((cx * w) - (w * bw / 2)).clip(0, w)
tl_y = ((cy * h) - (h * bh / 2)).clip(0, h)
br_x = ((cx * w) + (w * bw / 2)).clip(0, w)
br_y = ((cy * h) + (h * bh / 2)).clip(0, h)
bboxes = torch.vstack([tl_x, tl_y, br_x, br_y]).T
return bboxes
class TestVisualizationHook(TestCase):
def setUp(self) -> None:
DetLocalVisualizer.get_instance('visualizer')
data_sample = DetDataSample()
data_sample.set_metainfo({
'img_path':
osp.join(osp.dirname(__file__), '../../data/color.jpg')
})
self.data_batch = [{'data_sample': data_sample}] * 2
pred_instances = InstanceData()
pred_instances.bboxes = _rand_bboxes(5, 10, 12)
pred_instances.labels = torch.randint(0, 2, (5, ))
pred_instances.scores = torch.rand((5, ))
pred_det_data_sample = DetDataSample()
pred_det_data_sample.pred_instances = pred_instances
self.outputs = [pred_det_data_sample] * 2
def test_after_val_iter(self):
runner = Mock()
runner.iter = 1
hook = DetVisualizationHook()
hook.after_val_iter(runner, 1, self.data_batch, self.outputs)
def test_after_test_iter(self):
runner = Mock()
runner.iter = 1
hook = DetVisualizationHook(draw=True)
hook.after_test_iter(runner, 1, self.data_batch, self.outputs)
self.assertEqual(hook._test_index, 2)
# test
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
test_out_dir = timestamp + '1'
runner.work_dir = timestamp
runner.timestamp = '1'
hook = DetVisualizationHook(draw=False, test_out_dir=test_out_dir)
hook.after_test_iter(runner, 1, self.data_batch, self.outputs)
self.assertTrue(not osp.exists(f'{timestamp}/1/{test_out_dir}'))
hook = DetVisualizationHook(draw=True, test_out_dir=test_out_dir)
hook.after_test_iter(runner, 1, self.data_batch, self.outputs)
self.assertTrue(osp.exists(f'{timestamp}/1/{test_out_dir}'))
shutil.rmtree(f'{timestamp}')
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import logging
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.logging import print_log
from mmengine.registry import RUNNERS
from mmengine.runner import Runner
from mmdet.utils import register_all_modules
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--amp',
action='store_true',
default=False,
help='enable automatic-mixed-precision training')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
# register all modules in mmdet into the registries
# do not init the default scope here because it will be init in the runner
register_all_modules(init_default_scope=False)
# load config
cfg = Config.fromfile(args.config)
cfg.launcher = args.launcher
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
# enable automatic-mixed-precision training
if args.amp is True:
optim_wrapper = cfg.optim_wrapper.type
if optim_wrapper == 'AmpOptimWrapper':
print_log(
'AMP training is already enabled in your config.',
logger='current',
level=logging.WARNING)
else:
assert optim_wrapper == 'OptimWrapper', (
'`--amp` is only supported when the optimizer wrapper type is '
f'`OptimWrapper` but got {optim_wrapper}.')
cfg.optim_wrapper.type = 'AmpOptimWrapper'
cfg.optim_wrapper.loss_scale = 'dynamic'
# build the runner from config
if 'runner_type' not in cfg:
# build the default runner
runner = Runner.from_cfg(cfg)
else:
# build customized runner from the registry
# if 'runner_type' is set in the cfg
runner = RUNNERS.build(cfg)
# start training
runner.train()
if __name__ == '__main__':
main()
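# Illustrative launch (the config path is a placeholder, not part of this script):
#   python tools/train.py configs/faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py --amp
# With --amp, main() above switches cfg.optim_wrapper.type to 'AmpOptimWrapper'
# and sets a dynamic loss scale.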
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.registry import RUNNERS
from mmengine.runner import Runner
from mmdet.utils import register_all_modules
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
# register all modules in mmdet into the registries
# do not init the default scope here because it will be init in the runner
register_all_modules(init_default_scope=False)
# load config
cfg = Config.fromfile(args.config)
cfg.launcher = args.launcher
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
# build the runner from config
if 'runner_type' not in cfg:
# build the default runner
runner = Runner.from_cfg(cfg)
else:
# build customized runner from the registry
# if 'runner_type' is set in the cfg
runner = RUNNERS.build(cfg)
# start training
runner.train()
if __name__ == '__main__':
main()
|
from ._source_separation_pipeline import (
CONVTASNET_BASE_LIBRI2MIX,
HDEMUCS_HIGH_MUSDB,
HDEMUCS_HIGH_MUSDB_PLUS,
SourceSeparationBundle,
)
from ._tts import (
TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH,
TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH,
TACOTRON2_WAVERNN_CHAR_LJSPEECH,
TACOTRON2_WAVERNN_PHONE_LJSPEECH,
Tacotron2TTSBundle,
)
from ._wav2vec2.impl import (
HUBERT_ASR_LARGE,
HUBERT_ASR_XLARGE,
HUBERT_BASE,
HUBERT_LARGE,
HUBERT_XLARGE,
VOXPOPULI_ASR_BASE_10K_DE,
VOXPOPULI_ASR_BASE_10K_EN,
VOXPOPULI_ASR_BASE_10K_ES,
VOXPOPULI_ASR_BASE_10K_FR,
VOXPOPULI_ASR_BASE_10K_IT,
WAV2VEC2_ASR_BASE_100H,
WAV2VEC2_ASR_BASE_10M,
WAV2VEC2_ASR_BASE_960H,
WAV2VEC2_ASR_LARGE_100H,
WAV2VEC2_ASR_LARGE_10M,
WAV2VEC2_ASR_LARGE_960H,
WAV2VEC2_ASR_LARGE_LV60K_100H,
WAV2VEC2_ASR_LARGE_LV60K_10M,
WAV2VEC2_ASR_LARGE_LV60K_960H,
WAV2VEC2_BASE,
WAV2VEC2_LARGE,
WAV2VEC2_LARGE_LV60K,
WAV2VEC2_XLSR53,
Wav2Vec2ASRBundle,
Wav2Vec2Bundle,
)
from .rnnt_pipeline import EMFORMER_RNNT_BASE_LIBRISPEECH, RNNTBundle
__all__ = [
"Wav2Vec2Bundle",
"Wav2Vec2ASRBundle",
"WAV2VEC2_BASE",
"WAV2VEC2_LARGE",
"WAV2VEC2_LARGE_LV60K",
"WAV2VEC2_ASR_BASE_10M",
"WAV2VEC2_ASR_BASE_100H",
"WAV2VEC2_ASR_BASE_960H",
"WAV2VEC2_ASR_LARGE_10M",
"WAV2VEC2_ASR_LARGE_100H",
"WAV2VEC2_ASR_LARGE_960H",
"WAV2VEC2_ASR_LARGE_LV60K_10M",
"WAV2VEC2_ASR_LARGE_LV60K_100H",
"WAV2VEC2_ASR_LARGE_LV60K_960H",
"WAV2VEC2_XLSR53",
"VOXPOPULI_ASR_BASE_10K_EN",
"VOXPOPULI_ASR_BASE_10K_ES",
"VOXPOPULI_ASR_BASE_10K_DE",
"VOXPOPULI_ASR_BASE_10K_FR",
"VOXPOPULI_ASR_BASE_10K_IT",
"HUBERT_BASE",
"HUBERT_LARGE",
"HUBERT_XLARGE",
"HUBERT_ASR_LARGE",
"HUBERT_ASR_XLARGE",
"Tacotron2TTSBundle",
"TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH",
"TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH",
"TACOTRON2_WAVERNN_CHAR_LJSPEECH",
"TACOTRON2_WAVERNN_PHONE_LJSPEECH",
"RNNTBundle",
"EMFORMER_RNNT_BASE_LIBRISPEECH",
"SourceSeparationBundle",
"CONVTASNET_BASE_LIBRI2MIX",
"HDEMUCS_HIGH_MUSDB_PLUS",
"HDEMUCS_HIGH_MUSDB",
]
|
from ._source_separation_pipeline import CONVTASNET_BASE_LIBRI2MIX, SourceSeparationBundle
from ._tts import (
TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH,
TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH,
TACOTRON2_WAVERNN_CHAR_LJSPEECH,
TACOTRON2_WAVERNN_PHONE_LJSPEECH,
Tacotron2TTSBundle,
)
from ._wav2vec2.impl import (
HUBERT_ASR_LARGE,
HUBERT_ASR_XLARGE,
HUBERT_BASE,
HUBERT_LARGE,
HUBERT_XLARGE,
VOXPOPULI_ASR_BASE_10K_DE,
VOXPOPULI_ASR_BASE_10K_EN,
VOXPOPULI_ASR_BASE_10K_ES,
VOXPOPULI_ASR_BASE_10K_FR,
VOXPOPULI_ASR_BASE_10K_IT,
WAV2VEC2_ASR_BASE_100H,
WAV2VEC2_ASR_BASE_10M,
WAV2VEC2_ASR_BASE_960H,
WAV2VEC2_ASR_LARGE_100H,
WAV2VEC2_ASR_LARGE_10M,
WAV2VEC2_ASR_LARGE_960H,
WAV2VEC2_ASR_LARGE_LV60K_100H,
WAV2VEC2_ASR_LARGE_LV60K_10M,
WAV2VEC2_ASR_LARGE_LV60K_960H,
WAV2VEC2_BASE,
WAV2VEC2_LARGE,
WAV2VEC2_LARGE_LV60K,
WAV2VEC2_XLSR53,
Wav2Vec2ASRBundle,
Wav2Vec2Bundle,
)
from .rnnt_pipeline import EMFORMER_RNNT_BASE_LIBRISPEECH, RNNTBundle
__all__ = [
"Wav2Vec2Bundle",
"Wav2Vec2ASRBundle",
"WAV2VEC2_BASE",
"WAV2VEC2_LARGE",
"WAV2VEC2_LARGE_LV60K",
"WAV2VEC2_ASR_BASE_10M",
"WAV2VEC2_ASR_BASE_100H",
"WAV2VEC2_ASR_BASE_960H",
"WAV2VEC2_ASR_LARGE_10M",
"WAV2VEC2_ASR_LARGE_100H",
"WAV2VEC2_ASR_LARGE_960H",
"WAV2VEC2_ASR_LARGE_LV60K_10M",
"WAV2VEC2_ASR_LARGE_LV60K_100H",
"WAV2VEC2_ASR_LARGE_LV60K_960H",
"WAV2VEC2_XLSR53",
"VOXPOPULI_ASR_BASE_10K_EN",
"VOXPOPULI_ASR_BASE_10K_ES",
"VOXPOPULI_ASR_BASE_10K_DE",
"VOXPOPULI_ASR_BASE_10K_FR",
"VOXPOPULI_ASR_BASE_10K_IT",
"HUBERT_BASE",
"HUBERT_LARGE",
"HUBERT_XLARGE",
"HUBERT_ASR_LARGE",
"HUBERT_ASR_XLARGE",
"Tacotron2TTSBundle",
"TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH",
"TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH",
"TACOTRON2_WAVERNN_CHAR_LJSPEECH",
"TACOTRON2_WAVERNN_PHONE_LJSPEECH",
"RNNTBundle",
"EMFORMER_RNNT_BASE_LIBRISPEECH",
"SourceSeparationBundle",
"CONVTASNET_BASE_LIBRI2MIX",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .accuracy import Accuracy, accuracy
from .ae_loss import AssociativeEmbeddingLoss
from .balanced_l1_loss import BalancedL1Loss, balanced_l1_loss
from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy,
cross_entropy, mask_cross_entropy)
from .dice_loss import DiceLoss
from .eqlv2_loss import EQLV2Loss
from .focal_loss import FocalLoss, sigmoid_focal_loss
from .gaussian_focal_loss import GaussianFocalLoss
from .gfocal_loss import DistributionFocalLoss, QualityFocalLoss
from .ghm_loss import GHMC, GHMR
from .iou_loss import (BoundedIoULoss, CIoULoss, DIoULoss, EIoULoss, GIoULoss,
IoULoss, SIoULoss, bounded_iou_loss, iou_loss)
from .kd_loss import KnowledgeDistillationKLDivLoss
from .l2_loss import L2Loss
from .margin_loss import MarginL2Loss
from .mse_loss import MSELoss, mse_loss
from .multipos_cross_entropy_loss import MultiPosCrossEntropyLoss
from .pisa_loss import carl_loss, isr_p
from .seesaw_loss import SeesawLoss
from .smooth_l1_loss import L1Loss, SmoothL1Loss, l1_loss, smooth_l1_loss
from .triplet_loss import TripletLoss
from .utils import reduce_loss, weight_reduce_loss, weighted_loss
from .varifocal_loss import VarifocalLoss
__all__ = [
'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy',
'mask_cross_entropy', 'CrossEntropyLoss', 'sigmoid_focal_loss',
'FocalLoss', 'smooth_l1_loss', 'SmoothL1Loss', 'balanced_l1_loss',
'BalancedL1Loss', 'mse_loss', 'MSELoss', 'iou_loss', 'bounded_iou_loss',
'IoULoss', 'BoundedIoULoss', 'GIoULoss', 'DIoULoss', 'CIoULoss',
'EIoULoss', 'SIoULoss', 'GHMC', 'GHMR', 'reduce_loss',
'weight_reduce_loss', 'weighted_loss', 'L1Loss', 'l1_loss', 'isr_p',
'carl_loss', 'AssociativeEmbeddingLoss', 'GaussianFocalLoss',
'QualityFocalLoss', 'DistributionFocalLoss', 'VarifocalLoss',
'KnowledgeDistillationKLDivLoss', 'SeesawLoss', 'DiceLoss', 'EQLV2Loss',
'MarginL2Loss', 'MultiPosCrossEntropyLoss', 'L2Loss', 'TripletLoss'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .accuracy import Accuracy, accuracy
from .ae_loss import AssociativeEmbeddingLoss
from .balanced_l1_loss import BalancedL1Loss, balanced_l1_loss
from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy,
cross_entropy, mask_cross_entropy)
from .dice_loss import DiceLoss
from .focal_loss import FocalLoss, sigmoid_focal_loss
from .gaussian_focal_loss import GaussianFocalLoss
from .gfocal_loss import DistributionFocalLoss, QualityFocalLoss
from .ghm_loss import GHMC, GHMR
from .iou_loss import (BoundedIoULoss, CIoULoss, DIoULoss, EIoULoss, GIoULoss,
IoULoss, bounded_iou_loss, iou_loss)
from .kd_loss import KnowledgeDistillationKLDivLoss
from .mse_loss import MSELoss, mse_loss
from .pisa_loss import carl_loss, isr_p
from .seesaw_loss import SeesawLoss
from .smooth_l1_loss import L1Loss, SmoothL1Loss, l1_loss, smooth_l1_loss
from .utils import reduce_loss, weight_reduce_loss, weighted_loss
from .varifocal_loss import VarifocalLoss
__all__ = [
'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy',
'mask_cross_entropy', 'CrossEntropyLoss', 'sigmoid_focal_loss',
'FocalLoss', 'smooth_l1_loss', 'SmoothL1Loss', 'balanced_l1_loss',
'BalancedL1Loss', 'mse_loss', 'MSELoss', 'iou_loss', 'bounded_iou_loss',
'IoULoss', 'BoundedIoULoss', 'GIoULoss', 'DIoULoss', 'CIoULoss',
'EIoULoss', 'GHMC', 'GHMR', 'reduce_loss', 'weight_reduce_loss',
'weighted_loss', 'L1Loss', 'l1_loss', 'isr_p', 'carl_loss',
'AssociativeEmbeddingLoss', 'GaussianFocalLoss', 'QualityFocalLoss',
'DistributionFocalLoss', 'VarifocalLoss', 'KnowledgeDistillationKLDivLoss',
'SeesawLoss', 'DiceLoss'
]
|
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from packaging import version
from .. import __version__
from .constants import (
CONFIG_NAME,
DEPRECATED_REVISION_ARGS,
DIFFUSERS_DYNAMIC_MODULE_NAME,
FLAX_WEIGHTS_NAME,
GGUF_FILE_EXTENSION,
HF_MODULES_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MIN_PEFT_VERSION,
ONNX_EXTERNAL_WEIGHTS_NAME,
ONNX_WEIGHTS_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFETENSORS_FILE_EXTENSION,
SAFETENSORS_WEIGHTS_NAME,
USE_PEFT_BACKEND,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .deprecation_utils import deprecate
from .doc_utils import replace_example_docstring
from .dynamic_modules_utils import get_class_from_dynamic_module
from .export_utils import export_to_gif, export_to_obj, export_to_ply, export_to_video
from .hub_utils import (
PushToHubMixin,
_add_variant,
_get_checkpoint_shard_files,
_get_model_file,
extract_commit_hash,
http_user_agent,
)
from .import_utils import (
BACKENDS_MAPPING,
DIFFUSERS_SLOW_IMPORT,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_accelerate_available,
is_accelerate_version,
is_bitsandbytes_available,
is_bitsandbytes_version,
is_bs4_available,
is_flax_available,
is_ftfy_available,
is_gguf_available,
is_gguf_version,
is_google_colab,
is_hf_hub_version,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_matplotlib_available,
is_note_seq_available,
is_onnx_available,
is_optimum_quanto_available,
is_optimum_quanto_version,
is_peft_available,
is_peft_version,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_tensorboard_available,
is_timm_available,
is_torch_available,
is_torch_npu_available,
is_torch_version,
is_torch_xla_available,
is_torch_xla_version,
is_torchao_available,
is_torchao_version,
is_torchsde_available,
is_torchvision_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
is_wandb_available,
is_xformers_available,
requires_backends,
)
from .loading_utils import get_module_from_name, get_submodule_by_name, load_image, load_video
from .logging import get_logger
from .outputs import BaseOutput
from .peft_utils import (
check_peft_version,
delete_adapter_layers,
get_adapter_name,
get_peft_kwargs,
recurse_remove_peft_layers,
scale_lora_layers,
set_adapter_layers,
set_weights_and_activate_adapters,
unscale_lora_layers,
)
from .pil_utils import PIL_INTERPOLATION, make_image_grid, numpy_to_pil, pt_to_pil
from .remote_utils import remote_decode
from .state_dict_utils import (
convert_all_state_dict_to_peft,
convert_state_dict_to_diffusers,
convert_state_dict_to_kohya,
convert_state_dict_to_peft,
convert_unet_state_dict_to_peft,
)
from .typing_utils import _get_detailed_type, _is_valid_type
logger = get_logger(__name__)
def check_min_version(min_version):
if version.parse(__version__) < version.parse(min_version):
if "dev" in min_version:
error_message = (
"This example requires a source install from HuggingFace diffusers (see "
"`https://huggingface.co/docs/diffusers/installation#install-from-source`),"
)
else:
error_message = f"This example requires a minimum version of {min_version},"
error_message += f" but the version found is {__version__}.\n"
raise ImportError(error_message)
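# Illustrative usage (the version string is a placeholder, not part of this module):
# example scripts typically call check_min_version("0.99.0.dev0") right after their
# imports so that an incompatible diffusers install fails fast with the ImportError
# raised above.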
|
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from packaging import version
from .. import __version__
from .constants import (
CONFIG_NAME,
DEPRECATED_REVISION_ARGS,
DIFFUSERS_DYNAMIC_MODULE_NAME,
FLAX_WEIGHTS_NAME,
GGUF_FILE_EXTENSION,
HF_MODULES_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MIN_PEFT_VERSION,
ONNX_EXTERNAL_WEIGHTS_NAME,
ONNX_WEIGHTS_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFETENSORS_FILE_EXTENSION,
SAFETENSORS_WEIGHTS_NAME,
USE_PEFT_BACKEND,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .deprecation_utils import deprecate
from .doc_utils import replace_example_docstring
from .dynamic_modules_utils import get_class_from_dynamic_module
from .export_utils import export_to_gif, export_to_obj, export_to_ply, export_to_video
from .hub_utils import (
PushToHubMixin,
_add_variant,
_get_checkpoint_shard_files,
_get_model_file,
extract_commit_hash,
http_user_agent,
)
from .import_utils import (
BACKENDS_MAPPING,
DIFFUSERS_SLOW_IMPORT,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_accelerate_available,
is_accelerate_version,
is_bitsandbytes_available,
is_bitsandbytes_version,
is_bs4_available,
is_flax_available,
is_ftfy_available,
is_gguf_available,
is_gguf_version,
is_google_colab,
is_hf_hub_version,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_matplotlib_available,
is_note_seq_available,
is_onnx_available,
is_optimum_quanto_available,
is_optimum_quanto_version,
is_peft_available,
is_peft_version,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_tensorboard_available,
is_timm_available,
is_torch_available,
is_torch_npu_available,
is_torch_version,
is_torch_xla_available,
is_torch_xla_version,
is_torchao_available,
is_torchsde_available,
is_torchvision_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
is_wandb_available,
is_xformers_available,
requires_backends,
)
from .loading_utils import get_module_from_name, get_submodule_by_name, load_image, load_video
from .logging import get_logger
from .outputs import BaseOutput
from .peft_utils import (
check_peft_version,
delete_adapter_layers,
get_adapter_name,
get_peft_kwargs,
recurse_remove_peft_layers,
scale_lora_layers,
set_adapter_layers,
set_weights_and_activate_adapters,
unscale_lora_layers,
)
from .pil_utils import PIL_INTERPOLATION, make_image_grid, numpy_to_pil, pt_to_pil
from .remote_utils import remote_decode
from .state_dict_utils import (
convert_all_state_dict_to_peft,
convert_state_dict_to_diffusers,
convert_state_dict_to_kohya,
convert_state_dict_to_peft,
convert_unet_state_dict_to_peft,
)
from .typing_utils import _get_detailed_type, _is_valid_type
logger = get_logger(__name__)
def check_min_version(min_version):
if version.parse(__version__) < version.parse(min_version):
if "dev" in min_version:
error_message = (
"This example requires a source install from HuggingFace diffusers (see "
"`https://huggingface.co/docs/diffusers/installation#install-from-source`),"
)
else:
error_message = f"This example requires a minimum version of {min_version},"
error_message += f" but the version found is {__version__}.\n"
raise ImportError(error_message)
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
from unittest.mock import patch
from mmengine.hooks import IterTimerHook
from mmengine.testing import RunnerTestCase
class patched_time:
count = 0
@classmethod
def time(cls):
result = cls.count
cls.count += 1
return result
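# Each call to patched_time.time() returns 0, 1, 2, ..., so any two consecutive
# calls appear exactly "1 second" apart, making the timing assertions below
# deterministic.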
class TestIterTimerHook(RunnerTestCase):
@patch('mmengine.hooks.iter_timer_hook.time', patched_time)
def test_before_iter(self):
runner = self.build_runner(self.epoch_based_cfg)
hook = self._get_iter_timer_hook(runner)
for mode in ('train', 'val', 'test'):
hook._before_epoch(runner)
hook._before_iter(runner, batch_idx=1, mode=mode)
time = runner.message_hub.get_scalar(
f'{mode}/data_time')._log_history
self.assertEqual(list(time)[-1], 1)
@patch('mmengine.hooks.iter_timer_hook.time', patched_time)
def test_after_iter(self):
cfg = copy.deepcopy(self.iter_based_cfg)
cfg.train_cfg.max_iters = 100
runner = self.build_runner(cfg)
hook = self._get_iter_timer_hook(runner)
hook.before_run(runner)
hook._before_epoch(runner)
        # Run 10 of the 100 training iterations. Under patched_time, before_train_iter
        # costs "1s" and after_train_iter costs another "1s", so each iteration is
        # counted as 2s.
for i in range(10):
hook.before_train_iter(runner, i)
hook.after_train_iter(runner, i)
runner.train_loop._iter += 1
# Left 90 iterations, so the ETA should be 90 * 2s
self.assertEqual(runner.message_hub.get_info('eta'), 180)
hook.after_train_epoch(runner)
for i in range(2):
hook.before_val_iter(runner, i)
hook.after_val_iter(runner, batch_idx=i)
self.assertEqual(runner.message_hub.get_info('eta'), 4)
for i in range(2, 4):
hook.before_val_iter(runner, i)
hook.after_val_iter(runner, batch_idx=i)
hook.after_val_epoch(runner)
self.assertEqual(runner.message_hub.get_info('eta'), 0)
for i in range(2):
hook.before_test_iter(runner, i)
hook.after_test_iter(runner, batch_idx=i)
self.assertEqual(runner.message_hub.get_info('eta'), 4)
for i in range(2, 4):
hook.before_test_iter(runner, i)
hook.after_test_iter(runner, batch_idx=i)
hook.after_test_epoch(runner)
self.assertEqual(runner.message_hub.get_info('eta'), 0)
def test_with_runner(self):
        cfg = copy.deepcopy(self.epoch_based_cfg)
        cfg.train_cfg.val_interval = 1e6  # disable validation; must be set before the runner is built
        runner = self.build_runner(cfg)
with patch('mmengine.hooks.iter_timer_hook.time', patched_time):
runner.train()
        # 4 iterations per epoch, 2 epochs in total. Under patched_time,
        # before_train_iter costs "1s" and after_train_iter costs another "1s",
        # so the total time for each iteration is 2s.
train_time = runner.message_hub.log_scalars['train/time']._log_history
self.assertEqual(len(train_time), 8)
self.assertListEqual(list(train_time), [2] * 8)
eta = runner.message_hub.runtime_info['eta']
self.assertEqual(eta, 0)
def _get_iter_timer_hook(self, runner):
for hook in runner.hooks:
if isinstance(hook, IterTimerHook):
return hook
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import MagicMock, Mock, patch
from mmengine.hooks import IterTimerHook
from mmengine.logging import MessageHub
def time_patch():
if not hasattr(time_patch, 'time'):
time_patch.time = 0
else:
time_patch.time += 1
return time_patch.time
class TestIterTimerHook(TestCase):
def setUp(self) -> None:
self.hook = IterTimerHook()
def test_init(self):
assert self.hook.time_sec_tot == 0
assert self.hook.start_iter == 0
def test_before_train(self):
runner = MagicMock()
runner.iter = 1
self.hook.before_train(runner)
assert self.hook.start_iter == 1
def test_before_epoch(self):
runner = Mock()
self.hook._before_epoch(runner)
assert isinstance(self.hook.t, float)
@patch('time.time', MagicMock(return_value=1))
def test_before_iter(self):
runner = MagicMock()
runner.log_buffer = dict()
self.hook._before_epoch(runner)
for mode in ('train', 'val', 'test'):
self.hook._before_iter(runner, batch_idx=1, mode=mode)
runner.message_hub.update_scalar.assert_called_with(
f'{mode}/data_time', 0)
@patch('time.time', time_patch)
def test_after_iter(self):
runner = MagicMock()
runner.log_buffer = dict()
runner.log_processor.window_size = 10
runner.max_iters = 100
runner.iter = 0
runner.test_dataloader = [0] * 20
runner.val_dataloader = [0] * 20
runner.message_hub = MessageHub.get_instance('test_iter_timer_hook')
self.hook.before_run(runner)
self.hook._before_epoch(runner)
# eta = (100 - 10) / 1
for _ in range(10):
self.hook._after_iter(runner, 1)
runner.iter += 1
assert runner.message_hub.get_info('eta') == 90
for i in range(10):
self.hook._after_iter(runner, batch_idx=i, mode='val')
assert runner.message_hub.get_info('eta') == 10
for i in range(11, 20):
self.hook._after_iter(runner, batch_idx=i, mode='val')
assert runner.message_hub.get_info('eta') == 0
self.hook.after_val_epoch(runner)
for i in range(10):
self.hook._after_iter(runner, batch_idx=i, mode='test')
assert runner.message_hub.get_info('eta') == 10
for i in range(11, 20):
self.hook._after_iter(runner, batch_idx=i, mode='test')
assert runner.message_hub.get_info('eta') == 0
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import shutil
import tempfile
import unittest
from transformers import AutoProcessor, AutoTokenizer, LlamaTokenizerFast, LlavaProcessor
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
from ...test_processing_common import ProcessorTesterMixin
if is_vision_available():
from transformers import CLIPImageProcessor
@require_vision
class LlavaProcessorTest(ProcessorTesterMixin, unittest.TestCase):
processor_class = LlavaProcessor
@classmethod
def setUpClass(cls):
cls.tmpdirname = tempfile.mkdtemp()
image_processor = CLIPImageProcessor(do_center_crop=False)
tokenizer = LlamaTokenizerFast.from_pretrained("huggyllama/llama-7b")
tokenizer.add_special_tokens({"additional_special_tokens": ["<image>"]})
processor_kwargs = cls.prepare_processor_dict()
processor = LlavaProcessor(image_processor, tokenizer, **processor_kwargs)
processor.save_pretrained(cls.tmpdirname)
cls.image_token = processor.image_token
def get_tokenizer(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer
def get_image_processor(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.tmpdirname, ignore_errors=True)
@staticmethod
def prepare_processor_dict():
return {
"chat_template": "{% for message in messages %}{% if message['role'] != 'system' %}{{ message['role'].upper() + ': '}}{% endif %}{# Render all images first #}{% for content in message['content'] | selectattr('type', 'equalto', 'image') %}{{ '<image>\n' }}{% endfor %}{# Render all text next #}{% if message['role'] != 'assistant' %}{% for content in message['content'] | selectattr('type', 'equalto', 'text') %}{{ content['text'] + ' '}}{% endfor %}{% else %}{% for content in message['content'] | selectattr('type', 'equalto', 'text') %}{% generation %}{{ content['text'] + ' '}}{% endgeneration %}{% endfor %}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'ASSISTANT:' }}{% endif %}",
"patch_size": 128,
"vision_feature_select_strategy": "default"
} # fmt: skip
def test_chat_template_is_saved(self):
processor_loaded = self.processor_class.from_pretrained(self.tmpdirname)
processor_dict_loaded = json.loads(processor_loaded.to_json_string())
# chat templates aren't serialized to json in processors
self.assertFalse("chat_template" in processor_dict_loaded.keys())
# they have to be saved as separate file and loaded back from that file
# so we check if the same template is loaded
processor_dict = self.prepare_processor_dict()
self.assertTrue(processor_loaded.chat_template == processor_dict.get("chat_template", None))
def test_can_load_various_tokenizers(self):
for checkpoint in ["Intel/llava-gemma-2b", "llava-hf/llava-1.5-7b-hf"]:
processor = LlavaProcessor.from_pretrained(checkpoint)
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
self.assertEqual(processor.tokenizer.__class__, tokenizer.__class__)
def test_special_mm_token_truncation(self):
"""Tests that special vision tokens do not get truncated when `truncation=True` is set."""
processor = LlavaProcessor.from_pretrained("llava-hf/llava-1.5-7b-hf")
input_str = self.prepare_text_inputs(batch_size=2, modality="image")
image_input = self.prepare_image_inputs(batch_size=2)
_ = processor(
text=input_str,
images=image_input,
return_tensors="pt",
truncation=None,
padding=True,
)
with self.assertRaises(ValueError):
_ = processor(
text=input_str,
images=image_input,
return_tensors="pt",
truncation=True,
padding=True,
max_length=5,
)
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import shutil
import tempfile
import unittest
from transformers import AutoProcessor, AutoTokenizer, LlamaTokenizerFast, LlavaProcessor
from transformers.testing_utils import require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_processing_common import ProcessorTesterMixin
if is_vision_available():
from transformers import CLIPImageProcessor
if is_torch_available():
    pass
@require_vision
class LlavaProcessorTest(ProcessorTesterMixin, unittest.TestCase):
processor_class = LlavaProcessor
@classmethod
def setUpClass(cls):
cls.tmpdirname = tempfile.mkdtemp()
image_processor = CLIPImageProcessor(do_center_crop=False)
tokenizer = LlamaTokenizerFast.from_pretrained("huggyllama/llama-7b")
tokenizer.add_special_tokens({"additional_special_tokens": ["<image>"]})
processor_kwargs = cls.prepare_processor_dict()
processor = LlavaProcessor(image_processor, tokenizer, **processor_kwargs)
processor.save_pretrained(cls.tmpdirname)
cls.image_token = processor.image_token
def get_tokenizer(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer
def get_image_processor(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.tmpdirname, ignore_errors=True)
@staticmethod
def prepare_processor_dict():
return {
"chat_template": "{% for message in messages %}{% if message['role'] != 'system' %}{{ message['role'].upper() + ': '}}{% endif %}{# Render all images first #}{% for content in message['content'] | selectattr('type', 'equalto', 'image') %}{{ '<image>\n' }}{% endfor %}{# Render all text next #}{% if message['role'] != 'assistant' %}{% for content in message['content'] | selectattr('type', 'equalto', 'text') %}{{ content['text'] + ' '}}{% endfor %}{% else %}{% for content in message['content'] | selectattr('type', 'equalto', 'text') %}{% generation %}{{ content['text'] + ' '}}{% endgeneration %}{% endfor %}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'ASSISTANT:' }}{% endif %}",
"patch_size": 128,
"vision_feature_select_strategy": "default"
} # fmt: skip
def test_chat_template_is_saved(self):
processor_loaded = self.processor_class.from_pretrained(self.tmpdirname)
processor_dict_loaded = json.loads(processor_loaded.to_json_string())
# chat templates aren't serialized to json in processors
self.assertFalse("chat_template" in processor_dict_loaded.keys())
# they have to be saved as separate file and loaded back from that file
# so we check if the same template is loaded
processor_dict = self.prepare_processor_dict()
self.assertTrue(processor_loaded.chat_template == processor_dict.get("chat_template", None))
def test_can_load_various_tokenizers(self):
for checkpoint in ["Intel/llava-gemma-2b", "llava-hf/llava-1.5-7b-hf"]:
processor = LlavaProcessor.from_pretrained(checkpoint)
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
self.assertEqual(processor.tokenizer.__class__, tokenizer.__class__)
def test_special_mm_token_truncation(self):
"""Tests that special vision tokens do not get truncated when `truncation=True` is set."""
processor = LlavaProcessor.from_pretrained("llava-hf/llava-1.5-7b-hf")
input_str = self.prepare_text_inputs(batch_size=2, modality="image")
image_input = self.prepare_image_inputs(batch_size=2)
_ = processor(
text=input_str,
images=image_input,
return_tensors="pt",
truncation=None,
padding=True,
)
with self.assertRaises(ValueError):
_ = processor(
text=input_str,
images=image_input,
return_tensors="pt",
truncation=True,
padding=True,
max_length=5,
)
|
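A minimal sketch (not part of the test above; the checkpoint and the message contents are illustrative) of what a chat template like the one in prepare_processor_dict renders for a conversation that contains an image:

from transformers import AutoProcessor

# Hypothetical checkpoint; any LLaVA processor carrying the same template behaves similarly.
processor = AutoProcessor.from_pretrained("llava-hf/llava-1.5-7b-hf")
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image"},
            {"type": "text", "text": "What is shown in this image?"},
        ],
    }
]
# Images are rendered first, then the text, then the ASSISTANT: generation prompt.
prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
print(prompt)  # roughly: "USER: <image>\nWhat is shown in this image? ASSISTANT:"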
from langchain_core.prompts import PromptTemplate
from langchain.output_parsers.regex import RegexParser
output_parser = RegexParser(
regex=r"(.*?)\nScore: (\d*)",
output_keys=["answer", "score"],
)
prompt_template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
In addition to giving an answer, also return a score of how fully it answered the user's question. This should be in the following format:
Question: [question here]
Helpful Answer: [answer here]
Score: [score between 0 and 100]
How to determine the score:
- Higher is a better answer
- Better responds fully to the asked question, with sufficient level of detail
- If you do not know the answer based on the context, that should be a score of 0
- Don't be overconfident!
Example #1
Context:
---------
Apples are red
---------
Question: what color are apples?
Helpful Answer: red
Score: 100
Example #2
Context:
---------
it was night and the witness forgot his glasses. he was not sure if it was a sports car or an suv
---------
Question: what type was the car?
Helpful Answer: a sports car or an suv
Score: 60
Example #3
Context:
---------
Pears are either red or orange
---------
Question: what color are apples?
Helpful Answer: This document does not answer the question
Score: 0
Begin!
Context:
---------
{context}
---------
Question: {question}
Helpful Answer:""" # noqa: E501
PROMPT = PromptTemplate(
template=prompt_template,
input_variables=["context", "question"],
output_parser=output_parser,
)
|
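A minimal sketch (the completion string is invented for illustration) of how the RegexParser configured above splits a model completion into its answer and score keys:

from langchain.output_parsers.regex import RegexParser

output_parser = RegexParser(
    regex=r"(.*?)\nScore: (\d*)",
    output_keys=["answer", "score"],
)
# A completion that follows the "Helpful Answer ... Score" format requested by the prompt.
parsed = output_parser.parse("red\nScore: 100")
print(parsed)  # {'answer': 'red', 'score': '100'}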
# flake8: noqa
from langchain.output_parsers.regex import RegexParser
from langchain_core.prompts import PromptTemplate
output_parser = RegexParser(
regex=r"(.*?)\nScore: (\d*)",
output_keys=["answer", "score"],
)
prompt_template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
In addition to giving an answer, also return a score of how fully it answered the user's question. This should be in the following format:
Question: [question here]
Helpful Answer: [answer here]
Score: [score between 0 and 100]
How to determine the score:
- Higher is a better answer
- Better responds fully to the asked question, with sufficient level of detail
- If you do not know the answer based on the context, that should be a score of 0
- Don't be overconfident!
Example #1
Context:
---------
Apples are red
---------
Question: what color are apples?
Helpful Answer: red
Score: 100
Example #2
Context:
---------
it was night and the witness forgot his glasses. he was not sure if it was a sports car or an suv
---------
Question: what type was the car?
Helpful Answer: a sports car or an suv
Score: 60
Example #3
Context:
---------
Pears are either red or orange
---------
Question: what color are apples?
Helpful Answer: This document does not answer the question
Score: 0
Begin!
Context:
---------
{context}
---------
Question: {question}
Helpful Answer:"""
PROMPT = PromptTemplate(
template=prompt_template,
input_variables=["context", "question"],
output_parser=output_parser,
)
|
import json
import os
import zlib
from typing import Callable, TextIO
def exact_div(x, y):
assert x % y == 0
return x // y
def str2bool(string):
str2val = {"True": True, "False": False}
if string in str2val:
return str2val[string]
else:
raise ValueError(f"Expected one of {set(str2val.keys())}, got {string}")
def optional_int(string):
return None if string == "None" else int(string)
def optional_float(string):
return None if string == "None" else float(string)
def compression_ratio(text) -> float:
text_bytes = text.encode("utf-8")
return len(text_bytes) / len(zlib.compress(text_bytes))
def format_timestamp(seconds: float, always_include_hours: bool = False, decimal_marker: str = '.'):
assert seconds >= 0, "non-negative timestamp expected"
milliseconds = round(seconds * 1000.0)
hours = milliseconds // 3_600_000
milliseconds -= hours * 3_600_000
minutes = milliseconds // 60_000
milliseconds -= minutes * 60_000
seconds = milliseconds // 1_000
milliseconds -= seconds * 1_000
hours_marker = f"{hours:02d}:" if always_include_hours or hours > 0 else ""
return f"{hours_marker}{minutes:02d}:{seconds:02d}{decimal_marker}{milliseconds:03d}"
class ResultWriter:
extension: str
def __init__(self, output_dir: str):
self.output_dir = output_dir
def __call__(self, result: dict, audio_path: str):
audio_basename = os.path.basename(audio_path)
output_path = os.path.join(self.output_dir, audio_basename + "." + self.extension)
with open(output_path, "w", encoding="utf-8") as f:
self.write_result(result, file=f)
def write_result(self, result: dict, file: TextIO):
raise NotImplementedError
class WriteTXT(ResultWriter):
extension: str = "txt"
def write_result(self, result: dict, file: TextIO):
for segment in result["segments"]:
print(segment['text'].strip(), file=file, flush=True)
class WriteVTT(ResultWriter):
extension: str = "vtt"
def write_result(self, result: dict, file: TextIO):
print("WEBVTT\n", file=file)
for segment in result["segments"]:
print(
f"{format_timestamp(segment['start'])} --> {format_timestamp(segment['end'])}\n"
f"{segment['text'].strip().replace('-->', '->')}\n",
file=file,
flush=True,
)
class WriteSRT(ResultWriter):
extension: str = "srt"
def write_result(self, result: dict, file: TextIO):
for i, segment in enumerate(result["segments"], start=1):
# write srt lines
print(
f"{i}\n"
f"{format_timestamp(segment['start'], always_include_hours=True, decimal_marker=',')} --> "
f"{format_timestamp(segment['end'], always_include_hours=True, decimal_marker=',')}\n"
f"{segment['text'].strip().replace('-->', '->')}\n",
file=file,
flush=True,
)
class WriteTSV(ResultWriter):
"""
Write a transcript to a file in TSV (tab-separated values) format containing lines like:
<start time in integer milliseconds>\t<end time in integer milliseconds>\t<transcript text>
    Using integer milliseconds as start and end times means there's no chance of interference from
    an environment setting a language encoding that causes the decimal in a floating point number
    to appear as a comma; it is also faster and more efficient to parse and store, e.g., in C++.
"""
extension: str = "tsv"
def write_result(self, result: dict, file: TextIO):
print("start", "end", "text", sep="\t", file=file)
for segment in result["segments"]:
print(round(1000 * segment['start']), file=file, end="\t")
print(round(1000 * segment['end']), file=file, end="\t")
print(segment['text'].strip().replace("\t", " "), file=file, flush=True)
class WriteJSON(ResultWriter):
extension: str = "json"
def write_result(self, result: dict, file: TextIO):
json.dump(result, file)
def get_writer(output_format: str, output_dir: str) -> Callable[[dict, TextIO], None]:
writers = {
"txt": WriteTXT,
"vtt": WriteVTT,
"srt": WriteSRT,
"tsv": WriteTSV,
"json": WriteJSON,
}
if output_format == "all":
all_writers = [writer(output_dir) for writer in writers.values()]
def write_all(result: dict, file: TextIO):
for writer in all_writers:
writer(result, file)
return write_all
return writers[output_format](output_dir)
|
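A minimal usage sketch for get_writer defined above (the result dict, output directory, and audio file name are made up); it writes one fake transcript in every supported format:

import os

os.makedirs("transcripts", exist_ok=True)
result = {
    "text": " hello world",
    "segments": [{"start": 0.0, "end": 1.5, "text": " hello world"}],
}
# "all" returns a callable that fans out to the txt/vtt/srt/tsv/json writers.
writer = get_writer("all", "transcripts")
writer(result, "audio.wav")  # produces transcripts/audio.wav.txt, .vtt, .srt, .tsv, .json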
import json
import os
import zlib
from typing import Callable, TextIO
def exact_div(x, y):
assert x % y == 0
return x // y
def str2bool(string):
str2val = {"True": True, "False": False}
if string in str2val:
return str2val[string]
else:
raise ValueError(f"Expected one of {set(str2val.keys())}, got {string}")
def optional_int(string):
return None if string == "None" else int(string)
def optional_float(string):
return None if string == "None" else float(string)
def compression_ratio(text) -> float:
text_bytes = text.encode("utf-8")
return len(text_bytes) / len(zlib.compress(text_bytes))
def format_timestamp(seconds: float, always_include_hours: bool = False, decimal_marker: str = '.'):
assert seconds >= 0, "non-negative timestamp expected"
milliseconds = round(seconds * 1000.0)
hours = milliseconds // 3_600_000
milliseconds -= hours * 3_600_000
minutes = milliseconds // 60_000
milliseconds -= minutes * 60_000
seconds = milliseconds // 1_000
milliseconds -= seconds * 1_000
hours_marker = f"{hours:02d}:" if always_include_hours or hours > 0 else ""
return f"{hours_marker}{minutes:02d}:{seconds:02d}{decimal_marker}{milliseconds:03d}"
class ResultWriter:
extension: str
def __init__(self, output_dir: str):
self.output_dir = output_dir
def __call__(self, result: dict, audio_path: str):
audio_basename = os.path.basename(audio_path)
output_path = os.path.join(self.output_dir, audio_basename + "." + self.extension)
with open(output_path, "w", encoding="utf-8") as f:
self.write_result(result, file=f)
def write_result(self, result: dict, file: TextIO):
raise NotImplementedError
class WriteTXT(ResultWriter):
extension: str = "txt"
def write_result(self, result: dict, file: TextIO):
for segment in result["segments"]:
print(segment['text'].strip(), file=file, flush=True)
class WriteVTT(ResultWriter):
extension: str = "vtt"
def write_result(self, result: dict, file: TextIO):
print("WEBVTT\n", file=file)
for segment in result["segments"]:
print(
f"{format_timestamp(segment['start'])} --> {format_timestamp(segment['end'])}\n"
f"{segment['text'].strip().replace('-->', '->')}\n",
file=file,
flush=True,
)
class WriteSRT(ResultWriter):
extension: str = "srt"
def write_result(self, result: dict, file: TextIO):
for i, segment in enumerate(result["segments"], start=1):
# write srt lines
print(
f"{i}\n"
f"{format_timestamp(segment['start'], always_include_hours=True, decimal_marker=',')} --> "
f"{format_timestamp(segment['end'], always_include_hours=True, decimal_marker=',')}\n"
f"{segment['text'].strip().replace('-->', '->')}\n",
file=file,
flush=True,
)
class WriteJSON(ResultWriter):
extension: str = "json"
def write_result(self, result: dict, file: TextIO):
json.dump(result, file)
def get_writer(output_format: str, output_dir: str) -> Callable[[dict, TextIO], None]:
writers = {
"txt": WriteTXT,
"vtt": WriteVTT,
"srt": WriteSRT,
"json": WriteJSON,
}
if output_format == "all":
all_writers = [writer(output_dir) for writer in writers.values()]
def write_all(result: dict, file: TextIO):
for writer in all_writers:
writer(result, file)
return write_all
return writers[output_format](output_dir)
|
_base_ = [
'../_base_/models/faster-rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='FasterRCNN',
backbone=dict(
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
rpn_head=dict(
type='RPNHead',
anchor_generator=dict(
type='LegacyAnchorGenerator',
center_offset=0.5,
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(type='LegacyDeltaXYWHBBoxCoder'),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHead',
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(
type='RoIAlign',
output_size=7,
sampling_ratio=2,
aligned=False),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
bbox_coder=dict(type='LegacyDeltaXYWHBBoxCoder'),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rpn_proposal=dict(max_per_img=2000),
rcnn=dict(assigner=dict(match_low_quality=True))))
|
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='FasterRCNN',
backbone=dict(
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
rpn_head=dict(
type='RPNHead',
anchor_generator=dict(
type='LegacyAnchorGenerator',
center_offset=0.5,
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(type='LegacyDeltaXYWHBBoxCoder'),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHead',
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(
type='RoIAlign',
output_size=7,
sampling_ratio=2,
aligned=False),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
bbox_coder=dict(type='LegacyDeltaXYWHBBoxCoder'),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rpn_proposal=dict(max_per_img=2000),
rcnn=dict(assigner=dict(match_low_quality=True))))
|
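A minimal sketch (the config path is hypothetical and mmengine is assumed to be available) of loading a config such as the two above, with the files listed in _base_ merged in:

from mmengine.config import Config

# Entries in _base_ are resolved relative to the config file and merged into a single config.
cfg = Config.fromfile("configs/legacy_1.x/faster-rcnn_r50_fpn_1x_coco_v1.py")
print(cfg.model.rpn_head.anchor_generator.type)  # 'LegacyAnchorGenerator'
print(cfg.model.roi_head.bbox_roi_extractor.roi_layer.aligned)  # False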