from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/deit-base-distilled-patch16-224": (
        "https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}


class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
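
# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module).
# A minimal, hedged example of exercising the two classes above; it assumes
# this file lives in a transformers checkout where the relative imports resolve.
if __name__ == "__main__":
    config = DeiTConfig(image_size=384)  # override one default for illustration
    onnx_config = DeiTOnnxConfig(config)
    print(onnx_config.inputs)               # pixel_values with dynamic batch/channel/size axes
    print(onnx_config.atol_for_validation)  # 1e-4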
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging


logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}


class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )


class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
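
# ---------------------------------------------------------------------------
# Usage sketch (added; not in the original file): generating ONNX dummy inputs
# from the config above. This is hedged: it assumes torch is installed and that
# the facebook/blenderbot_small-90M tokenizer can be fetched from the hub.
if __name__ == "__main__":
    from transformers import BlenderbotSmallTokenizer

    tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
    onnx_config = BlenderbotSmallOnnxConfig(BlenderbotSmallConfig(), task="default")
    dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
    print({name: tuple(t.shape) for name, t in dummy.items() if hasattr(t, "shape")})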
import gc
import inspect
import unittest

import torch
from parameterized import parameterized

from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin


enable_full_determinism()


class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    model_class = PriorTransformer
    main_input_name = "hidden_states"

    @property
    def dummy_input(self):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def get_dummy_seed_input(self, seed=0):
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    @property
    def input_shape(self):
        return (4, 8)

    @property
    def output_shape(self):
        return (4, 8)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True
        )
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]

        assert hidden_states is not None, "Make sure output is not None"

    def test_forward_signature(self):
        init_dict, _ = self.prepare_init_args_and_inputs_for_common()

        model = self.model_class(**init_dict)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]

        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names)

    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
        model = model.to(torch_device)

        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()

        input = self.get_dummy_seed_input()

        with torch.no_grad():
            output = model(**input)[0]

        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))


@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        torch.manual_seed(seed)
        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
            # fmt: on
        ]
    )
    def test_kandinsky_prior(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(torch_device)
        input = self.get_dummy_seed_input(seed=seed)

        with torch.no_grad():
            sample = model(**input)[0]

        assert list(sample.shape) == [1, 768]

        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
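
# ---------------------------------------------------------------------------
# Usage sketch (added; not part of the test module): building the same tiny
# PriorTransformer the unit tests use and running one forward pass on CPU.
# Hedged: assumes diffusers and torch are installed.
if __name__ == "__main__":
    tiny = PriorTransformer(
        num_attention_heads=2,
        attention_head_dim=4,
        num_layers=2,
        embedding_dim=8,
        num_embeddings=7,
        additional_embeddings=4,
    )
    batch = {
        "hidden_states": torch.randn(4, 8),
        "timestep": 2,
        "proj_embedding": torch.randn(4, 8),
        "encoder_hidden_states": torch.randn(4, 7, 8),
    }
    with torch.no_grad():
        out = tiny(**batch)[0]
    print(out.shape)  # expected: torch.Size([4, 8])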
import re
import string

import numpy as np

import datasets


_DESCRIPTION = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"

_KWARGS_DESCRIPTION = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all numbers before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n"

_CITATION = "\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
import re
import string
from collections import Counter

import sacrebleu
import sacremoses
from packaging import version

import datasets


_CITATION = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"

_DESCRIPTION = "\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"

_KWARGS_DESCRIPTION = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100


def SARIngram(sgrams, cgrams, rgramslist, numref):
    # Compares n-grams of the source (s), candidate (c) and reference (r) sentences
    # and returns the keep, delete and add sub-scores of SARI for this n-gram order.
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgrams) - set(sgrams)
    addgramcountergood = set(addgramcounter) & set(rgramsall)
    addgramcounterall = set(rgramsall) - set(sgrams)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)


def SARIsent(ssent, csent, rsents):
    # Builds 1- to 4-gram lists for the source, candidate and reference sentences
    # and averages the per-order SARI sub-scores.
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []

    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3

    return finalscore


def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets,
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent


def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score


def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
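
# ---------------------------------------------------------------------------
# Usage sketch (added; not part of the metric module), mirroring the docstring
# example above. Hedged: assumes the legacy `datasets.load_metric` API is
# available (the metric has since moved to the `evaluate` library).
if __name__ == "__main__":
    sources = ["About 95 species are currently accepted ."]
    predictions = ["About 95 you now get in ."]
    references = [["About 95 species are currently known ."]]
    wiki_split = datasets.load_metric("wiki_split")
    print(wiki_split.compute(sources=sources, predictions=predictions, references=references))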
import warnings

from ...utils import logging
from .image_processing_yolos import YolosImageProcessor


logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
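
# Migration sketch (added): the deprecated class above is a thin alias, so new
# code should construct the image processor directly, e.g.:
#     from transformers import YolosImageProcessor
#     image_processor = YolosImageProcessor.from_pretrained("hustvl/yolos-small")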
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}


class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
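
# ---------------------------------------------------------------------------
# Usage sketch (added; not in the original file): instantiating a small
# Falcon-style config and reading the derived properties defined above.
if __name__ == "__main__":
    config = FalconConfig(hidden_size=1024, num_attention_heads=16, alibi=False)
    print(config.head_dim)  # 1024 // 16 == 64
    print(config.rotary)    # True, because alibi is disabled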
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
monkeypatch.setattr("""datasets.utils.deprecation_utils._emitted_deprecation_warnings""" , set() )
@pytest.fixture
def lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
class __lowercase :
def __init__( self : List[str] ,A : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = metric_id
class __lowercase :
snake_case_ = [MetricMock(__lowerCamelCase ) for metric_id in ["""accuracy""", """mse""", """precision""", """codeparrot/apps_metric"""]]
def __lowercase ( self : List[Any] ):
'''simple docstring'''
return self._metrics
monkeypatch.setattr("""datasets.inspect.huggingface_hub""" , HfhMock() )
@pytest.mark.parametrize(
"""func, args""" , [(load_metric, ("""metrics/mse""",)), (list_metrics, ()), (inspect_metric, ("""metrics/mse""", """tmp_path"""))] )
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
if "tmp_path" in args:
UpperCAmelCase__ : Any = tuple(arg if arg != """tmp_path""" else tmp_path for arg in args )
with pytest.warns(__UpperCamelCase , match="""https://huggingface.co/docs/evaluate""" ):
func(*__UpperCamelCase )
def solution(length: int = 50) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )

    return sum(different_colour_ways_number[length])


if __name__ == "__main__":
    print(f"{solution() = }")
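
# Sanity check (added, hand-computed): each column of the table counts rows that
# contain at least one tile of a fixed size (2, 3 or 4). For length 5 those
# per-size counts are 7, 3 and 2, so the total should be 12.
if __name__ == "__main__":
    assert solution(5) == 7 + 3 + 2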
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
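
# Note (added, hedged): with this lazy-module pattern, a submodule is imported
# only on first attribute access, e.g. (import path assumed from the four-dot
# relative import above, which suggests transformers/models/deprecated/mctct):
#     from transformers.models.deprecated.mctct import MCTCTConfig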
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
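
# ---------------------------------------------------------------------------
# Usage sketch (added; not in the original file): the ONNX config's dynamic
# axes depend on the task it is constructed for.
if __name__ == "__main__":
    onnx_config = Data2VecTextOnnxConfig(Data2VecTextConfig(), task="multiple-choice")
    print(onnx_config.inputs)  # input_ids/attention_mask with batch, choice and sequence axes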
from typing import Dict, Iterable, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class LevitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
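
# ---------------------------------------------------------------------------
# Usage sketch (added; not in the original file): preprocessing one synthetic
# image with the processor defined above. Hedged: the class name is as
# reconstructed here, and numpy output is requested to avoid a torch dependency.
if __name__ == "__main__":
    image = np.random.randint(0, 256, size=(512, 640, 3), dtype=np.uint8)
    processor = LevitImageProcessor()
    batch = processor.preprocess(image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224) after resize + center crop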
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__A = logging.get_logger(__name__)
class _A ( BaseImageProcessor ):
    """simple docstring"""

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        # default_to_square values in this class follow the upstream BLIP processor
        size = get_size_dict(size, default_to_square=True)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, do_convert_rgb: bool = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
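# A hypothetical usage sketch (not part of the original file) for the processor class
# above; `_A` is its obfuscated name, and the blank 640x480 image is a placeholder.
if __name__ == "__main__" and is_vision_available():
    processor = _A()
    demo_image = PIL.Image.new("RGB", (640, 480))
    batch = processor.preprocess(demo_image, return_tensors="np")
    print(batch["pixel_values"].shape)  # expected: (1, 3, 384, 384)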
| 68 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class lowerCAmelCase_ ( PretrainedConfig ):
    model_type = "donut-swin"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1E-5, **kwargs) -> Optional[Any]:
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
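# Worked example of the `hidden_size` computed above: with the defaults embed_dim=96
# and depths=[2, 2, 6, 2], the channel dimension after the last stage is
# 96 * 2 ** (4 - 1) = 768.
if __name__ == "__main__":
    embed_dim, depths = 96, [2, 2, 6, 2]
    assert int(embed_dim * 2 ** (len(depths) - 1)) == 768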
| 18 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_luke''': ['''LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LukeConfig'''],
'''tokenization_luke''': ['''LukeTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
'''LUKE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LukeForEntityClassification''',
'''LukeForEntityPairClassification''',
'''LukeForEntitySpanClassification''',
'''LukeForMultipleChoice''',
'''LukeForQuestionAnswering''',
'''LukeForSequenceClassification''',
'''LukeForTokenClassification''',
'''LukeForMaskedLM''',
'''LukeModel''',
'''LukePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
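# A brief note on the pattern above (standard transformers lazy-import behavior, not
# part of this file): nothing under `modeling_luke` is imported until first access.
if __name__ == "__main__":
    import importlib

    luke_module = importlib.import_module("transformers.models.luke")
    print(type(luke_module).__name__)  # "_LazyModule" until an attribute is touched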
| 69 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}
class lowerCAmelCase_ ( PretrainedConfig ):
    model_type = "swinv2"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1E-5, encoder_stride=32, **kwargs) -> Tuple:
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        # attribute name per the upstream Swinv2Config (assumption)
        self.pretrained_window_sizes = (0, 0, 0, 0)
| 18 | 0 |
from ...configuration_utils import PretrainedConfig
lowerCamelCase : List[Any] = {
"google/tapas-base-finetuned-sqa": (
"https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
),
"google/tapas-base-finetuned-wtq": (
"https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
),
"google/tapas-base-finetuned-wikisql-supervised": (
"https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
),
"google/tapas-base-finetuned-tabfact": (
"https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
),
}
class A( PretrainedConfig ):
    '''simple docstring'''

    model_type = '''tapas'''

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10], initializer_range=0.02, layer_norm_eps=1E-12, pad_token_id=0, positive_label_weight=10.0, num_aggregation_labels=0, aggregation_loss_weight=1.0, use_answer_as_supervision=None, answer_loss_importance=1.0, use_normalized_answer_loss=False, huber_loss_delta=None, temperature=1.0, aggregation_temperature=1.0, use_gumbel_for_cells=False, use_gumbel_for_aggregation=False, average_approximation_function="ratio", cell_selection_preference=None, answer_loss_cutoff=None, max_num_rows=64, max_num_columns=32, average_logits_per_cell=False, select_one_column=True, allow_empty_column_selection=False, init_cell_selection_weights_to_zero=False, reset_position_index_per_cell=True, disable_per_token_loss=False, aggregation_labels=None, no_aggregation_label_index=None, **kwargs) -> str:
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss
        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index
        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
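# Worked example (hypothetical labels) of the aggregation-label normalization above:
# JSON-serialized configs carry string keys, which are coerced back to integers.
if __name__ == "__main__":
    labels = {"0": "NONE", "1": "SUM", "2": "AVERAGE", "3": "COUNT"}
    assert {int(k): v for k, v in labels.items()} == {0: "NONE", 1: "SUM", 2: "AVERAGE", 3: "COUNT"}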
| 70 |
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class lowerCAmelCase_ ( ModelTesterMixin, UNetTesterMixin, unittest.TestCase ):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1E-2
    @property
    def dummy_input(self) -> Union[str, Any]:
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}
@property
def _snake_case ( self ) -> Any:
return (3, 32, 32)
@property
def _snake_case ( self ) -> List[Any]:
return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common(self) -> str:
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
def _snake_case ( self ) -> Optional[int]:
pass
def _snake_case ( self ) -> Any:
pass
@unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" )
def _snake_case ( self ) -> str:
# enable deterministic behavior for gradient checkpointing
_lowerCAmelCase , _lowerCAmelCase = self.prepare_init_args_and_inputs_for_common()
_lowerCAmelCase = self.model_class(**_lowerCAmelCase )
model.to(_lowerCAmelCase )
assert not model.is_gradient_checkpointing and model.training
_lowerCAmelCase = model(**_lowerCAmelCase ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
_lowerCAmelCase = torch.randn_like(_lowerCAmelCase )
_lowerCAmelCase = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
_lowerCAmelCase = self.model_class(**_lowerCAmelCase )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(_lowerCAmelCase )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
_lowerCAmelCase = model_a(**_lowerCAmelCase ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
_lowerCAmelCase = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1E-5 )
_lowerCAmelCase = dict(model.named_parameters() )
_lowerCAmelCase = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
def _snake_case ( self ) -> Optional[Any]:
_lowerCAmelCase , _lowerCAmelCase = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(_lowerCAmelCase )
_lowerCAmelCase = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def _snake_case ( self ) -> Dict:
_lowerCAmelCase = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" )
_lowerCAmelCase = model.to(_lowerCAmelCase )
model.eval()
if torch_device == "mps":
_lowerCAmelCase = torch.manual_seed(0 )
else:
_lowerCAmelCase = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 )
_lowerCAmelCase = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
_lowerCAmelCase = image.to(_lowerCAmelCase )
with torch.no_grad():
_lowerCAmelCase = model(_lowerCAmelCase , sample_posterior=_lowerCAmelCase , generator=_lowerCAmelCase ).sample
_lowerCAmelCase = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
_lowerCAmelCase = torch.tensor(
[
-4.0078E-01,
-3.8323E-04,
-1.2681E-01,
-1.1462E-01,
2.0095E-01,
1.0893E-01,
-8.8247E-02,
-3.0361E-01,
-9.8644E-03,
] )
elif torch_device == "cpu":
_lowerCAmelCase = torch.tensor(
[-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
else:
_lowerCAmelCase = torch.tensor(
[-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
self.assertTrue(torch_all_close(_lowerCAmelCase , _lowerCAmelCase , rtol=1E-2 ) )
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
    def get_file_format(self, seed, shape) -> Union[str, Any]:
        return f'''gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy'''
    def tearDown(self) -> List[str]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    # `fpaa` (the fp16 flag) keeps the keyword name used by the call sites below
    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fpaa=False) -> Tuple:
        dtype = torch.float16 if fpaa else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image
    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fpaa=False) -> Tuple:
        revision = "fp16" if fpaa else None
        torch_dtype = torch.float16 if fpaa else torch.float32
        model = AutoencoderKL.from_pretrained(
            model_id, subfolder="vae", torch_dtype=torch_dtype, revision=revision)
        model.to(torch_device).eval()
        return model
    def get_generator(self, seed=0) -> str:
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
_lowerCAmelCase = self.get_sd_vae_model()
_lowerCAmelCase = self.get_sd_image(_lowerCAmelCase )
_lowerCAmelCase = self.get_generator(_lowerCAmelCase )
with torch.no_grad():
_lowerCAmelCase = model(_lowerCAmelCase , generator=_lowerCAmelCase , sample_posterior=_lowerCAmelCase ).sample
assert sample.shape == image.shape
_lowerCAmelCase = sample[-1, -2:, -2:, :2].flatten().float().cpu()
_lowerCAmelCase = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
_lowerCAmelCase = self.get_sd_vae_model(fpaa=_lowerCAmelCase )
_lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , fpaa=_lowerCAmelCase )
_lowerCAmelCase = self.get_generator(_lowerCAmelCase )
with torch.no_grad():
_lowerCAmelCase = model(_lowerCAmelCase , generator=_lowerCAmelCase , sample_posterior=_lowerCAmelCase ).sample
assert sample.shape == image.shape
_lowerCAmelCase = sample[-1, -2:, :2, -2:].flatten().float().cpu()
_lowerCAmelCase = torch.tensor(_lowerCAmelCase )
assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Tuple:
_lowerCAmelCase = self.get_sd_vae_model()
_lowerCAmelCase = self.get_sd_image(_lowerCAmelCase )
with torch.no_grad():
_lowerCAmelCase = model(_lowerCAmelCase ).sample
assert sample.shape == image.shape
_lowerCAmelCase = sample[-1, -2:, -2:, :2].flatten().float().cpu()
_lowerCAmelCase = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
_lowerCAmelCase = self.get_sd_vae_model()
_lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , shape=(3, 4, 64, 64) )
with torch.no_grad():
_lowerCAmelCase = model.decode(_lowerCAmelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
_lowerCAmelCase = sample[-1, -2:, :2, -2:].flatten().cpu()
_lowerCAmelCase = torch.tensor(_lowerCAmelCase )
assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> str:
_lowerCAmelCase = self.get_sd_vae_model(fpaa=_lowerCAmelCase )
_lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , shape=(3, 4, 64, 64) , fpaa=_lowerCAmelCase )
with torch.no_grad():
_lowerCAmelCase = model.decode(_lowerCAmelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
_lowerCAmelCase = sample[-1, -2:, :2, -2:].flatten().float().cpu()
_lowerCAmelCase = torch.tensor(_lowerCAmelCase )
assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=5E-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def _snake_case ( self , _lowerCAmelCase ) -> List[str]:
_lowerCAmelCase = self.get_sd_vae_model(fpaa=_lowerCAmelCase )
_lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , shape=(3, 4, 64, 64) , fpaa=_lowerCAmelCase )
with torch.no_grad():
_lowerCAmelCase = model.decode(_lowerCAmelCase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
_lowerCAmelCase = model.decode(_lowerCAmelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1E-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def _snake_case ( self , _lowerCAmelCase ) -> Any:
_lowerCAmelCase = self.get_sd_vae_model()
_lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , shape=(3, 4, 64, 64) )
with torch.no_grad():
_lowerCAmelCase = model.decode(_lowerCAmelCase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
_lowerCAmelCase = model.decode(_lowerCAmelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> int:
_lowerCAmelCase = self.get_sd_vae_model()
_lowerCAmelCase = self.get_sd_image(_lowerCAmelCase )
_lowerCAmelCase = self.get_generator(_lowerCAmelCase )
with torch.no_grad():
_lowerCAmelCase = model.encode(_lowerCAmelCase ).latent_dist
_lowerCAmelCase = dist.sample(generator=_lowerCAmelCase )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
_lowerCAmelCase = sample[0, -1, -3:, -3:].flatten().cpu()
_lowerCAmelCase = torch.tensor(_lowerCAmelCase )
_lowerCAmelCase = 3E-3 if torch_device != "mps" else 1E-2
assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=_lowerCAmelCase )
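# A minimal round-trip sketch (not one of the tests above), using the same dummy
# config as the fast tests: encode an image batch to latents, then decode it back.
if __name__ == "__main__":
    vae = AutoencoderKL(
        block_out_channels=[32, 64],
        in_channels=3,
        out_channels=3,
        down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
        up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
        latent_channels=4,
    )
    x = torch.randn(1, 3, 32, 32)
    with torch.no_grad():
        latents = vae.encode(x).latent_dist.sample(generator=torch.manual_seed(0))
        recon = vae.decode(latents).sample
    # two down blocks -> one spatial halving: latents (1, 4, 16, 16), recon (1, 3, 32, 32)
    print(latents.shape, recon.shape)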
| 18 | 0 |
'''simple docstring'''
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("""covid_data""", """cases deaths recovered""")
def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    """simple docstring"""
    xpath_str = "//div[@class = \"maincounter-number\"]/span/text()"
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))
fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
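# A small usage sketch of the namedtuple returned above (the scraped counters are
# strings as served by the page, so no arithmetic is attempted here):
if __name__ == "__main__":
    stats = covid_stats()
    print(f"cases={stats.cases} deaths={stats.deaths} recovered={stats.recovered}")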
| 71 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class lowerCAmelCase_ ( PretrainedConfig ):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(self, vocab_size=50257, n_positions=1024, n_embd=768, n_layer=12, n_head=12, n_inner=None, activation_function="gelu_pytorch_tanh", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1E-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, attention_softmax_in_fp32=True, scale_attention_softmax_in_fp32=True, multi_query=True, **kwargs) -> List[Any]:
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
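# A quick sketch of what `attribute_map` buys you: the canonical transformers names
# resolve to this config's GPT-2-style fields (`lowerCAmelCase_` is the obfuscated
# class name from this file).
if __name__ == "__main__":
    config = lowerCAmelCase_()
    assert config.hidden_size == config.n_embd == 768
    assert config.num_hidden_layers == config.n_layer == 12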
| 18 | 0 |
'''simple docstring'''
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
_UpperCAmelCase : List[str] = {
'''iou_prediction_head.layers.0''': '''iou_prediction_head.proj_in''',
'''iou_prediction_head.layers.1''': '''iou_prediction_head.layers.0''',
'''iou_prediction_head.layers.2''': '''iou_prediction_head.proj_out''',
'''mask_decoder.output_upscaling.0''': '''mask_decoder.upscale_conv1''',
'''mask_decoder.output_upscaling.1''': '''mask_decoder.upscale_layer_norm''',
'''mask_decoder.output_upscaling.3''': '''mask_decoder.upscale_conv2''',
'''mask_downscaling.0''': '''mask_embed.conv1''',
'''mask_downscaling.1''': '''mask_embed.layer_norm1''',
'''mask_downscaling.3''': '''mask_embed.conv2''',
'''mask_downscaling.4''': '''mask_embed.layer_norm2''',
'''mask_downscaling.6''': '''mask_embed.conv3''',
'''point_embeddings''': '''point_embed''',
'''pe_layer.positional_encoding_gaussian_matrix''': '''shared_embedding.positional_embedding''',
'''image_encoder''': '''vision_encoder''',
'''neck.0''': '''neck.conv1''',
'''neck.1''': '''neck.layer_norm1''',
'''neck.2''': '''neck.conv2''',
'''neck.3''': '''neck.layer_norm2''',
'''patch_embed.proj''': '''patch_embed.projection''',
'''.norm''': '''.layer_norm''',
'''blocks''': '''layers''',
}
def replace_keys( state_dict ):
    '''simple docstring'''
    model_state_dict = {}
    state_dict.pop('''pixel_mean''', None)
    state_dict.pop('''pixel_std''', None)
    output_hypernetworks_mlps_pattern = R'''.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*'''
    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace('''layers.0''', '''proj_in''')
            elif layer_nb == 1:
                key = key.replace('''layers.1''', '''layers.0''')
            elif layer_nb == 2:
                key = key.replace('''layers.2''', '''proj_out''')
        model_state_dict[key] = value
    model_state_dict['''shared_embedding.positional_embedding'''] = model_state_dict[
        '''prompt_encoder.shared_embedding.positional_embedding'''
    ]
    return model_state_dict
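# A small demo (hypothetical keys) of the renaming rules implemented above: the
# mapping substitution runs first, then the output_hypernetworks_mlps renumbering.
if __name__ == "__main__":
    demo = {
        "mask_decoder.output_hypernetworks_mlps.0.layers.0.weight": torch.zeros(1),
        "prompt_encoder.pe_layer.positional_encoding_gaussian_matrix": torch.zeros(2, 128),
    }
    renamed = replace_keys(demo)
    assert "mask_decoder.output_hypernetworks_mlps.0.proj_in.weight" in renamed
    assert "shared_embedding.positional_embedding" in renamed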
def convert_sam_checkpoint( model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything" ) -> Tuple:
    '''simple docstring'''
    checkpoint_path = hf_hub_download(model_hub_id, f'checkpoints/{model_name}.pth')
    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23])
        config = SamConfig(
            vision_config=vision_config)
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31])
        config = SamConfig(
            vision_config=vision_config)
    state_dict = torch.load(checkpoint_path, map_location='''cpu''')
    state_dict = replace_keys(state_dict)
    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to('''cuda''')
    img_url = '''https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png'''
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('''RGB''')
    input_points = [[[400, 650]]]
    input_labels = [[1]]
    inputs = processor(images=np.array(raw_image), return_tensors='''pt''').to('''cuda''')
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()
    # (nesting the remaining checks under sam_vit_h_4b8939 follows the upstream
    # conversion script's layout)
    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors='''pt''').to('''cuda''')
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9712603092193604
        input_boxes = ((75, 275, 1725, 850),)
        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors='''pt''').to('''cuda''')
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8686015605926514
        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors='''pt''').to('''cuda''')
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ['''sam_vit_b_01ec64''', '''sam_vit_h_4b8939''', '''sam_vit_l_0b3195''']
parser.add_argument(
'''--model_name''',
default='''sam_vit_h_4b8939''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
parser.add_argument(
'''--model_hub_id''',
default='''ybelkada/segment-anything''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 72 |
'''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class lowerCAmelCase_ ( PretrainedConfig ):
    model_type = "data2vec-audio"
    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1E-5, feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embedding_groups=16, conv_pos_kernel_size=19, num_conv_pos_embeddings=5, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, ctc_loss_reduction="sum", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1, eos_token_id=2, add_adapter=False, adapter_kernel_size=3, adapter_stride=2, num_adapter_layers=3, output_hidden_size=None, **kwargs) -> Dict:
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
                f''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio(self) -> str:
        return math.prod(self.conv_stride)
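# Worked example of the `inputs_to_logits_ratio` property above: with the default
# conv_stride (5, 2, 2, 2, 2, 2, 2), one logit frame covers 5 * 2**6 = 320 samples.
if __name__ == "__main__":
    assert math.prod((5, 2, 2, 2, 2, 2, 2)) == 320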
| 18 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 73 |
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase_ ( SchedulerCommonTest ):
    scheduler_classes = (DDPMParallelScheduler,)
    def get_scheduler_config(self, **kwargs) -> int:
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config
def _snake_case ( self ) -> List[Any]:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCAmelCase )
def _snake_case ( self ) -> List[Any]:
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_lowerCAmelCase , beta_end=_lowerCAmelCase )
def _snake_case ( self ) -> Any:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_lowerCAmelCase )
def _snake_case ( self ) -> Optional[Any]:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_lowerCAmelCase )
def _snake_case ( self ) -> Optional[int]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowerCAmelCase )
def _snake_case ( self ) -> List[str]:
self.check_over_configs(thresholding=_lowerCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_lowerCAmelCase , prediction_type=_lowerCAmelCase , sample_max_value=_lowerCAmelCase , )
def _snake_case ( self ) -> int:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_lowerCAmelCase )
def _snake_case ( self ) -> Dict:
for t in [0, 500, 999]:
self.check_over_forward(time_step=_lowerCAmelCase )
def _snake_case ( self ) -> Union[str, Any]:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def _snake_case ( self ) -> Tuple:
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
_lowerCAmelCase = len(_lowerCAmelCase )
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
_lowerCAmelCase = self.dummy_sample_deter + 0.1
_lowerCAmelCase = self.dummy_sample_deter - 0.1
_lowerCAmelCase = samplea.shape[0]
_lowerCAmelCase = torch.stack([samplea, samplea, samplea] , dim=0 )
_lowerCAmelCase = torch.arange(_lowerCAmelCase )[0:3, None].repeat(1 , _lowerCAmelCase )
_lowerCAmelCase = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
_lowerCAmelCase = scheduler.batch_step_no_noise(_lowerCAmelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
_lowerCAmelCase = torch.sum(torch.abs(_lowerCAmelCase ) )
_lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 1153.1833 ) < 1E-2
assert abs(result_mean.item() - 0.5005 ) < 1E-3
def _snake_case ( self ) -> Dict:
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
_lowerCAmelCase = len(_lowerCAmelCase )
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
_lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(_lowerCAmelCase ) ):
# 1. predict noise residual
_lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase )
# 2. predict previous mean of sample x_t-1
_lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
_lowerCAmelCase = pred_prev_sample
_lowerCAmelCase = torch.sum(torch.abs(_lowerCAmelCase ) )
_lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3372 ) < 1E-3
def _snake_case ( self ) -> Optional[Any]:
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config(prediction_type="v_prediction" )
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
_lowerCAmelCase = len(_lowerCAmelCase )
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
_lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(_lowerCAmelCase ) ):
# 1. predict noise residual
_lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase )
# 2. predict previous mean of sample x_t-1
_lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
_lowerCAmelCase = pred_prev_sample
_lowerCAmelCase = torch.sum(torch.abs(_lowerCAmelCase ) )
_lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2631 ) < 1E-3
def _snake_case ( self ) -> Dict:
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
_lowerCAmelCase = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_lowerCAmelCase )
_lowerCAmelCase = scheduler.timesteps
for i, timestep in enumerate(_lowerCAmelCase ):
if i == len(_lowerCAmelCase ) - 1:
_lowerCAmelCase = -1
else:
_lowerCAmelCase = timesteps[i + 1]
_lowerCAmelCase = scheduler.previous_timestep(_lowerCAmelCase )
_lowerCAmelCase = prev_t.item()
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
    def _snake_case ( self ) -> Any:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)
    def _snake_case ( self ) -> Union[str, Any]:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def _snake_case ( self ) -> Optional[int]:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError, msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}"):
            scheduler.set_timesteps(timesteps=timesteps)
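# A minimal sketch (outside the unittest harness) of the custom-timesteps API that
# the last three tests exercise: pass an explicit descending schedule.
if __name__ == "__main__":
    demo_scheduler = DDPMParallelScheduler(num_train_timesteps=1000)
    demo_scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])
    print(demo_scheduler.timesteps)  # tensor([100,  87,  50,   1,   0])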
| 18 | 0 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""1.0.0a"""):
raise Exception("""requires fairseq >= 1.0.0a""")
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
lowercase_ = """Hello world! cécé herlolip"""
def convert_xlm_roberta_xl_checkpoint_to_pytorch( roberta_checkpoint_path , pytorch_dump_folder_path , classification_head ):
    """simple docstring"""
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path )
    roberta.eval() # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , )
    if classification_head:
        config.num_labels = roberta.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
    print('''Our RoBERTa config:''' , config )
    model = XLMRobertaXLForSequenceClassification(config ) if classification_head else XLMRobertaXLForMaskedLM(config )
    model.eval()
# Now let's copy all the weights.
# Embeddings
__SCREAMING_SNAKE_CASE : Union[str, Any] = roberta_sent_encoder.embed_tokens.weight
__SCREAMING_SNAKE_CASE : Dict = roberta_sent_encoder.embed_positions.weight
__SCREAMING_SNAKE_CASE : Optional[int] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
__SCREAMING_SNAKE_CASE : Dict = roberta_sent_encoder.layer_norm.weight
__SCREAMING_SNAKE_CASE : Tuple = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__SCREAMING_SNAKE_CASE : BertLayer = model.roberta.encoder.layer[i]
__SCREAMING_SNAKE_CASE : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
__SCREAMING_SNAKE_CASE : RobertaAttention = layer.attention
__SCREAMING_SNAKE_CASE : Dict = roberta_layer.self_attn_layer_norm.weight
__SCREAMING_SNAKE_CASE : Optional[int] = roberta_layer.self_attn_layer_norm.bias
# self attention
__SCREAMING_SNAKE_CASE : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
__SCREAMING_SNAKE_CASE : Optional[Any] = roberta_layer.self_attn.q_proj.weight
__SCREAMING_SNAKE_CASE : Any = roberta_layer.self_attn.q_proj.bias
__SCREAMING_SNAKE_CASE : Union[str, Any] = roberta_layer.self_attn.k_proj.weight
__SCREAMING_SNAKE_CASE : List[Any] = roberta_layer.self_attn.k_proj.bias
__SCREAMING_SNAKE_CASE : str = roberta_layer.self_attn.v_proj.weight
__SCREAMING_SNAKE_CASE : Any = roberta_layer.self_attn.v_proj.bias
# self-attention output
__SCREAMING_SNAKE_CASE : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
__SCREAMING_SNAKE_CASE : List[Any] = roberta_layer.self_attn.out_proj.weight
__SCREAMING_SNAKE_CASE : Union[str, Any] = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
__SCREAMING_SNAKE_CASE : Tuple = roberta_layer.final_layer_norm.weight
__SCREAMING_SNAKE_CASE : Dict = roberta_layer.final_layer_norm.bias
# intermediate
__SCREAMING_SNAKE_CASE : BertIntermediate = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
__SCREAMING_SNAKE_CASE : str = roberta_layer.fca.weight
__SCREAMING_SNAKE_CASE : List[Any] = roberta_layer.fca.bias
# output
__SCREAMING_SNAKE_CASE : BertOutput = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
__SCREAMING_SNAKE_CASE : int = roberta_layer.fca.weight
__SCREAMING_SNAKE_CASE : Optional[Any] = roberta_layer.fca.bias
# end of layer
if classification_head:
__SCREAMING_SNAKE_CASE : Any = roberta.model.classification_heads['''mnli'''].dense.weight
__SCREAMING_SNAKE_CASE : int = roberta.model.classification_heads['''mnli'''].dense.bias
__SCREAMING_SNAKE_CASE : List[str] = roberta.model.classification_heads['''mnli'''].out_proj.weight
__SCREAMING_SNAKE_CASE : str = roberta.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
__SCREAMING_SNAKE_CASE : Union[str, Any] = roberta.model.encoder.lm_head.dense.weight
__SCREAMING_SNAKE_CASE : int = roberta.model.encoder.lm_head.dense.bias
__SCREAMING_SNAKE_CASE : int = roberta.model.encoder.lm_head.layer_norm.weight
__SCREAMING_SNAKE_CASE : int = roberta.model.encoder.lm_head.layer_norm.bias
__SCREAMING_SNAKE_CASE : Dict = roberta.model.encoder.lm_head.weight
__SCREAMING_SNAKE_CASE : Any = roberta.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT ).unsqueeze(0 )  # batch of size 1
    our_output = model(input_ids )[0]
    if classification_head:
        their_output = roberta.model.classification_heads['''mnli'''](roberta.extract_features(input_ids ) )
    else:
        their_output = roberta.model(input_ids )[0]
    print(our_output.shape , their_output.shape )
    max_absolute_diff = torch.max(torch.abs(our_output - their_output ) ).item()
    print(F'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
    success = torch.allclose(our_output , their_output , atol=1E-3 )
    print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
    if not success:
        raise Exception('''Something went wRoNg''' )
    pathlib.Path(pytorch_dump_folder_path ).mkdir(parents=True , exist_ok=True )
    print(F'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--roberta_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
    args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
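# Example invocation (hypothetical script filename and paths), per the argparse
# definition above:
#
#   python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#       --roberta_checkpoint_path /path/to/fairseq_checkpoint_dir \
#       --pytorch_dump_folder_path /path/to/output \
#       --classification_head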
| 74 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}
class lowerCAmelCase_ ( PreTrainedTokenizer ):
    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], sp_model_kwargs = None, **kwargs) -> None:
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation.")
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self) -> Optional[int]:
        return len(self.sp_model)
    def get_vocab(self) -> Optional[int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self) -> Tuple:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d) -> Dict:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text( self , inputs ) -> str:
        if self.remove_space:
            outputs = " ".join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace("``" , "\"" ).replace("''" , "\"" )
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD" , outputs )
            outputs = "".join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize( self , text ) -> List[str]:
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text , out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , "" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces
    def _convert_token_to_id( self , token ) -> int:
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index ) -> str:
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens ) -> str:
        out_string = "".join(tokens ).replace(SPIECE_UNDERLINE , " " ).strip()
        return out_string
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_b + sep + cls
    def get_special_tokens_mask( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
        if token_ids_b is not None:
            return ([0] * len(token_ids_a )) + [1] + ([0] * len(token_ids_b )) + [1, 1]
        return ([0] * len(token_ids_a )) + [1, 1]
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_b is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1] + cls_segment_id
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def _decode( self , *args , **kwargs ) -> str:
        text = super()._decode(*args , **kwargs )
        text = text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" )
return text
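# A minimal usage sketch for the tokenizer above. It assumes the usual
# `from_pretrained` plumbing of PreTrainedTokenizer (class attributes such as
# `vocab_files_names`, not shown above) plus network access to fetch the
# "TsinghuaAI/CPM-Generate" vocabulary file.
def _example_cpm_tokenizer():
    tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
    ids = tokenizer.encode("你好,世界")  # SentencePiece ids
    print(tokenizer.decode(ids))        # "\u2582"/"\u2583" are mapped back to space/newline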
| 18 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_reformer'''] = ['''ReformerTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_reformer_fast'''] = ['''ReformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_reformer'''] = [
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
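# `_LazyModule` defers every heavy import until an attribute is first touched.
# A minimal sketch of the same idea using plain PEP 562 module-level __getattr__;
# the `sqrt`/`math` mapping is purely illustrative, not part of this module:
#
#     import importlib
#
#     _lazy_imports = {"sqrt": "math"}
#
#     def __getattr__(name):
#         if name in _lazy_imports:
#             return getattr(importlib.import_module(_lazy_imports[name]), name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")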
| 75 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
_DESCRIPTION = "\\nMean Squared Error (MSE) is the average of the squared differences between the predicted\nand actual values.\n"
_KWARGS_DESCRIPTION = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. ])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
    def _info( self ) -> datasets.MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
] , )
    def _get_feature_types( self ) -> dict:
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("float" ) ),
"references": datasets.Sequence(datasets.Value("float" ) ),
}
else:
return {
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
}
    def _compute( self , predictions , references , sample_weight=None , multioutput="uniform_average" , squared=True ) -> dict:
        mse = mean_squared_error(
            references , predictions , sample_weight=sample_weight , multioutput=multioutput , squared=squared )
        return {"mse": mse}
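# Cross-check of the docstring numbers above by calling scikit-learn directly;
# note the (references, predictions) argument order of mean_squared_error.
if __name__ == "__main__":
    preds = [2.5, 0.0, 2, 8]
    refs = [3, -0.5, 2, 7]
    print(mean_squared_error(refs, preds))                 # 0.375
    print(mean_squared_error(refs, preds, squared=False))  # RMSE ~= 0.6123724356957945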
| 18 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    def __init__( self ) -> None:
        self.connections = {}
    def add_node( self , node ) -> None:
        self.connections[node] = {}
    def add_transition_probability( self , node_a , node_b , probability ) -> None:
        if node_a not in self.connections:
            self.add_node(node_a )
        if node_b not in self.connections:
            self.add_node(node_b )
        self.connections[node_a][node_b] = probability
    def get_nodes( self ) -> list[str]:
        return list(self.connections )
    def transition( self , node ) -> str:
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""
def get_transition( start , transitions , number_of_steps ):
    graph = MarkovChainGraphUndirectedUnweighted()
    for node_a, node_b, probability in transitions:
        graph.add_transition_probability(node_a , node_b , probability )
    visited = Counter(graph.get_nodes() )
    node = start
    for _ in range(number_of_steps ):
        node = graph.transition(node )
        visited[node] += 1
    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
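# Example: a two-state chain; the outgoing probabilities of each node sum to 1,
# and after many steps the visit counts approximate the stationary distribution.
if __name__ == "__main__":
    example_transitions = [
        ("a", "a", 0.9),
        ("a", "b", 0.1),
        ("b", "a", 0.5),
        ("b", "b", 0.5),
    ]
    print(get_transition("a", example_transitions, 1000))  # Counter of node visits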
| 76 |
'''simple docstring'''
def different_signs(num_a : int , num_b : int ):
    '''simple docstring'''
    return num_a ^ num_b < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
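# The XOR trick works because the result is negative exactly when the two sign
# bits differ; a few quick checks:
if __name__ == "__main__":
    assert different_signs(1, -1) is True
    assert different_signs(-3, -7) is False
    assert different_signs(4, 4) is False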
| 18 | 0 |
"""simple docstring"""
def solution( n = 6008_5147_5143 ) -> int:
    """simple docstring"""
    try:
        n = int(n )
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int." )
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one." )
    prime = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            prime = i
            n //= i
        i += 1
    if n > 1:
        prime = n
    return int(prime )
if __name__ == "__main__":
print(f'''{solution() = }''')
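# Sanity checks: 13195 = 5 * 7 * 13 * 29, and Project Euler #3's default input
# 600851475143 has 6857 as its largest prime factor.
if __name__ == "__main__":
    assert solution(13195) == 29
    assert solution(2) == 2
    assert solution() == 6857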
| 77 |
'''simple docstring'''
from __future__ import annotations
def p_series(nth_term: int | float | str , power: int | float | str ):
    '''simple docstring'''
    if nth_term == "":
        return [""]
    nth_term = int(nth_term )
    power = int(power )
    series = []
    for temp in range(int(nth_term ) ):
        series.append(F'''1 / {pow(temp + 1 , int(power ) )}''' if series else "1" )
    return series
if __name__ == "__main__":
import doctest
doctest.testmod()
    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
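# Non-interactive example: the first five terms of the p-series for p = 2.
if __name__ == "__main__":
    assert p_series(5, 2) == ["1", "1 / 4", "1 / 9", "1 / 16", "1 / 25"]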
| 18 | 0 |
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'feature request',
'new model',
'wip',
]
def main() -> None:
'''simple docstring'''
    g = Github(os.environ["GITHUB_TOKEN"] )
    repo = g.get_repo("huggingface/transformers" )
    open_issues = repo.get_issues(state="open" )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="closed" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
if __name__ == "__main__":
main()
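# The closing logic above combines two clocks: days since the last update and days
# since creation. An illustrative helper that mirrors the "mark as stale" branch
# (names and dates below are examples, not part of the original script):
def _is_stale(updated_at, created_at, now):
    # second branch above: more than 23 days inactive and at least 30 days old
    return (now - updated_at).days > 23 and (now - created_at).days >= 30
# e.g. _is_stale(dt(2023, 1, 1), dt(2022, 11, 1), dt(2023, 2, 1)) -> True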
| 78 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : int = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : int = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Dict = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : int = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Optional[int] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Optional[int] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : int ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : str ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : List[Any] ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : List[str] ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : List[str] ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : str ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Dict ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Dict = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Optional[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Any = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Optional[int] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[str] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Any = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[str] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[str] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Optional[int] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Optional[int] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : str = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Tuple = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[str] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Any = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : int = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Tuple = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : str = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Dict = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Any = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[str] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : int = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Dict = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : str = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : int = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[str] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Tuple = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : str = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
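# All of the classes above follow one pattern: import-safe placeholders that fail
# loudly only when actually used without the torch backend installed. A minimal
# self-contained sketch of that pattern (`_RequiresTorch` and
# `ExampleTorchOnlyClass` are illustrative names, not part of the library):
class _RequiresTorch(type):
    def __getattr__(cls, key):
        # triggered on any class attribute lookup that is not defined, e.g. from_pretrained
        raise ImportError(f"{cls.__name__} requires torch, which is not installed.")

class ExampleTorchOnlyClass(metaclass=_RequiresTorch):
    pass
# Accessing e.g. ExampleTorchOnlyClass.from_pretrained raises ImportError at lookup time.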
| 18 | 0 |
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = 'gelu'
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_blenderbot_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
        model = TFBlenderbotModel(config=config ).get_decoder()
        input_ids = inputs_dict["""input_ids"""]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["""attention_mask"""][:1, :]
        head_mask = inputs_dict["""head_mask"""]
        self_attn_cache_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
        # append to next input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice , output_from_no_past_slice , rtol=1e-3 )
def prepare_blenderbot_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ) -> dict:
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            'conversational': TFBlenderbotForConditionalGeneration,
            'feature-extraction': TFBlenderbotModel,
            'summarization': TFBlenderbotForConditionalGeneration,
            'text2text-generation': TFBlenderbotForConditionalGeneration,
            'translation': TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFBlenderbotModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BlenderbotConfig )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests ( unittest.TestCase ):
    src_text = ['My friends are cool but they eat too many carbs.']
    model_name = 'facebook/blenderbot-400M-distill'
    @cached_property
    def tokenizer( self ):
        return BlenderbotTokenizer.from_pretrained(self.model_name )
    @cached_property
    def model( self ):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name )
        return model
    @slow
    def test_generation_from_short_input( self ):
        model_inputs = self.tokenizer(self.src_text , return_tensors="""tf""" )
        generated_ids = self.model.generate(
            model_inputs.input_ids , )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=True )[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
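# To run just the slow integration test above in isolation (requires TensorFlow,
# network access to download the 400M checkpoint, and RUN_SLOW=1 so the @slow
# decorator does not skip it):
if __name__ == "__main__":
    suite = unittest.TestLoader().loadTestsFromTestCase(TFBlenderbot400MIntegrationTests)
    unittest.TextTestRunner(verbosity=2).run(suite)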
| 79 |
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id , token=None ):
    '''simple docstring'''
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
    url = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
    result = requests.get(url , headers=headers ).json()
    job_links = {}
    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F'''&page={i + 2}''' , headers=headers ).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
        return job_links
    except Exception:
        print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
        return {}
def get_artifacts_links(workflow_run_id , token=None ):
    '''simple docstring'''
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
    url = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100'''
    result = requests.get(url , headers=headers ).json()
    artifacts = {}
    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F'''&page={i + 2}''' , headers=headers ).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
        return artifacts
    except Exception:
        print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
        return {}
def download_artifact(artifact_name , artifact_url , output_dir , token ):
    '''simple docstring'''
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
    result = requests.get(artifact_url , headers=headers , allow_redirects=False )
    download_url = result.headers["Location"]
    response = requests.get(download_url , allow_redirects=True )
    file_path = os.path.join(output_dir , F'''{artifact_name}.zip''' )
    with open(file_path , "wb" ) as fp:
        fp.write(response.content )
def get_errors_from_single_artifact(artifact_zip_path , job_links=None ):
    '''simple docstring'''
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path ) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename ):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename ) as f:
                        for line in f:
                            line = line.decode("UTF-8" ).strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": " )]
                                    error = line[line.index(": " ) + len(": " ) :]
                                    errors.append([error_line, error] )
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED " ):
                                # `test` is the test method that failed
                                test = line[len("FAILED " ) :]
                                failed_tests.append(test )
                            elif filename == "job_name.txt":
                                job_name = line
    if len(errors ) != len(failed_tests ):
        raise ValueError(
            F'''`errors` and `failed_tests` should have the same number of elements. Got {len(errors )} for `errors` '''
            F'''and {len(failed_tests )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'''
            " problem." )
    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name , None )
    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors , failed_tests )]
    return result
def get_all_errors(artifact_dir , job_links=None ):
    '''simple docstring'''
    errors = []
    paths = [os.path.join(artifact_dir , p ) for p in os.listdir(artifact_dir ) if p.endswith(".zip" )]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p , job_links=job_links ) )
    return errors
def reduce_by_error(logs , error_filter=None ):
    '''simple docstring'''
    counter = Counter()
    counter.update([x[1] for x in logs] )
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
    return r
def get_model(test ):
    '''simple docstring'''
    test = test.split("::" )[0]
    if test.startswith("tests/models/" ):
        test = test.split("/" )[2]
    else:
        test = None
    return test
def reduce_by_model(logs , error_filter=None ):
    '''simple docstring'''
    logs = [(x[0], x[1], get_model(x[2] )) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test] )
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values() )
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}
    r = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
    return r
def make_github_table(reduced_by_error ):
    '''simple docstring'''
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = F'''| {count} | {error[:100]} | |'''
        lines.append(line )
    return "\n".join(lines )
def make_github_table_per_model(reduced_by_model ):
    '''simple docstring'''
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items() )[0]
        line = F'''| {model} | {count} | {error[:60]} | {_count} |'''
        lines.append(line )
    return "\n".join(lines )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()
    os.makedirs(args.output_dir, exist_ok=True)
    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)
    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)
    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)
    errors = get_all_errors(args.output_dir, job_links=job_links)
    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])
    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)
    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)
    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)
    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)
    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
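    # A standalone illustration of the Counter-based reduction above, with made-up
    # log entries of the form [error line, error, failed test, job link]:
    # >>> logs = [["l1", "AssertionError", "tests/models/bert/test_a.py::t", None],
    # ...         ["l2", "AssertionError", "tests/models/bert/test_b.py::t", None],
    # ...         ["l3", "OSError", "tests/models/gpt2/test_c.py::t", None]]
    # >>> reduce_by_error(logs)["AssertionError"]["count"]
    # 2
    # >>> reduce_by_model(logs)["bert"]["count"]
    # 2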
| 18 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCamelCase : List[Any] = logging.get_logger(__name__)
__UpperCamelCase : int = {
"""facebook/xlm-roberta-xl""": """https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json""",
"""facebook/xlm-roberta-xxl""": """https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json""",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig ( PretrainedConfig ):
    model_type = 'xlm-roberta-xl'
    def __init__( self , vocab_size=25_0880 , hidden_size=2560 , num_hidden_layers=36 , num_attention_heads=32 , intermediate_size=1_0240 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=514 , type_vocab_size=1 , initializer_range=0.02 , layer_norm_eps=1e-05 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 80 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
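# The suite below exercises the single-step DPM-Solver scheduler: save/load round trips,
# permutations of solver options, and full denoising loops with known expected output means.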
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        # intentionally a no-op: the save/load round trip is already exercised in check_over_configs
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2574) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="dpmsolver++",
                            solver_order=order,
                            solver_type=solver_type,
                        )
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"
    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.0649) < 1e-3
    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
| 18 | 0 |
from __future__ import annotations
from typing import Any
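# Enumerates all 2^n subsequences of a sequence by deciding, at each index, whether to
# exclude or include the current element before recursing.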
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    if index == len(sequence):
        print(current_subsequence)
        return

    # branch 1: skip the element at `index`
    create_state_space_tree(sequence, current_subsequence, index + 1)
    # branch 2: take the element at `index`, recurse, then backtrack
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
seq: list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["A", "B", "C"])
generate_all_subsequences(seq)
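# For [3, 1, 2, 4] this prints all 16 subsequences; for ["A", "B", "C"], all 8.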
| 81 |
'''simple docstring'''
from __future__ import annotations
def average(nums: list) -> float:
    """Return the arithmetic mean of a list of numbers.

    >>> average([2, 4, 6])
    4.0
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 | 0 |
"""simple docstring"""
def reverse_words(input_str: str) -> str:
    """Reverse the order of the words in a sentence.

    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 |
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
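# Each test below compares streamed generation against the equivalent non-streamed generate() call.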
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)
    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)
    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)
    def test_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))
    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
| 18 | 0 |
"""simple docstring"""
import os
import time
import numpy as np
import onnxruntime as ort
lowerCAmelCase__ = '''1'''
lowerCAmelCase__ = '''0'''
lowerCAmelCase__ = '''1'''
sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('''Create inference session...''')
execution_provider = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
sess = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()

sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)
print('''Warm up phase...''')
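# A single untimed run lets ONNX Runtime allocate buffers and build kernels (and, with
# TensorRT, engines) before measurement begins.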
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Start inference...''')
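# Timed loop: the average latency per run is reported in milliseconds below.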
start_time = time.time()
max_iters = 2000
results = {}
for _ in range(max_iters):
    results = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Average Inference Time = {:.3f} ms'''.format((time.time() - start_time) * 1000 / max_iters))
| 83 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
_lowerCAmelCase = {0: "batch"}
_lowerCAmelCase = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
_lowerCAmelCase = {0: "batch", 1: "decoder_sequence"}
_lowerCAmelCase = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(_lowerCAmelCase , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
_lowerCAmelCase = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
_lowerCAmelCase , _lowerCAmelCase = self.num_layers
for i in range(_lowerCAmelCase ):
_lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"}
_lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"}
else:
_lowerCAmelCase = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase = super().outputs
else:
_lowerCAmelCase = super(_lowerCAmelCase , self ).outputs
if self.use_past:
_lowerCAmelCase , _lowerCAmelCase = self.num_layers
for i in range(_lowerCAmelCase ):
_lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"}
_lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]:
_lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Generate decoder inputs
_lowerCAmelCase = seq_length if not self.use_past else 1
_lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
_lowerCAmelCase = dict(**_lowerCAmelCase , **_lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
_lowerCAmelCase , _lowerCAmelCase = common_inputs["input_ids"].shape
_lowerCAmelCase = common_inputs["decoder_input_ids"].shape[1]
_lowerCAmelCase , _lowerCAmelCase = self.num_attention_heads
_lowerCAmelCase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowerCAmelCase = decoder_seq_length + 3
_lowerCAmelCase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_lowerCAmelCase = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(_lowerCAmelCase , _lowerCAmelCase )] , dim=1 )
_lowerCAmelCase = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_lowerCAmelCase , _lowerCAmelCase = self.num_layers
_lowerCAmelCase = min(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = max(_lowerCAmelCase , _lowerCAmelCase ) - min_num_layers
_lowerCAmelCase = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(_lowerCAmelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(_lowerCAmelCase ),
torch.zeros(_lowerCAmelCase ),
torch.zeros(_lowerCAmelCase ),
torch.zeros(_lowerCAmelCase ),
) )
# TODO: test this.
_lowerCAmelCase = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(_lowerCAmelCase , _lowerCAmelCase ):
common_inputs["past_key_values"].append((torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) )
return common_inputs
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]:
_lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
_lowerCAmelCase , _lowerCAmelCase = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
_lowerCAmelCase = seqlen + 2
_lowerCAmelCase , _lowerCAmelCase = self.num_layers
_lowerCAmelCase , _lowerCAmelCase = self.num_attention_heads
_lowerCAmelCase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowerCAmelCase = common_inputs["attention_mask"].dtype
_lowerCAmelCase = torch.cat(
[common_inputs["attention_mask"], torch.ones(_lowerCAmelCase , _lowerCAmelCase , dtype=_lowerCAmelCase )] , dim=1 )
_lowerCAmelCase = [
(torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) for _ in range(_lowerCAmelCase )
]
return common_inputs
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_lowerCAmelCase = compute_effective_axis_dimension(
_lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_lowerCAmelCase = tokenizer.num_special_tokens_to_add(_lowerCAmelCase )
_lowerCAmelCase = compute_effective_axis_dimension(
_lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowerCAmelCase )
# Generate dummy inputs according to compute batch and sequence
_lowerCAmelCase = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
_lowerCAmelCase = dict(tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase ) )
return common_inputs
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase )
elif self.task == "causal-lm":
_lowerCAmelCase = self._generate_dummy_inputs_for_causal_lm(
_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase )
else:
_lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase )
return common_inputs
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase = super()._flatten_past_key_values_(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
_lowerCAmelCase = super(_lowerCAmelCase , self )._flatten_past_key_values_(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
| 18 | 0 |
import requests
from bs4 import BeautifulSoup
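# Fetches a Google Scholar lookup page and returns the text of its third result link,
# which is typically the "Cited by N" count.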
def get_citation(base_url: str, params: dict) -> str:
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
if __name__ == "__main__":
params = {
'''title''': (
'''Precisely geometry controlled microsupercapacitors for ultrahigh areal '''
'''capacitance, volumetric capacitance, and energy density'''
),
'''journal''': '''Chem. Mater.''',
'''volume''': 30,
'''pages''': '''3979-3990''',
'''year''': 2018,
'''hl''': '''en''',
}
print(get_citation('''https://scholar.google.com/scholar_lookup''', params=params))
| 84 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
_SCREAMING_SNAKE_CASE = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"
_SCREAMING_SNAKE_CASE = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n"
_SCREAMING_SNAKE_CASE = "\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )
    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
| 18 | 0 |
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"
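# Building the tree is O(n); point updates are O(log n). Range queries descend the tree,
# splitting a range across both children only when it straddles the midpoint.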
class SegmentTree:
    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(self.collection) - 1)

    def update(self, i, val):
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print("*" * 50)
arr = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 85 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 18 | 0 |
import numpy as np
class Cell:
    """A single grid cell, tracking its parent and the A* costs g, h and f."""

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)
class Gridworld:
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neighbours(self, cell):
        """Return the in-bounds neighbours of `cell` (up to 8 surrounding positions)."""
        neighbour_cords = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_cords:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours
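# A* orders the open list by f = g + h, where g counts steps from the start and h is the
# squared Euclidean distance from the candidate cell to the goal.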
def astar(world, start, goal):
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neighbours(current):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]
if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
| 86 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
"tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
| 18 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCamelCase : List[str] = {
"""google/pix2struct-textcaps-base""": (
"""https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"""
),
}
class Pix2StructTextConfig(PretrainedConfig):
'''simple docstring'''
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
def __init__( self : Dict , UpperCAmelCase__ : Any=50_244 , UpperCAmelCase__ : Tuple=768 , UpperCAmelCase__ : Union[str, Any]=64 , UpperCAmelCase__ : Union[str, Any]=2_048 , UpperCAmelCase__ : List[str]=12 , UpperCAmelCase__ : Any=12 , UpperCAmelCase__ : int=32 , UpperCAmelCase__ : Optional[int]=128 , UpperCAmelCase__ : str=0.1 , UpperCAmelCase__ : List[str]=1e-6 , UpperCAmelCase__ : Dict=1.0 , UpperCAmelCase__ : Any="gelu_new" , UpperCAmelCase__ : Dict=0 , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : int=0 , UpperCAmelCase__ : List[str]=1 , UpperCAmelCase__ : str=False , UpperCAmelCase__ : Any=True , **UpperCAmelCase__ : Union[str, Any] , ) ->Union[str, Any]:
'''simple docstring'''
A__ = vocab_size
A__ = hidden_size
A__ = d_kv
A__ = d_ff
A__ = num_layers
A__ = num_heads
A__ = relative_attention_num_buckets
A__ = relative_attention_max_distance
A__ = dropout_rate
A__ = layer_norm_epsilon
A__ = initializer_factor
A__ = use_cache
A__ = eos_token_id
A__ = decoder_start_token_id
# for backwards compatibility
A__ = dense_act_fn
super().__init__(
pad_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , decoder_start_token_id=UpperCAmelCase__ , tie_word_embeddings=UpperCAmelCase__ , is_decoder=UpperCAmelCase__ , **UpperCAmelCase__ , )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Optional[Any] , UpperCAmelCase__ : Union[str, os.PathLike] , **UpperCAmelCase__ : Any) ->"PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(UpperCAmelCase__)
A__ , A__ = cls.get_config_dict(UpperCAmelCase__ , **UpperCAmelCase__)
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get('''model_type''') == "pix2struct":
A__ = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
return cls.from_dict(UpperCAmelCase__ , **UpperCAmelCase__)
class Pix2StructVisionConfig(PretrainedConfig):
'''simple docstring'''
    model_type = "pix2struct_vision_model"
def __init__( self : Optional[Any] , UpperCAmelCase__ : Any=768 , UpperCAmelCase__ : int=768 , UpperCAmelCase__ : int=2_048 , UpperCAmelCase__ : int=64 , UpperCAmelCase__ : Optional[Any]=12 , UpperCAmelCase__ : int=12 , UpperCAmelCase__ : List[Any]="gelu_new" , UpperCAmelCase__ : Union[str, Any]=1e-6 , UpperCAmelCase__ : Dict=0.0 , UpperCAmelCase__ : List[Any]=0.0 , UpperCAmelCase__ : Any=1e-10 , UpperCAmelCase__ : Any=1.0 , UpperCAmelCase__ : Optional[int]=4_096 , UpperCAmelCase__ : Dict=32 , UpperCAmelCase__ : int=128 , **UpperCAmelCase__ : Dict , ) ->Optional[Any]:
'''simple docstring'''
super().__init__(**UpperCAmelCase__)
A__ = hidden_size
A__ = patch_embed_hidden_size
A__ = d_ff
A__ = dropout_rate
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = initializer_range
A__ = initializer_factor
A__ = attention_dropout
A__ = layer_norm_eps
A__ = dense_act_fn
A__ = seq_len
A__ = relative_attention_num_buckets
A__ = relative_attention_max_distance
A__ = d_kv
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Tuple , UpperCAmelCase__ : Union[str, os.PathLike] , **UpperCAmelCase__ : Tuple) ->"PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(UpperCAmelCase__)
A__ , A__ = cls.get_config_dict(UpperCAmelCase__ , **UpperCAmelCase__)
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get('''model_type''') == "pix2struct":
A__ = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
return cls.from_dict(UpperCAmelCase__ , **UpperCAmelCase__)
class Pix2StructConfig(PretrainedConfig):
'''simple docstring'''
    model_type = "pix2struct"
    is_composition = True
def __init__( self : List[str] , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : str=1.0 , UpperCAmelCase__ : Optional[int]=0.02 , UpperCAmelCase__ : List[Any]=False , UpperCAmelCase__ : List[Any]=False , UpperCAmelCase__ : Dict=True , **UpperCAmelCase__ : List[Any] , ) ->Union[str, Any]:
'''simple docstring'''
super().__init__(tie_word_embeddings=UpperCAmelCase__ , is_encoder_decoder=UpperCAmelCase__ , **UpperCAmelCase__)
if text_config is None:
A__ = {}
logger.info('''text_config is None. Initializing the Pix2StructTextConfig with default values.''')
if vision_config is None:
A__ = {}
logger.info('''vision_config is None. Initializing the Pix2StructVisionConfig with default values.''')
        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)
A__ = self.text_config.decoder_start_token_id
A__ = self.text_config.pad_token_id
A__ = self.text_config.eos_token_id
A__ = initializer_factor
A__ = initializer_range
A__ = self.initializer_range
A__ = self.initializer_range
A__ = is_vqa
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Optional[Any] , UpperCAmelCase__ : PixaStructTextConfig , UpperCAmelCase__ : PixaStructVisionConfig , **UpperCAmelCase__ : Dict) ->Tuple:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[str]:
'''simple docstring'''
A__ = copy.deepcopy(self.__dict__)
A__ = self.text_config.to_dict()
A__ = self.vision_config.to_dict()
A__ = self.__class__.model_type
return output
| 87 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"facebook/deit-base-distilled-patch16-224": (
"https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 18 | 0 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase__ ( A_ ,unittest.TestCase ):
__UpperCAmelCase = None
__UpperCAmelCase = BloomTokenizerFast
__UpperCAmelCase = BloomTokenizerFast
__UpperCAmelCase = True
__UpperCAmelCase = False
__UpperCAmelCase = '''tokenizer_file'''
__UpperCAmelCase = {'''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''unk_token''': '''<unk>''', '''pad_token''': '''<pad>'''}
def UpperCamelCase_ ( self) -> int:
super().setUp()
_lowerCamelCase : Optional[int] = BloomTokenizerFast.from_pretrained("""bigscience/tokenizer""")
tokenizer.save_pretrained(self.tmpdirname)
def UpperCamelCase_ ( self , **SCREAMING_SNAKE_CASE) -> str:
kwargs.update(self.special_tokens_map)
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> str:
_lowerCamelCase : Union[str, Any] = self.get_rust_tokenizer()
_lowerCamelCase : List[str] = ["""The quick brown fox</s>""", """jumps over the lazy dog</s>"""]
_lowerCamelCase : Union[str, Any] = [[2175, 2_3714, 7_3173, 14_4252, 2], [77, 13_2619, 3478, 368, 10_9586, 3_5433, 2]]
_lowerCamelCase : Any = tokenizer.batch_encode_plus(SCREAMING_SNAKE_CASE)["""input_ids"""]
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
_lowerCamelCase : List[str] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE)
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE=6) -> str:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
_lowerCamelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
_lowerCamelCase : Optional[Any] = """This is a simple input"""
_lowerCamelCase : List[Any] = ["""This is a simple input 1""", """This is a simple input 2"""]
_lowerCamelCase : List[Any] = ("""This is a simple input""", """This is a pair""")
_lowerCamelCase : int = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
try:
tokenizer_r.encode(SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE)
tokenizer_r.encode_plus(SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE)
tokenizer_r.batch_encode_plus(SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE)
tokenizer_r.encode(SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE)
tokenizer_r.batch_encode_plus(SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE)
except ValueError:
self.fail("""Bloom Tokenizer should be able to deal with padding""")
tokenizer_r.pad_token = None # Hotfixing padding = None
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="""max_length""")
# Simple input
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="""max_length""")
# Simple input
self.assertRaises(
SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="""max_length""" , )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="""max_length""")
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="""max_length""")
# Pair input
self.assertRaises(
SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="""max_length""" , )
def UpperCamelCase_ ( self) -> Union[str, Any]:
_lowerCamelCase : Tuple = self.get_rust_tokenizer()
_lowerCamelCase : int = load_dataset("""xnli""" , """all_languages""" , split="""test""" , streaming=SCREAMING_SNAKE_CASE)
_lowerCamelCase : List[Any] = next(iter(SCREAMING_SNAKE_CASE))["""premise"""] # pick up one data
_lowerCamelCase : Optional[int] = list(sample_data.values())
_lowerCamelCase : str = list(map(tokenizer.encode , SCREAMING_SNAKE_CASE))
_lowerCamelCase : List[Any] = [tokenizer.decode(SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE) for x in output_tokens]
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> Tuple:
# The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have
# any sequence length constraints. This test of the parent class will fail since it relies on the
# maximum sequence length of the positoonal embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map) , 1)
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]) , 1)
| 88 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
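# Lazy import structure: torch-dependent model classes are only imported when first accessed.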
_import_structure = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_mctct"] = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 18 | 0 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
SCREAMING_SNAKE_CASE : Dict = logging.getLogger(__name__)
class _lowerCamelCase( _a ):
lowercase_ : str = """summarization"""
lowercase_ : List[str] = ["""loss"""]
lowercase_ : Union[str, Any] = ROUGE_KEYS
lowercase_ : int = """rouge2"""
def __init__( self, lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
if hparams.sortish_sampler and hparams.gpus > 1:
_lowercase : Optional[int] = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training')
if hparams.sortish_sampler:
raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously')
super().__init__(lowerCamelCase, num_labels=lowerCamelCase, mode=self.mode, **lowerCamelCase)
use_task_specific_params(self.model, 'summarization')
save_git_info(self.hparams.output_dir)
_lowercase : Any = Path(self.output_dir) / 'metrics.json'
_lowercase : Optional[Any] = Path(self.output_dir) / 'hparams.pkl'
pickle_save(self.hparams, self.hparams_save_path)
_lowercase : Tuple = 0
_lowercase : Tuple = defaultdict(lowerCamelCase)
_lowercase : Any = self.config.model_type
_lowercase : Dict = self.config.tgt_vocab_size if self.model_type == 'fsmt' else self.config.vocab_size
_lowercase : dict = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
_lowercase : Optional[int] = {
'train': self.hparams.n_train,
'val': self.hparams.n_val,
'test': self.hparams.n_test,
}
_lowercase : Optional[int] = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
_lowercase : List[str] = {
'train': self.hparams.max_target_length,
'val': self.hparams.val_max_target_length,
'test': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F'''target_lens: {self.target_lens}'''
assert self.target_lens["train"] <= self.target_lens["test"], F'''target_lens: {self.target_lens}'''
if self.hparams.freeze_embeds:
freeze_embeds(self.model)
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder())
assert_all_frozen(self.model.get_encoder())
_lowercase : int = get_git_info()['repo_sha']
_lowercase : str = hparams.num_workers
_lowercase : str = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, lowerCamelCase):
_lowercase : Dict = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
_lowercase : List[str] = self.decoder_start_token_id
_lowercase : str = (
SeqaSeqDataset if hasattr(self.tokenizer, 'prepare_seq2seq_batch') else LegacySeqaSeqDataset
)
_lowercase : Tuple = False
_lowercase : List[str] = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
_lowercase : Optional[Any] = self.hparams.eval_max_gen_length
else:
_lowercase : List[Any] = self.model.config.max_length
_lowercase : Any = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def UpperCamelCase ( self, lowerCamelCase) -> Dict[str, List[str]]:
"""simple docstring"""
_lowercase : str = {
k: self.tokenizer.batch_decode(v.tolist()) if 'mask' not in k else v.shape for k, v in batch.items()
}
save_json(lowerCamelCase, Path(self.output_dir) / 'text_batch.json')
save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / 'tok_batch.json')
_lowercase : Optional[Any] = True
return readable_batch
def UpperCamelCase ( self, lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
return self.model(lowerCamelCase, **lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase) -> str:
"""simple docstring"""
_lowercase : Union[str, Any] = self.tokenizer.batch_decode(
lowerCamelCase, skip_special_tokens=lowerCamelCase, clean_up_tokenization_spaces=lowerCamelCase)
return lmap(str.strip, lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase) -> Tuple:
"""simple docstring"""
_lowercase : Union[str, Any] = self.tokenizer.pad_token_id
_lowercase , _lowercase : int = batch['input_ids'], batch['attention_mask']
_lowercase : Dict = batch['labels']
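        # build the decoder inputs by shifting the labels one position to the
        # right (teacher forcing); T5-style models expose their own _shift_right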
if isinstance(self.model, lowerCamelCase):
_lowercase : int = self.model._shift_right(lowerCamelCase)
else:
_lowercase : Optional[int] = shift_tokens_right(lowerCamelCase, lowerCamelCase)
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
_lowercase : int = decoder_input_ids
self.save_readable_batch(lowerCamelCase)
_lowercase : Union[str, Any] = self(lowerCamelCase, attention_mask=lowerCamelCase, decoder_input_ids=lowerCamelCase, use_cache=lowerCamelCase)
_lowercase : Optional[Any] = outputs['logits']
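        # with label smoothing (eps > 0) the loss mixes the target NLL with a
        # uniform penalty over the vocabulary, discouraging overconfident logits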
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
_lowercase : List[Any] = nn.CrossEntropyLoss(ignore_index=lowerCamelCase)
assert lm_logits.shape[-1] == self.vocab_size
_lowercase : List[Any] = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
else:
_lowercase : Union[str, Any] = nn.functional.log_softmax(lowerCamelCase, dim=-1)
_lowercase , _lowercase : Dict = label_smoothed_nll_loss(
lowerCamelCase, lowerCamelCase, self.hparams.label_smoothing, ignore_index=lowerCamelCase)
return (loss,)
@property
def UpperCamelCase ( self) -> int:
"""simple docstring"""
return self.tokenizer.pad_token_id
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> Dict:
"""simple docstring"""
_lowercase : Optional[int] = self._step(lowerCamelCase)
_lowercase : Tuple = dict(zip(self.loss_names, lowerCamelCase))
# tokens per batch
_lowercase : List[str] = batch['input_ids'].ne(self.pad).sum() + batch['labels'].ne(self.pad).sum()
_lowercase : Dict = batch['input_ids'].shape[0]
_lowercase : Any = batch['input_ids'].eq(self.pad).sum()
_lowercase : str = batch['input_ids'].eq(self.pad).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> Dict:
"""simple docstring"""
return self._generative_step(lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase="val") -> Dict:
"""simple docstring"""
self.step_count += 1
_lowercase : Union[str, Any] = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
_lowercase : Optional[Any] = losses['loss']
_lowercase : Any = {
k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ['gen_time', 'gen_len']
}
_lowercase : Tuple = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
_lowercase : torch.FloatTensor = torch.tensor(lowerCamelCase).type_as(lowerCamelCase)
generative_metrics.update({k: v.item() for k, v in losses.items()})
losses.update(lowerCamelCase)
_lowercase : Dict = {F'''{prefix}_avg_{k}''': x for k, x in losses.items()}
_lowercase : Dict = self.step_count
self.metrics[prefix].append(lowerCamelCase) # callback writes this to self.metrics_save_path
_lowercase : Optional[Any] = flatten_list([x['preds'] for x in outputs])
return {
"log": all_metrics,
"preds": preds,
F'''{prefix}_loss''': loss,
F'''{prefix}_{self.val_metric}''': metric_tensor,
}
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> Dict:
"""simple docstring"""
return calculate_rouge(lowerCamelCase, lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase) -> dict:
"""simple docstring"""
_lowercase : List[str] = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
_lowercase : int = self.model.generate(
batch['input_ids'], attention_mask=batch['attention_mask'], use_cache=lowerCamelCase, decoder_start_token_id=self.decoder_start_token_id, num_beams=self.eval_beams, max_length=self.eval_max_length, )
_lowercase : Optional[Any] = (time.time() - ta) / batch['input_ids'].shape[0]
_lowercase : List[str] = self.ids_to_clean_text(lowerCamelCase)
_lowercase : List[str] = self.ids_to_clean_text(batch['labels'])
_lowercase : List[str] = self._step(lowerCamelCase)
_lowercase : Optional[int] = dict(zip(self.loss_names, lowerCamelCase))
_lowercase : Dict = self.calc_generative_metrics(lowerCamelCase, lowerCamelCase)
_lowercase : Optional[int] = np.mean(lmap(lowerCamelCase, lowerCamelCase))
base_metrics.update(gen_time=lowerCamelCase, gen_len=lowerCamelCase, preds=lowerCamelCase, target=lowerCamelCase, **lowerCamelCase)
return base_metrics
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> Tuple:
"""simple docstring"""
return self._generative_step(lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase) -> Any:
"""simple docstring"""
return self.validation_epoch_end(lowerCamelCase, prefix='test')
def UpperCamelCase ( self, lowerCamelCase) -> SeqaSeqDataset:
"""simple docstring"""
_lowercase : int = self.n_obs[type_path]
_lowercase : Union[str, Any] = self.target_lens[type_path]
_lowercase : Dict = self.dataset_class(
self.tokenizer, type_path=lowerCamelCase, n_obs=lowerCamelCase, max_target_length=lowerCamelCase, **self.dataset_kwargs, )
return dataset
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase = False) -> DataLoader:
"""simple docstring"""
_lowercase : Optional[Any] = self.get_dataset(lowerCamelCase)
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
_lowercase : Dict = dataset.make_sortish_sampler(lowerCamelCase, distributed=self.hparams.gpus > 1)
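            # the sortish sampler groups examples of similar length into batches
            # (with some shuffling noise) to reduce padding without fixing the order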
return DataLoader(
lowerCamelCase, batch_size=lowerCamelCase, collate_fn=dataset.collate_fn, shuffle=lowerCamelCase, num_workers=self.num_workers, sampler=lowerCamelCase, )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
_lowercase : Dict = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1)
return DataLoader(
lowerCamelCase, batch_sampler=lowerCamelCase, collate_fn=dataset.collate_fn, num_workers=self.num_workers, )
else:
return DataLoader(
lowerCamelCase, batch_size=lowerCamelCase, collate_fn=dataset.collate_fn, shuffle=lowerCamelCase, num_workers=self.num_workers, sampler=lowerCamelCase, )
def UpperCamelCase ( self) -> DataLoader:
"""simple docstring"""
_lowercase : Optional[Any] = self.get_dataloader('train', batch_size=self.hparams.train_batch_size, shuffle=lowerCamelCase)
return dataloader
def UpperCamelCase ( self) -> DataLoader:
"""simple docstring"""
return self.get_dataloader('val', batch_size=self.hparams.eval_batch_size)
def UpperCamelCase ( self) -> DataLoader:
"""simple docstring"""
return self.get_dataloader('test', batch_size=self.hparams.eval_batch_size)
@staticmethod
def UpperCamelCase ( lowerCamelCase, lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
BaseTransformer.add_model_specific_args(lowerCamelCase, lowerCamelCase)
add_generic_args(lowerCamelCase, lowerCamelCase)
parser.add_argument(
'--max_source_length', default=10_24, type=lowerCamelCase, help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
), )
parser.add_argument(
'--max_target_length', default=56, type=lowerCamelCase, help=(
            'The maximum total output (target) sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
), )
parser.add_argument(
'--val_max_target_length', default=1_42, type=lowerCamelCase, help=(
            'The maximum total output (target) sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
), )
parser.add_argument(
'--test_max_target_length', default=1_42, type=lowerCamelCase, help=(
            'The maximum total output (target) sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
), )
parser.add_argument('--freeze_encoder', action='store_true')
parser.add_argument('--freeze_embeds', action='store_true')
parser.add_argument('--sortish_sampler', action='store_true', default=lowerCamelCase)
parser.add_argument('--overwrite_output_dir', action='store_true', default=lowerCamelCase)
parser.add_argument('--max_tokens_per_batch', type=lowerCamelCase, default=lowerCamelCase)
parser.add_argument('--logger_name', type=lowerCamelCase, choices=['default', 'wandb', 'wandb_shared'], default='default')
parser.add_argument('--n_train', type=lowerCamelCase, default=-1, required=lowerCamelCase, help='# examples. -1 means use all.')
parser.add_argument('--n_val', type=lowerCamelCase, default=5_00, required=lowerCamelCase, help='# examples. -1 means use all.')
parser.add_argument('--n_test', type=lowerCamelCase, default=-1, required=lowerCamelCase, help='# examples. -1 means use all.')
parser.add_argument(
            '--task', type=lowerCamelCase, default='summarization', required=lowerCamelCase, help='Task to run: summarization or translation.')
parser.add_argument('--label_smoothing', type=lowerCamelCase, default=0.0, required=lowerCamelCase)
parser.add_argument('--src_lang', type=lowerCamelCase, default='', required=lowerCamelCase)
parser.add_argument('--tgt_lang', type=lowerCamelCase, default='', required=lowerCamelCase)
parser.add_argument('--eval_beams', type=lowerCamelCase, default=lowerCamelCase, required=lowerCamelCase)
parser.add_argument(
'--val_metric', type=lowerCamelCase, default=lowerCamelCase, required=lowerCamelCase, choices=['bleu', 'rouge2', 'loss', None])
parser.add_argument('--eval_max_gen_length', type=lowerCamelCase, default=lowerCamelCase, help='never generate more than n tokens')
parser.add_argument('--save_top_k', type=lowerCamelCase, default=1, required=lowerCamelCase, help='How many checkpoints to save')
parser.add_argument(
'--early_stopping_patience', type=lowerCamelCase, default=-1, required=lowerCamelCase, help=(
'-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'
            ' val_check_interval will affect it.'
), )
return parser
class _lowerCamelCase( _a ):
lowercase_ : List[Any] = """translation"""
lowercase_ : List[Any] = ["""loss"""]
lowercase_ : int = ["""bleu"""]
lowercase_ : Dict = """bleu"""
def __init__( self, lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
super().__init__(lowerCamelCase, **lowerCamelCase)
_lowercase : Dict = hparams.src_lang
_lowercase : int = hparams.tgt_lang
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> dict:
"""simple docstring"""
return calculate_bleu(lowerCamelCase, lowerCamelCase)
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_=None ) -> SummarizationModule:
Path(args.output_dir ).mkdir(exist_ok=lowerCamelCase_ )
check_output_dir(lowerCamelCase_ , expected_items=3 )
if model is None:
if "summarization" in args.task:
_lowercase : SummarizationModule = SummarizationModule(lowerCamelCase_ )
else:
_lowercase : SummarizationModule = TranslationModule(lowerCamelCase_ )
_lowercase : Tuple = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith('/tmp' )
or str(args.output_dir ).startswith('/var' )
):
_lowercase : Dict = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
_lowercase : Optional[Any] = os.environ.get('WANDB_PROJECT' , lowerCamelCase_ )
_lowercase : Optional[int] = WandbLogger(name=model.output_dir.name , project=lowerCamelCase_ )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
_lowercase : List[str] = WandbLogger(name=model.output_dir.name , project=F'''hf_{dataset}''' )
if args.early_stopping_patience >= 0:
_lowercase : Any = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
else:
_lowercase : Union[str, Any] = False
_lowercase : Optional[int] = args.val_metric == 'loss'
_lowercase : pl.Trainer = generic_train(
lowerCamelCase_ , lowerCamelCase_ , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , lowerCamelCase_ ) , early_stopping_callback=lowerCamelCase_ , logger=lowerCamelCase_ , )
pickle_save(model.hparams , model.output_dir / 'hparams.pkl' )
if not args.do_predict:
return model
_lowercase : int = ''
_lowercase : int = sorted(glob.glob(os.path.join(args.output_dir , '*.ckpt' ) , recursive=lowerCamelCase_ ) )
if checkpoints:
_lowercase : List[str] = checkpoints[-1]
_lowercase : str = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
SCREAMING_SNAKE_CASE : str = pl.Trainer.add_argparse_args(parser)
SCREAMING_SNAKE_CASE : Union[str, Any] = SummarizationModule.add_model_specific_args(parser, os.getcwd())
SCREAMING_SNAKE_CASE : int = parser.parse_args()
main(args)
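
# Hypothetical invocation (script name, paths and sizes are illustrative only,
# and only flags referenced above are shown):
#
#   python finetune_seq2seq.py --data_dir ./data --output_dir ./out --gpus 1 \
#       --train_batch_size 8 --task summarization --n_val 500 --do_predict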
| 89 |
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class lowerCAmelCase_ ( __magic_name__ ):
__lowerCamelCase : int = ["pixel_values"]
def __init__( self , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = PILImageResampling.BICUBIC , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = True , _lowerCAmelCase = 1 / 255 , _lowerCAmelCase = True , _lowerCAmelCase = IMAGENET_DEFAULT_MEAN , _lowerCAmelCase = IMAGENET_DEFAULT_STD , **_lowerCAmelCase , ) -> None:
super().__init__(**_lowerCAmelCase )
_lowerCAmelCase = size if size is not None else {"shortest_edge": 224}
_lowerCAmelCase = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
_lowerCAmelCase = crop_size if crop_size is not None else {"height": 224, "width": 224}
_lowerCAmelCase = get_size_dict(_lowerCAmelCase , param_name="crop_size" )
_lowerCAmelCase = do_resize
_lowerCAmelCase = size
_lowerCAmelCase = resample
_lowerCAmelCase = do_center_crop
_lowerCAmelCase = crop_size
_lowerCAmelCase = do_rescale
_lowerCAmelCase = rescale_factor
_lowerCAmelCase = do_normalize
_lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_lowerCAmelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = PILImageResampling.BICUBIC , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> np.ndarray:
_lowerCAmelCase = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
_lowerCAmelCase = int((256 / 224) * size["shortest_edge"] )
_lowerCAmelCase = get_resize_output_image_size(_lowerCAmelCase , size=_lowerCAmelCase , default_to_square=_lowerCAmelCase )
_lowerCAmelCase = {"height": output_size[0], "width": output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f'''Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}''' )
return resize(
_lowerCAmelCase , size=(size_dict["height"], size_dict["width"]) , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> np.ndarray:
_lowerCAmelCase = get_size_dict(_lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size dict must have keys \'height\' and \'width\'. Got {size.keys()}''' )
return center_crop(_lowerCAmelCase , size=(size["height"], size["width"]) , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> np.ndarray:
return rescale(_lowerCAmelCase , scale=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> np.ndarray:
return normalize(_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = ChannelDimension.FIRST , **_lowerCAmelCase , ) -> BatchFeature:
_lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase = resample if resample is not None else self.resample
_lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_lowerCAmelCase = image_mean if image_mean is not None else self.image_mean
_lowerCAmelCase = image_std if image_std is not None else self.image_std
_lowerCAmelCase = size if size is not None else self.size
_lowerCAmelCase = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
_lowerCAmelCase = crop_size if crop_size is not None else self.crop_size
_lowerCAmelCase = get_size_dict(_lowerCAmelCase , param_name="crop_size" )
_lowerCAmelCase = make_list_of_images(_lowerCAmelCase )
if not valid_images(_lowerCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
_lowerCAmelCase = [to_numpy_array(_lowerCAmelCase ) for image in images]
if do_resize:
_lowerCAmelCase = [self.resize(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) for image in images]
if do_center_crop:
_lowerCAmelCase = [self.center_crop(_lowerCAmelCase , _lowerCAmelCase ) for image in images]
if do_rescale:
_lowerCAmelCase = [self.rescale(_lowerCAmelCase , _lowerCAmelCase ) for image in images]
if do_normalize:
_lowerCAmelCase = [self.normalize(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) for image in images]
_lowerCAmelCase = [to_channel_dimension_format(_lowerCAmelCase , _lowerCAmelCase ) for image in images]
_lowerCAmelCase = {"pixel_values": images}
return BatchFeature(data=_lowerCAmelCase , tensor_type=_lowerCAmelCase )
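
# Minimal hedged usage sketch; the class name is the obfuscated one defined
# above, and `pil_image` is assumed to exist:
#
#   processor = lowerCAmelCase_(size={"shortest_edge": 224})
#   batch = processor(images=[pil_image], return_tensors="pt")
#   batch["pixel_values"].shape   # -> (1, 3, 224, 224)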
| 18 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class a__ ( a__ ):
'''simple docstring'''
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_ ) -> None:
warnings.warn(
'''The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use ImageGPTImageProcessor instead.''' , FutureWarning , )
super().__init__(*lowerCamelCase_ , **lowerCamelCase_ ) | 90 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class lowerCAmelCase_ ( __magic_name__ ):
__lowerCamelCase : Union[str, Any] = "donut-swin"
__lowerCamelCase : int = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , _lowerCAmelCase=224 , _lowerCAmelCase=4 , _lowerCAmelCase=3 , _lowerCAmelCase=96 , _lowerCAmelCase=[2, 2, 6, 2] , _lowerCAmelCase=[3, 6, 12, 24] , _lowerCAmelCase=7 , _lowerCAmelCase=4.0 , _lowerCAmelCase=True , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.1 , _lowerCAmelCase="gelu" , _lowerCAmelCase=False , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-5 , **_lowerCAmelCase , ) -> Optional[Any]:
super().__init__(**_lowerCAmelCase )
_lowerCAmelCase = image_size
_lowerCAmelCase = patch_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = embed_dim
_lowerCAmelCase = depths
_lowerCAmelCase = len(_lowerCAmelCase )
_lowerCAmelCase = num_heads
_lowerCAmelCase = window_size
_lowerCAmelCase = mlp_ratio
_lowerCAmelCase = qkv_bias
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = drop_path_rate
_lowerCAmelCase = hidden_act
_lowerCAmelCase = use_absolute_embeddings
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowerCAmelCase = int(embed_dim * 2 ** (len(_lowerCAmelCase ) - 1) )
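        # e.g. the defaults above (embed_dim=96, 4 depth entries) give 96 * 2**3 = 768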
| 18 | 0 |
"""simple docstring"""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_lowercase = logging.get_logger(__name__)
_lowercase = {'''vocab_file''': '''spiece.model'''}
_lowercase = {
'''vocab_file''': {
'''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''',
}
}
_lowercase = {
'''AI-Sweden/gpt-sw3-126m''': 20_48,
'''AI-Sweden/gpt-sw3-350m''': 20_48,
'''AI-Sweden/gpt-sw3-1.6b''': 20_48,
'''AI-Sweden/gpt-sw3-6.7b''': 20_48,
'''AI-Sweden/gpt-sw3-20b''': 20_48,
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: List[str] = VOCAB_FILES_NAMES
_lowerCamelCase: List[Any] = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase: int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase: List[str] = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple ,A_ : Any ,A_ : int=False ,A_ : List[Any]=False ,A_ : Optional[int]=False ,A_ : Union[str, Any]=None ,A_ : Tuple=None ,A_ : Any=None ,A_ : int=None ,A_ : Optional[Dict[str, Any]] = None ,**A_ : Tuple ,) -> None:
A = {} if sp_model_kwargs is None else sp_model_kwargs
A = kwargs.get('name_or_path' )
if name_or_path is None:
logger.warning(
'name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'
                ' if you are testing the model, this can safely be ignored' )
A = 'None'
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
A = '<|endoftext|>' if eos_token is None else eos_token
A = '<unk>' if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
A = unk_token if pad_token is None else pad_token
A = eos_token if bos_token is None else bos_token
else:
A = '<pad>' if pad_token is None else pad_token
A = '<s>' if bos_token is None else bos_token
super().__init__(
do_lower_case=A_ ,remove_space=A_ ,keep_accents=A_ ,bos_token=A_ ,eos_token=A_ ,unk_token=A_ ,pad_token=A_ ,sp_model_kwargs=self.sp_model_kwargs ,**A_ ,)
A = do_lower_case
A = remove_space
A = keep_accents
A = vocab_file
A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A_ )
# Used for whitespace normalization in input texts
        # fmt: off
A = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', ''}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
A = re.compile(
F'[{"".join(map(A_ ,list(range(0 ,9 ) ) + list(range(11 ,32 ) ) + list(range(127 ,160 ) ) + [160, 173, 8203] ) )}]' )
def __getstate__( self : Optional[Any] ) -> Tuple:
A = self.__dict__.copy()
A = None
return state
def __setstate__( self : Dict ,A_ : List[str] ) -> str:
A = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
A = {}
A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
return len(self.sp_model )
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : str ) -> str:
A = self.non_printing_characters_re.sub('' ,A_ )
# Normalize whitespaces
A = ''.join([char if char not in self.whitespaces else ' ' for char in text] )
# NFC Unicode normalization
A = unicodedata.normalize('NFC' ,A_ )
return text
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : str ,**A_ : Any ) -> List[str]:
A = self.preprocess_text(A_ )
return self.sp_model.encode(A_ ,out_type=A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ) -> int:
return self.sp_model.PieceToId(A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : int ) -> str:
return self.sp_model.IdToPiece(A_ )
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : str ) -> str:
return out_string
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[str] ) -> str:
A = []
A = ''
A = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(A_ ) + token
A = True
A = []
else:
current_sub_tokens.append(A_ )
A = False
out_string += self.sp_model.decode(A_ )
return out_string
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict[str, int]:
A = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ,A_ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(A_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
A = os.path.join(
A_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,A_ )
elif not os.path.isfile(self.vocab_file ):
with open(A_ ,'wb' ) as fi:
A = self.sp_model.serialized_model_proto()
fi.write(A_ )
return (out_vocab_file,)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, List[str]] ,A_ : Union[str, bool] = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
if isinstance(A_ ,A_ ):
A = self.preprocess_text(A_ )
A = self.sp_model.encode(A_ )
else:
A = [self.preprocess_text(A_ ) for t in text]
A = self.sp_model.encode(A_ )
if return_tensors is True or return_tensors == "pt":
A = torch.tensor(A_ )
return token_ids
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Union[int, List[int]] ) -> str:
return self.sp_model.decode(A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : "Conversation" ) -> List[int]:
A = [F'User: {text}' if is_user else F'Bot: {text}' for is_user, text in conversation.iter_texts()]
A = (
F'{self.eos_token}{self.bos_token}' + F'{self.bos_token}'.join(A_ ) + F'{self.bos_token}Bot:'
)
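        # resulting prompt: "<eos><bos>User: ...<bos>Bot: ...<bos>Bot:" -- the
        # trailing "Bot:" cues the model to generate the next assistant turn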
return self.encode(text=A_ ) | 91 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class lowerCAmelCase_ ( __magic_name__ ):
__lowerCamelCase : Union[str, Any] = "swinv2"
__lowerCamelCase : int = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , _lowerCAmelCase=224 , _lowerCAmelCase=4 , _lowerCAmelCase=3 , _lowerCAmelCase=96 , _lowerCAmelCase=[2, 2, 6, 2] , _lowerCAmelCase=[3, 6, 12, 24] , _lowerCAmelCase=7 , _lowerCAmelCase=4.0 , _lowerCAmelCase=True , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.1 , _lowerCAmelCase="gelu" , _lowerCAmelCase=False , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-5 , _lowerCAmelCase=32 , **_lowerCAmelCase , ) -> Tuple:
super().__init__(**_lowerCAmelCase )
_lowerCAmelCase = image_size
_lowerCAmelCase = patch_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = embed_dim
_lowerCAmelCase = depths
_lowerCAmelCase = len(_lowerCAmelCase )
_lowerCAmelCase = num_heads
_lowerCAmelCase = window_size
_lowerCAmelCase = mlp_ratio
_lowerCAmelCase = qkv_bias
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = drop_path_rate
_lowerCAmelCase = hidden_act
_lowerCAmelCase = use_absolute_embeddings
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = initializer_range
_lowerCAmelCase = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowerCAmelCase = int(embed_dim * 2 ** (len(_lowerCAmelCase ) - 1) )
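        # with the defaults above (embed_dim=96, 4 depth entries): 96 * 2**3 = 768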
_lowerCAmelCase = (0, 0, 0, 0)
| 18 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = GPTaTokenizer
lowerCamelCase_ = GPTaTokenizerFast
lowerCamelCase_ = True
lowerCamelCase_ = {'add_prefix_space': True}
lowerCamelCase_ = False
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase : Any =[
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
lowercase : Optional[Any] =dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
lowercase : Tuple =['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
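        # "\u0120" is the byte-level BPE marker for a leading space ("Ġ"), so
        # "\u0120low" is the token for " low"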
lowercase : Dict ={'''unk_token''': '''<unk>'''}
lowercase : Optional[Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase : str =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCAmelCase__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(UpperCAmelCase__ ) )
def lowerCamelCase_ ( self : str , **UpperCAmelCase__ : Dict ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : str , **UpperCAmelCase__ : List[Any] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : Tuple ):
'''simple docstring'''
lowercase : Dict ='''lower newer'''
lowercase : Any ='''lower newer'''
return input_text, output_text
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : Any =GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase : Tuple ='''lower newer'''
lowercase : List[str] =['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
lowercase : Tuple =tokenizer.tokenize(UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : Tuple =tokens + [tokenizer.unk_token]
lowercase : Any =[14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowercase : Optional[int] =self.get_tokenizer()
lowercase : List[Any] =self.get_rust_tokenizer(add_prefix_space=UpperCAmelCase__ )
lowercase : Optional[Any] ='''lower newer'''
# Testing tokenization
lowercase : Any =tokenizer.tokenize(UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ )
lowercase : Optional[Any] =rust_tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
# Testing conversion to ids without special tokens
lowercase : Optional[Any] =tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ )
lowercase : List[str] =rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
# Testing conversion to ids with special tokens
lowercase : int =self.get_rust_tokenizer(add_prefix_space=UpperCAmelCase__ )
lowercase : Union[str, Any] =tokenizer.encode(UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ )
lowercase : Any =rust_tokenizer.encode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
# Testing the unknown token
lowercase : Any =tokens + [rust_tokenizer.unk_token]
lowercase : int =[14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Tuple , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : int ):
'''simple docstring'''
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : List[Any]=15 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase : Optional[int] =self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
# Simple input
lowercase : Union[str, Any] ='''This is a simple input'''
lowercase : Dict =['''This is a simple input 1''', '''This is a simple input 2''']
lowercase : Dict =('''This is a simple input''', '''This is a pair''')
lowercase : Optional[Any] =[
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Simple input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Simple input
self.assertRaises(
UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , )
# Pair input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Pair input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Pair input
self.assertRaises(
UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : List[str] =GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' )
# Simple input
lowercase : Dict ='''This is a simple input'''
lowercase : Optional[int] =['''This is a simple input looooooooong''', '''This is a simple input''']
lowercase : int =('''This is a simple input''', '''This is a pair''')
lowercase : Optional[int] =[
('''This is a simple input loooooong''', '''This is a simple input'''),
('''This is a simple pair loooooong''', '''This is a simple pair'''),
]
lowercase : str =tokenizer.pad_token_id
lowercase : int =tokenizer(UpperCAmelCase__ , padding='''max_length''' , max_length=30 , return_tensors='''np''' )
lowercase : List[Any] =tokenizer(UpperCAmelCase__ , padding=UpperCAmelCase__ , truncate=UpperCAmelCase__ , return_tensors='''np''' )
lowercase : Optional[int] =tokenizer(*UpperCAmelCase__ , padding='''max_length''' , max_length=60 , return_tensors='''np''' )
lowercase : Union[str, Any] =tokenizer(UpperCAmelCase__ , padding=UpperCAmelCase__ , truncate=UpperCAmelCase__ , return_tensors='''np''' )
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['''input_ids'''] )
self.assertTrue(0 in out_s['''attention_mask'''] )
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
self.assertFalse(0 in out_sa['''attention_mask'''][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
self.assertTrue(0 in out_sa['''attention_mask'''][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['''input_ids'''] )
self.assertTrue(0 in out_p['''attention_mask'''] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
self.assertFalse(0 in out_pa['''attention_mask'''][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
self.assertTrue(0 in out_pa['''attention_mask'''][1] )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : List[Any] ='''$$$'''
lowercase : List[Any] =GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=UpperCAmelCase__ , add_bos_token=UpperCAmelCase__ )
lowercase : List[Any] ='''This is a simple input'''
lowercase : Optional[Any] =['''This is a simple input 1''', '''This is a simple input 2''']
lowercase : Union[str, Any] =tokenizer.bos_token_id
lowercase : Optional[Any] =tokenizer(UpperCAmelCase__ )
lowercase : List[str] =tokenizer(UpperCAmelCase__ )
self.assertEqual(out_s.input_ids[0] , UpperCAmelCase__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
lowercase : str =tokenizer.decode(out_s.input_ids )
lowercase : List[str] =tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , UpperCAmelCase__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
# TODO: change to self.get_tokenizers() when the fast version is implemented
lowercase : List[str] =[self.get_tokenizer(do_lower_case=UpperCAmelCase__ , add_bos_token=UpperCAmelCase__ )]
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase : int ='''Encode this.'''
lowercase : List[str] ='''This one too please.'''
lowercase : Dict =tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
encoded_sequence += tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
lowercase : Tuple =tokenizer.encode_plus(
UpperCAmelCase__ , UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , return_special_tokens_mask=UpperCAmelCase__ , )
lowercase : Dict =encoded_sequence_dict['''input_ids''']
lowercase : str =encoded_sequence_dict['''special_tokens_mask''']
self.assertEqual(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) )
lowercase : str =[
(x if not special_tokens_mask[i] else None) for i, x in enumerate(UpperCAmelCase__ )
]
lowercase : Any =[x for x in filtered_sequence if x is not None]
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
lowercase : List[Any] =AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=UpperCAmelCase__ )
lowercase : str ='''A photo of a cat'''
lowercase : Optional[int] =tokenizer.encode(
UpperCAmelCase__ , )
self.assertEqual(UpperCAmelCase__ , [2, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained('''test_opt''' )
lowercase : int =AutoTokenizer.from_pretrained('''./test_opt''' )
lowercase : Optional[Any] =tokenizer.encode(
UpperCAmelCase__ , )
self.assertEqual(UpperCAmelCase__ , [2, 250, 1345, 9, 10, 4758] )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : int =AutoTokenizer.from_pretrained('''facebook/opt-350m''' , use_slow=UpperCAmelCase__ )
lowercase : Any ='''A photo of a cat'''
lowercase : List[str] =tokenizer.encode(
UpperCAmelCase__ , )
# Same as above
self.assertEqual(UpperCAmelCase__ , [2, 250, 1345, 9, 10, 4758] )
@unittest.skip('''This test is failing because of a bug in the fast tokenizer''' )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : List[Any] =AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=UpperCAmelCase__ )
lowercase : List[str] ='''bos'''
lowercase : List[Any] =tokenizer.get_vocab()['''bos''']
lowercase : Optional[int] ='''A photo of a cat'''
lowercase : Tuple =tokenizer.encode(
UpperCAmelCase__ , )
# We changed the bos token
self.assertEqual(UpperCAmelCase__ , [31957, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained('''./tok''' )
lowercase : Tuple =AutoTokenizer.from_pretrained('''./tok''' )
self.assertTrue(tokenizer.is_fast )
lowercase : Optional[int] =tokenizer.encode(
UpperCAmelCase__ , )
self.assertEqual(UpperCAmelCase__ , [31957, 250, 1345, 9, 10, 4758] )
| 92 |
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
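# enable_full_determinism() forces deterministic kernels and related settings so
# the hard-coded output slices asserted below are reproducible across runs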
class lowerCAmelCase_ ( __magic_name__ ,__magic_name__ ,unittest.TestCase ):
__lowerCamelCase : Optional[Any] = AutoencoderKL
__lowerCamelCase : List[Any] = "sample"
__lowerCamelCase : Tuple = 1e-2
@property
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = 4
_lowerCAmelCase = 3
_lowerCAmelCase = (32, 32)
_lowerCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(_lowerCAmelCase )
return {"sample": image}
@property
def _snake_case ( self ) -> Any:
return (3, 32, 32)
@property
def _snake_case ( self ) -> List[Any]:
return (3, 32, 32)
def _snake_case ( self ) -> str:
_lowerCAmelCase = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 4,
}
_lowerCAmelCase = self.dummy_input
return init_dict, inputs_dict
def _snake_case ( self ) -> Optional[int]:
pass
def _snake_case ( self ) -> Any:
pass
@unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" )
def _snake_case ( self ) -> str:
# enable deterministic behavior for gradient checkpointing
_lowerCAmelCase , _lowerCAmelCase = self.prepare_init_args_and_inputs_for_common()
_lowerCAmelCase = self.model_class(**_lowerCAmelCase )
model.to(_lowerCAmelCase )
assert not model.is_gradient_checkpointing and model.training
_lowerCAmelCase = model(**_lowerCAmelCase ).sample
        # run the backward pass on the model. For simplicity we skip a real loss and
        # instead backprop on the mean difference between the output and random labels
model.zero_grad()
_lowerCAmelCase = torch.randn_like(_lowerCAmelCase )
_lowerCAmelCase = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
_lowerCAmelCase = self.model_class(**_lowerCAmelCase )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(_lowerCAmelCase )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
_lowerCAmelCase = model_a(**_lowerCAmelCase ).sample
        # run the backward pass on the model. For simplicity we skip a real loss and
        # instead backprop on the mean difference between the output and random labels
model_a.zero_grad()
_lowerCAmelCase = (out_a - labels).mean()
loss_a.backward()
        # compare the loss and the parameter gradients
self.assertTrue((loss - loss_a).abs() < 1E-5 )
_lowerCAmelCase = dict(model.named_parameters() )
_lowerCAmelCase = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
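        # gradient checkpointing recomputes activations during the backward pass
        # instead of storing them, trading compute for memory; the checks above
        # confirm it leaves the loss and every parameter gradient unchanged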
def _snake_case ( self ) -> Optional[Any]:
_lowerCAmelCase , _lowerCAmelCase = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(_lowerCAmelCase )
_lowerCAmelCase = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def _snake_case ( self ) -> Dict:
_lowerCAmelCase = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" )
_lowerCAmelCase = model.to(_lowerCAmelCase )
model.eval()
if torch_device == "mps":
_lowerCAmelCase = torch.manual_seed(0 )
else:
_lowerCAmelCase = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 )
_lowerCAmelCase = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
_lowerCAmelCase = image.to(_lowerCAmelCase )
with torch.no_grad():
_lowerCAmelCase = model(_lowerCAmelCase , sample_posterior=_lowerCAmelCase , generator=_lowerCAmelCase ).sample
_lowerCAmelCase = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
_lowerCAmelCase = torch.tensor(
[
-4.0078E-01,
-3.8323E-04,
-1.2681E-01,
-1.1462E-01,
2.0095E-01,
1.0893E-01,
-8.8247E-02,
-3.0361E-01,
-9.8644E-03,
] )
elif torch_device == "cpu":
_lowerCAmelCase = torch.tensor(
[-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
else:
_lowerCAmelCase = torch.tensor(
[-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
self.assertTrue(torch_all_close(_lowerCAmelCase , _lowerCAmelCase , rtol=1E-2 ) )
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
    def _snake_case ( self , seed , shape ) -> Union[str, Any]:
        return f'''gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape] )}.npy'''
def _snake_case ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self , _lowerCAmelCase=0 , _lowerCAmelCase=(4, 3, 512, 512) , _lowerCAmelCase=False ) -> Tuple:
_lowerCAmelCase = torch.floataa if fpaa else torch.floataa
_lowerCAmelCase = torch.from_numpy(load_hf_numpy(self.get_file_format(_lowerCAmelCase , _lowerCAmelCase ) ) ).to(_lowerCAmelCase ).to(_lowerCAmelCase )
return image
def _snake_case ( self , _lowerCAmelCase="CompVis/stable-diffusion-v1-4" , _lowerCAmelCase=False ) -> Tuple:
_lowerCAmelCase = "fp16" if fpaa else None
_lowerCAmelCase = torch.floataa if fpaa else torch.floataa
_lowerCAmelCase = AutoencoderKL.from_pretrained(
_lowerCAmelCase , subfolder="vae" , torch_dtype=_lowerCAmelCase , revision=_lowerCAmelCase , )
model.to(_lowerCAmelCase ).eval()
return model
def _snake_case ( self , _lowerCAmelCase=0 ) -> str:
if torch_device == "mps":
return torch.manual_seed(_lowerCAmelCase )
return torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
_lowerCAmelCase = self.get_sd_vae_model()
_lowerCAmelCase = self.get_sd_image(_lowerCAmelCase )
_lowerCAmelCase = self.get_generator(_lowerCAmelCase )
with torch.no_grad():
_lowerCAmelCase = model(_lowerCAmelCase , generator=_lowerCAmelCase , sample_posterior=_lowerCAmelCase ).sample
assert sample.shape == image.shape
_lowerCAmelCase = sample[-1, -2:, -2:, :2].flatten().float().cpu()
_lowerCAmelCase = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
_lowerCAmelCase = self.get_sd_vae_model(fpaa=_lowerCAmelCase )
_lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , fpaa=_lowerCAmelCase )
_lowerCAmelCase = self.get_generator(_lowerCAmelCase )
with torch.no_grad():
_lowerCAmelCase = model(_lowerCAmelCase , generator=_lowerCAmelCase , sample_posterior=_lowerCAmelCase ).sample
assert sample.shape == image.shape
_lowerCAmelCase = sample[-1, -2:, :2, -2:].flatten().float().cpu()
_lowerCAmelCase = torch.tensor(_lowerCAmelCase )
assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Tuple:
_lowerCAmelCase = self.get_sd_vae_model()
_lowerCAmelCase = self.get_sd_image(_lowerCAmelCase )
with torch.no_grad():
_lowerCAmelCase = model(_lowerCAmelCase ).sample
assert sample.shape == image.shape
_lowerCAmelCase = sample[-1, -2:, -2:, :2].flatten().float().cpu()
_lowerCAmelCase = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
_lowerCAmelCase = self.get_sd_vae_model()
_lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , shape=(3, 4, 64, 64) )
with torch.no_grad():
_lowerCAmelCase = model.decode(_lowerCAmelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
_lowerCAmelCase = sample[-1, -2:, :2, -2:].flatten().cpu()
_lowerCAmelCase = torch.tensor(_lowerCAmelCase )
assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> str:
_lowerCAmelCase = self.get_sd_vae_model(fpaa=_lowerCAmelCase )
_lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , shape=(3, 4, 64, 64) , fpaa=_lowerCAmelCase )
with torch.no_grad():
_lowerCAmelCase = model.decode(_lowerCAmelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
_lowerCAmelCase = sample[-1, -2:, :2, -2:].flatten().float().cpu()
_lowerCAmelCase = torch.tensor(_lowerCAmelCase )
assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=5E-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def _snake_case ( self , _lowerCAmelCase ) -> List[str]:
_lowerCAmelCase = self.get_sd_vae_model(fpaa=_lowerCAmelCase )
_lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , shape=(3, 4, 64, 64) , fpaa=_lowerCAmelCase )
with torch.no_grad():
_lowerCAmelCase = model.decode(_lowerCAmelCase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
_lowerCAmelCase = model.decode(_lowerCAmelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1E-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def _snake_case ( self , _lowerCAmelCase ) -> Any:
_lowerCAmelCase = self.get_sd_vae_model()
_lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , shape=(3, 4, 64, 64) )
with torch.no_grad():
_lowerCAmelCase = model.decode(_lowerCAmelCase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
_lowerCAmelCase = model.decode(_lowerCAmelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> int:
_lowerCAmelCase = self.get_sd_vae_model()
_lowerCAmelCase = self.get_sd_image(_lowerCAmelCase )
_lowerCAmelCase = self.get_generator(_lowerCAmelCase )
with torch.no_grad():
_lowerCAmelCase = model.encode(_lowerCAmelCase ).latent_dist
_lowerCAmelCase = dist.sample(generator=_lowerCAmelCase )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
_lowerCAmelCase = sample[0, -1, -3:, -3:].flatten().cpu()
_lowerCAmelCase = torch.tensor(_lowerCAmelCase )
_lowerCAmelCase = 3E-3 if torch_device != "mps" else 1E-2
assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=_lowerCAmelCase )
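    # Editor's note (added; not part of the original row): the encode test above
    # relies on the Stable Diffusion VAE's fixed 8x spatial downsampling, so a
    # (B, 3, 512, 512) image yields a (B, 4, 64, 64) latent -- hence the
    # `i // 8` in the shape assertion.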
| 18 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import pow, sqrt
def __A (resistance: float , reactance: float , impedance: float ) -> dict[str, float]:
    """Solve for the missing element of the impedance triangle Z**2 = R**2 + X**2."""
    if (resistance, reactance, impedance).count(0 ) != 1:
        raise ValueError('One and only one argument must be 0' )
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance , 2 ) - pow(reactance , 2 ) )}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance , 2 ) - pow(resistance , 2 ) )}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance , 2 ) + pow(reactance , 2 ) )}
    else:
        raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
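    # Editor's usage sketch (added): the function solves Z**2 = R**2 + X**2 for
    # whichever element is passed as 0, e.g. the 3-4-5 impedance triangle.
    assert __A(3 , 4 , 0 ) == {"impedance": 5.0}
    assert __A(0 , 4 , 5 ) == {"resistance": 3.0}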
| 93 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class lowerCAmelCase_ ( PretrainedConfig ):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__( self , vocab_size=50257 , n_positions=1024 , n_embd=768 , n_layer=12 , n_head=12 , n_inner=None , activation_function="gelu_pytorch_tanh" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=50256 , eos_token_id=50256 , attention_softmax_in_fp32=True , scale_attention_softmax_in_fp32=True , multi_query=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
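# Editor's sketch (added; `lowerCAmelCase_` is the mangled stand-in for
# GPTBigCodeConfig). With the restored parameter names above, a default
# instance exposes the usual attributes:
# config = lowerCAmelCase_()
# assert config.n_embd == 768 and config.multi_query is True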
| 18 | 0 |
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE = TypeVar('KEY')
SCREAMING_SNAKE_CASE = TypeVar('VAL')
@dataclass(frozen=True , slots=True )
class _Item(Generic[KEY, VAL] ):
    """Immutable key/value record stored in a bucket."""

    key: KEY
    val: VAL


class _DeletedItem(_Item ):
    """Falsy tombstone left behind by deletions so probe chains stay intact."""

    def __init__( self ) -> None:
        super().__init__(None , None )

    def __bool__( self ) -> bool:
        return False


_deleted = _DeletedItem()
class HashMap(MutableMapping[KEY, VAL] ):
    """Hash map with open addressing (linear probing) and tombstone deletion."""

    def __init__( self , initial_block_size: int = 8 , capacity_factor: float = 0.75 ) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index( self , key: KEY ) -> int:
        return hash(key ) % len(self._buckets )

    def _get_next_ind( self , ind: int ) -> int:
        return (ind + 1) % len(self._buckets )

    def _try_set( self , ind: int , key: KEY , val: VAL ) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key , val )
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key , val )
            return True
        else:
            return False

    def _is_full( self ) -> bool:
        limit = len(self._buckets ) * self._capacity_factor
        return len(self ) >= int(limit )

    def _is_sparse( self ) -> bool:
        if len(self._buckets ) <= self._initial_block_size:
            return False
        limit = len(self._buckets ) * self._capacity_factor / 2
        return len(self ) < limit

    def _resize( self , new_size: int ) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key , item.val )

    def _size_up( self ) -> None:
        self._resize(len(self._buckets ) * 2 )

    def _size_down( self ) -> None:
        self._resize(len(self._buckets ) // 2 )

    def _iterate_buckets( self , key: KEY ) -> Iterator[int]:
        ind = self._get_bucket_index(key )
        for _ in range(len(self._buckets ) ):
            yield ind
            ind = self._get_next_ind(ind )

    def _add_item( self , key: KEY , val: VAL ) -> None:
        for ind in self._iterate_buckets(key ):
            if self._try_set(ind , key , val ):
                break

    def __setitem__( self , key: KEY , val: VAL ) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key , val )

    def __delitem__( self , key: KEY ) -> None:
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key )
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__( self , key: KEY ) -> VAL:
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key )

    def __len__( self ) -> int:
        return self._len

    def __iter__( self ) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__( self ) -> str:
        val_string = ''' ,'''.join(
            f'{item.key}: {item.val}' for item in self._buckets if item )
        return f'HashMap({val_string})'
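# Editor's usage sketch (added): the open-addressing map above mirrors the
# built-in dict API; deleted slots become `_deleted` tombstones so later
# probes can continue past them.
_demo_map = HashMap(initial_block_size=8 )
_demo_map["a"] = 1
_demo_map["b"] = 2
del _demo_map["a"]
assert list(_demo_map ) == ["b"] and _demo_map["b"] == 2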
| 94 |
'''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class lowerCAmelCase_ ( PretrainedConfig ):
    model_type = "data2vec-audio"
def __init__( self , _lowerCAmelCase=32 , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3072 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-5 , _lowerCAmelCase="gelu" , _lowerCAmelCase=(512, 512, 512, 512, 512, 512, 512) , _lowerCAmelCase=(5, 2, 2, 2, 2, 2, 2) , _lowerCAmelCase=(10, 3, 3, 3, 3, 2, 2) , _lowerCAmelCase=False , _lowerCAmelCase=16 , _lowerCAmelCase=19 , _lowerCAmelCase=5 , _lowerCAmelCase=0.05 , _lowerCAmelCase=10 , _lowerCAmelCase=2 , _lowerCAmelCase=0.0 , _lowerCAmelCase=10 , _lowerCAmelCase=0 , _lowerCAmelCase="sum" , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=256 , _lowerCAmelCase=(512, 512, 512, 512, 1500) , _lowerCAmelCase=(5, 3, 3, 1, 1) , _lowerCAmelCase=(1, 2, 3, 1, 1) , _lowerCAmelCase=512 , _lowerCAmelCase=0 , _lowerCAmelCase=1 , _lowerCAmelCase=2 , _lowerCAmelCase=False , _lowerCAmelCase=3 , _lowerCAmelCase=2 , _lowerCAmelCase=3 , _lowerCAmelCase=None , **_lowerCAmelCase , ) -> Dict:
super().__init__(**_lowerCAmelCase , pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase )
_lowerCAmelCase = hidden_size
_lowerCAmelCase = feat_extract_activation
_lowerCAmelCase = list(_lowerCAmelCase )
_lowerCAmelCase = list(_lowerCAmelCase )
_lowerCAmelCase = list(_lowerCAmelCase )
_lowerCAmelCase = conv_bias
_lowerCAmelCase = num_conv_pos_embeddings
_lowerCAmelCase = num_conv_pos_embedding_groups
_lowerCAmelCase = conv_pos_kernel_size
_lowerCAmelCase = len(self.conv_dim )
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = hidden_dropout
_lowerCAmelCase = attention_dropout
_lowerCAmelCase = activation_dropout
_lowerCAmelCase = feat_proj_dropout
_lowerCAmelCase = final_dropout
_lowerCAmelCase = layerdrop
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = initializer_range
_lowerCAmelCase = vocab_size
_lowerCAmelCase = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCAmelCase = mask_time_prob
_lowerCAmelCase = mask_time_length
_lowerCAmelCase = mask_time_min_masks
_lowerCAmelCase = mask_feature_prob
_lowerCAmelCase = mask_feature_length
_lowerCAmelCase = mask_feature_min_masks
# ctc loss
_lowerCAmelCase = ctc_loss_reduction
_lowerCAmelCase = ctc_zero_infinity
# adapter
_lowerCAmelCase = add_adapter
_lowerCAmelCase = adapter_kernel_size
_lowerCAmelCase = adapter_stride
_lowerCAmelCase = num_adapter_layers
_lowerCAmelCase = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_lowerCAmelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_lowerCAmelCase = list(_lowerCAmelCase )
_lowerCAmelCase = list(_lowerCAmelCase )
_lowerCAmelCase = list(_lowerCAmelCase )
_lowerCAmelCase = xvector_output_dim
@property
def _snake_case ( self ) -> str:
return math.prod(self.conv_stride )
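    # Editor's worked example (added): with the default conv_stride
    # (5, 2, 2, 2, 2, 2, 2) the property above returns 5 * 2**6 = 320, i.e. one
    # feature frame per 320 input samples (20 ms of audio at 16 kHz).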
| 18 | 0 |
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir('''fixtures/test_sentencepiece.model''')
mock_tokenizer_config = {'''target_lang''': '''fi''', '''source_lang''': '''en'''}
zh_code = '''>>zh<<'''
ORG_NAME = '''Helsinki-NLP/'''
if is_torch_available():
    FRAMEWORK = '''pt'''
elif is_tf_available():
    FRAMEWORK = '''tf'''
else:
    FRAMEWORK = '''jax'''
@require_sentencepiece
class UpperCamelCase_ (TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
super().setUp()
UpperCAmelCase_ : List[Any] = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
UpperCAmelCase_ : Tuple = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
UpperCAmelCase_ : Optional[Any] = Path(self.tmpdirname )
save_json(lowerCAmelCase_ , save_dir / VOCAB_FILES_NAMES["vocab"] )
save_json(lowerCAmelCase_ , save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(lowerCAmelCase_ , save_dir / VOCAB_FILES_NAMES["source_spm"] )
copyfile(lowerCAmelCase_ , save_dir / VOCAB_FILES_NAMES["target_spm"] )
UpperCAmelCase_ : str = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def _SCREAMING_SNAKE_CASE ( self : Tuple , **lowerCAmelCase_ : Tuple ) -> MarianTokenizer:
return MarianTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : Optional[Any] ) -> List[Any]:
return (
"This is a test",
"This is a test",
)
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
UpperCAmelCase_ : List[str] = "</s>"
UpperCAmelCase_ : Tuple = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase_ ) , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<pad>" )
self.assertEqual(len(lowerCAmelCase_ ) , 9 )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
UpperCAmelCase_ : List[str] = MarianTokenizer.from_pretrained(f"""{ORG_NAME}opus-mt-en-de""" )
UpperCAmelCase_ : Optional[int] = en_de_tokenizer(["I am a small frog"] , return_tensors=lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Any = [38, 121, 14, 697, 38_848, 0]
self.assertListEqual(lowerCAmelCase_ , batch.input_ids[0] )
UpperCAmelCase_ : Optional[Any] = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = [x.name for x in Path(lowerCAmelCase_ ).glob("*" )]
self.assertIn("source.spm" , lowerCAmelCase_ )
MarianTokenizer.from_pretrained(lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
UpperCAmelCase_ : Optional[int] = self.get_tokenizer()
UpperCAmelCase_ : Tuple = tok(
["I am a small frog" * 1_000, "I am a small frog"] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(batch.input_ids.shape , (2, 512) )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
UpperCAmelCase_ : List[Any] = self.get_tokenizer()
UpperCAmelCase_ : Any = tok(["I am a tiny frog", "I am a small frog"] , padding=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
# fmt: off
UpperCAmelCase_ : int = {"input_ids": [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase_ , model_name="Helsinki-NLP/opus-mt-en-de" , revision="1a8c2263da11e68e50938f97e10cd57820bd504c" , decode_kwargs={"use_source_tokenizer": True} , )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
UpperCAmelCase_ : int = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs" )
UpperCAmelCase_ : Dict = "Tämä on testi"
UpperCAmelCase_ : Optional[Any] = "This is a test"
UpperCAmelCase_ : Union[str, Any] = [76, 7, 2_047, 2]
UpperCAmelCase_ : Optional[int] = [69, 12, 11, 940, 2]
UpperCAmelCase_ : int = tokenizer(lowerCAmelCase_ ).input_ids
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = tokenizer(text_target=lowerCAmelCase_ ).input_ids
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
| 95 |
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase_ ( SchedulerCommonTest ):
    scheduler_classes = (DDPMParallelScheduler,)
def _snake_case ( self , **_lowerCAmelCase ) -> int:
_lowerCAmelCase = {
"num_train_timesteps": 1000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**_lowerCAmelCase )
return config
def _snake_case ( self ) -> List[Any]:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCAmelCase )
def _snake_case ( self ) -> List[Any]:
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_lowerCAmelCase , beta_end=_lowerCAmelCase )
def _snake_case ( self ) -> Any:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_lowerCAmelCase )
def _snake_case ( self ) -> Optional[Any]:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_lowerCAmelCase )
def _snake_case ( self ) -> Optional[int]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowerCAmelCase )
def _snake_case ( self ) -> List[str]:
self.check_over_configs(thresholding=_lowerCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_lowerCAmelCase , prediction_type=_lowerCAmelCase , sample_max_value=_lowerCAmelCase , )
def _snake_case ( self ) -> int:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_lowerCAmelCase )
def _snake_case ( self ) -> Dict:
for t in [0, 500, 999]:
self.check_over_forward(time_step=_lowerCAmelCase )
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
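    # Editor's note (added): the expected values above follow the DDPM posterior
    # variance beta_tilde_t = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t
    # on the configured linear schedule; it is ~0 at t=0 and approaches
    # beta_end = 0.02 at t=999.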
def _snake_case ( self ) -> Tuple:
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
_lowerCAmelCase = len(_lowerCAmelCase )
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
_lowerCAmelCase = self.dummy_sample_deter + 0.1
_lowerCAmelCase = self.dummy_sample_deter - 0.1
_lowerCAmelCase = samplea.shape[0]
_lowerCAmelCase = torch.stack([samplea, samplea, samplea] , dim=0 )
_lowerCAmelCase = torch.arange(_lowerCAmelCase )[0:3, None].repeat(1 , _lowerCAmelCase )
_lowerCAmelCase = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
_lowerCAmelCase = scheduler.batch_step_no_noise(_lowerCAmelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
_lowerCAmelCase = torch.sum(torch.abs(_lowerCAmelCase ) )
_lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 1153.1833 ) < 1E-2
assert abs(result_mean.item() - 0.5005 ) < 1E-3
def _snake_case ( self ) -> Dict:
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
_lowerCAmelCase = len(_lowerCAmelCase )
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
_lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(_lowerCAmelCase ) ):
# 1. predict noise residual
_lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase )
# 2. predict previous mean of sample x_t-1
_lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
_lowerCAmelCase = pred_prev_sample
_lowerCAmelCase = torch.sum(torch.abs(_lowerCAmelCase ) )
_lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3372 ) < 1E-3
def _snake_case ( self ) -> Optional[Any]:
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config(prediction_type="v_prediction" )
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
_lowerCAmelCase = len(_lowerCAmelCase )
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
_lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(_lowerCAmelCase ) ):
# 1. predict noise residual
_lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase )
# 2. predict previous mean of sample x_t-1
_lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
_lowerCAmelCase = pred_prev_sample
_lowerCAmelCase = torch.sum(torch.abs(_lowerCAmelCase ) )
_lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2631 ) < 1E-3
def _snake_case ( self ) -> Dict:
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
_lowerCAmelCase = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_lowerCAmelCase )
_lowerCAmelCase = scheduler.timesteps
for i, timestep in enumerate(_lowerCAmelCase ):
if i == len(_lowerCAmelCase ) - 1:
_lowerCAmelCase = -1
else:
_lowerCAmelCase = timesteps[i + 1]
_lowerCAmelCase = scheduler.previous_timestep(_lowerCAmelCase )
_lowerCAmelCase = prev_t.item()
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
def _snake_case ( self ) -> Any:
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
_lowerCAmelCase = [100, 87, 50, 51, 0]
with self.assertRaises(_lowerCAmelCase , msg="`custom_timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=_lowerCAmelCase )
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
_lowerCAmelCase = [100, 87, 50, 1, 0]
_lowerCAmelCase = len(_lowerCAmelCase )
with self.assertRaises(_lowerCAmelCase , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
scheduler.set_timesteps(num_inference_steps=_lowerCAmelCase , timesteps=_lowerCAmelCase )
def _snake_case ( self ) -> Optional[int]:
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
_lowerCAmelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            _lowerCAmelCase , msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
scheduler.set_timesteps(timesteps=_lowerCAmelCase )
| 18 | 0 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 96 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}
class lowerCAmelCase_ ( PreTrainedTokenizer ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<sep>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase="<cls>" , _lowerCAmelCase="<mask>" , _lowerCAmelCase=["<eop>", "<eod>"] , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> None:
_lowerCAmelCase = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else mask_token
_lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_lowerCAmelCase , remove_space=_lowerCAmelCase , keep_accents=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , )
_lowerCAmelCase = 3
_lowerCAmelCase = do_lower_case
_lowerCAmelCase = remove_space
_lowerCAmelCase = keep_accents
_lowerCAmelCase = vocab_file
_lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowerCAmelCase )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
"See https://pypi.org/project/jieba/ for installation." )
_lowerCAmelCase = jieba
_lowerCAmelCase = str.maketrans(" \n" , "\u2582\u2583" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def _snake_case ( self ) -> Optional[int]:
return len(self.sp_model )
def _snake_case ( self ) -> Optional[int]:
_lowerCAmelCase = {self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Tuple:
_lowerCAmelCase = self.__dict__.copy()
_lowerCAmelCase = None
return state
def __setstate__( self , _lowerCAmelCase ) -> Dict:
_lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_lowerCAmelCase = {}
_lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _snake_case ( self , _lowerCAmelCase ) -> str:
if self.remove_space:
_lowerCAmelCase = " ".join(inputs.strip().split() )
else:
_lowerCAmelCase = inputs
_lowerCAmelCase = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
_lowerCAmelCase = unicodedata.normalize("NFKD" , _lowerCAmelCase )
_lowerCAmelCase = "".join([c for c in outputs if not unicodedata.combining(_lowerCAmelCase )] )
if self.do_lower_case:
_lowerCAmelCase = outputs.lower()
return outputs
def _snake_case ( self , _lowerCAmelCase ) -> List[str]:
_lowerCAmelCase = self.preprocess_text(_lowerCAmelCase )
_lowerCAmelCase = self.sp_model.encode(_lowerCAmelCase , out_type=_lowerCAmelCase )
_lowerCAmelCase = []
for piece in pieces:
if len(_lowerCAmelCase ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
_lowerCAmelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(_lowerCAmelCase , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_lowerCAmelCase = cur_pieces[1:]
else:
_lowerCAmelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_lowerCAmelCase )
else:
new_pieces.append(_lowerCAmelCase )
return new_pieces
def _snake_case ( self , _lowerCAmelCase ) -> str:
return self.sp_model.PieceToId(_lowerCAmelCase )
def _snake_case ( self , _lowerCAmelCase ) -> Optional[Any]:
return self.sp_model.IdToPiece(_lowerCAmelCase )
def _snake_case ( self , _lowerCAmelCase ) -> Optional[Any]:
_lowerCAmelCase = "".join(_lowerCAmelCase ).replace(_lowerCAmelCase , " " ).strip()
return out_string
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> List[int]:
_lowerCAmelCase = [self.sep_token_id]
_lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase )
if token_ids_a is not None:
return ([0] * len(_lowerCAmelCase )) + [1] + ([0] * len(_lowerCAmelCase )) + [1, 1]
return ([0] * len(_lowerCAmelCase )) + [1, 1]
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> List[int]:
_lowerCAmelCase = [self.sep_token_id]
_lowerCAmelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(_lowerCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_lowerCAmelCase = os.path.join(
_lowerCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCAmelCase , "wb" ) as fi:
_lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(_lowerCAmelCase )
return (out_vocab_file,)
def _snake_case ( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
_lowerCAmelCase = super()._decode(*_lowerCAmelCase , **_lowerCAmelCase )
_lowerCAmelCase = text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" )
return text
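    # Editor's note (added): __init__ builds a translation table mapping
    # " " -> "\u2582" and "\n" -> "\u2583" for jieba-based pre-tokenization; the
    # decoding override above inverts it, so "Hello\u2582world" decodes back to
    # "Hello world".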
| 18 | 0 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    """A single element of the circular list."""

    def __init__( self , data: Any ) -> None:
        self.data = data
        self.next: Node | None = None


class CircularLinkedList:
    """Singly linked list whose tail points back to its head."""

    def __init__( self ) -> None:
        self.head: Node | None = None
        self.tail: Node | None = None

    def __iter__( self ) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__( self ) -> int:
        return sum(1 for _ in self )

    def __repr__( self ) -> str:
        return "->".join(str(item ) for item in iter(self ) )

    def insert_tail( self , data: Any ) -> None:
        self.insert_nth(len(self ) , data )

    def insert_head( self , data: Any ) -> None:
        self.insert_nth(0 , data )

    def insert_nth( self , index: int , data: Any ) -> None:
        if index < 0 or index > len(self ):
            raise IndexError('''list index out of range.''' )
        new_node = Node(data )
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self ) - 1:  # insert at tail
                self.tail = new_node

    def delete_front( self ):
        return self.delete_nth(0 )

    def delete_tail( self ) -> Any:
        return self.delete_nth(len(self ) - 1 )

    def delete_nth( self , index: int = 0 ) -> Any:
        if not 0 <= index < len(self ):
            raise IndexError('''list index out of range.''' )
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self ) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty( self ) -> bool:
        return len(self ) == 0
def test_circular_linked_list() -> None:
    '''Exercise the list operations end to end.'''
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list ) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list ) == ""
    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_nth(-1 )
        raise AssertionError
    except IndexError:
        assert True
    try:
        circular_linked_list.delete_nth(0 )
        raise AssertionError
    except IndexError:
        assert True
    assert circular_linked_list.is_empty() is True
    for i in range(5 ):
        assert len(circular_linked_list ) == i
        circular_linked_list.insert_nth(i , i + 1 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
    circular_linked_list.insert_tail(6 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 7 ) )
    circular_linked_list.insert_head(0 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(0 , 7 ) )
    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
    assert circular_linked_list.delete_nth(2 ) == 3
    circular_linked_list.insert_nth(2 , 3 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 97 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
_DESCRIPTION = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n"
_KWARGS_DESCRIPTION = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. ])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
def _snake_case ( self ) -> Dict:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
] , )
def _snake_case ( self ) -> Tuple:
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("float" ) ),
"references": datasets.Sequence(datasets.Value("float" ) ),
}
else:
return {
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
}
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase="uniform_average" , _lowerCAmelCase=True ) -> Union[str, Any]:
_lowerCAmelCase = mean_squared_error(
_lowerCAmelCase , _lowerCAmelCase , sample_weight=_lowerCAmelCase , multioutput=_lowerCAmelCase , squared=_lowerCAmelCase )
return {"mse": mse}
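# Editor's worked example (added), matching the docstring above:
# predictions = [2.5, 0.0, 2, 8], references = [3, -0.5, 2, 7]
# squared errors = (0.25, 0.25, 0.0, 1.0) -> mean = 1.5 / 4 = 0.375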
| 18 | 0 |
'''simple docstring'''
from __future__ import annotations
class XORCipher:
    def __init__( self , key: int = 0 ) -> None:
        """Store the key; 0 means "choose the key at call time"."""
        # private field
        self.__key = key

    def encrypt( self , content: str , key: int ) -> list[str]:
        # precondition: a key and a string to work on
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch ) ^ key ) for ch in content]

    def decrypt( self , content: list[str] , key: int ) -> list[str]:
        # precondition: a key and an encrypted list of chars to work on
        assert isinstance(key , int ) and isinstance(content , list )
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch ) ^ key ) for ch in content]

    def encrypt_string( self , content: str , key: int = 0 ) -> str:
        # precondition
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ''''''
        for ch in content:
            ans += chr(ord(ch ) ^ key )
        return ans

    def decrypt_string( self , content: str , key: int = 0 ) -> str:
        # precondition
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ''''''
        for ch in content:
            ans += chr(ord(ch ) ^ key )
        return ans

    def encrypt_file( self , file: str , key: int = 0 ) -> bool:
        # precondition
        assert isinstance(file , str ) and isinstance(key , int )
        try:
            with open(file ) as fin, open('''encrypt.out''' , '''w+''' ) as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line , key ) )
        except OSError:
            return False
        return True

    def decrypt_file( self , file: str , key: int ) -> bool:
        # precondition
        assert isinstance(file , str ) and isinstance(key , int )
        try:
            with open(file ) as fin, open('''decrypt.out''' , '''w+''' ) as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line , key ) )
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
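# Editor's quick check (added; assumes the de-mangled class above): XOR with
# the same key is an involution, so encrypt_string followed by decrypt_string
# restores the input.
_crypt = XORCipher()
assert _crypt.decrypt_string(_crypt.encrypt_string("hallo welt" , 67 ) , 67 ) == "hallo welt"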
| 98 |
'''simple docstring'''
def __a(numa : int , numb : int ):
    '''Return True when `numa` and `numb` have opposite signs (sign bit of the XOR).'''
    return numa ^ numb < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
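    # Editor's sketch (added): XOR of two ints is negative exactly when their
    # sign bits differ, which is what the fixed two-parameter version above tests.
    assert __a(1 , -1 ) is True
    assert __a(-1 , -1 ) is False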
| 18 | 0 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SCREAMING_SNAKE_CASE = get_tests_dir('fixtures')
SCREAMING_SNAKE_CASE = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
SCREAMING_SNAKE_CASE = get_tests_dir('fixtures/dummy-config.json')
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self ):
__a = 0
def snake_case_ ( self ):
__a = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(__A , __A )
def snake_case_ ( self ):
__a = AutoFeatureExtractor.from_pretrained(__A )
self.assertIsInstance(__A , __A )
def snake_case_ ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
__a = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
__a = AutoFeatureExtractor.from_pretrained(__A ).to_dict()
config_dict.pop("""feature_extractor_type""" )
__a = WavaVecaFeatureExtractor(**__A )
# save in new folder
model_config.save_pretrained(__A )
config.save_pretrained(__A )
__a = AutoFeatureExtractor.from_pretrained(__A )
# make sure private variable is not incorrectly saved
__a = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(__A , __A )
def snake_case_ ( self ):
__a = AutoFeatureExtractor.from_pretrained(__A )
self.assertIsInstance(__A , __A )
def snake_case_ ( self ):
with self.assertRaisesRegex(
__A , """bert-base is not a local folder and is not a valid model identifier""" ):
__a = AutoFeatureExtractor.from_pretrained("""bert-base""" )
def snake_case_ ( self ):
with self.assertRaisesRegex(
__A , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
__a = AutoFeatureExtractor.from_pretrained(__A , revision="""aaaaaa""" )
def snake_case_ ( self ):
with self.assertRaisesRegex(
__A , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
__a = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" )
def snake_case_ ( self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__A ):
__a = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__A ):
__a = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=__A )
__a = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=__A )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__A )
__a = AutoFeatureExtractor.from_pretrained(__A , trust_remote_code=__A )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
def snake_case_ ( self ):
try:
AutoConfig.register("""custom""" , __A )
AutoFeatureExtractor.register(__A , __A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__A ):
AutoFeatureExtractor.register(__A , __A )
# Now that the config is registered, it can be used as any other config with the auto-API
__a = CustomFeatureExtractor.from_pretrained(__A )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__A )
__a = AutoFeatureExtractor.from_pretrained(__A )
self.assertIsInstance(__A , __A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def snake_case_ ( self ):
class __UpperCAmelCase ( __A ):
"""simple docstring"""
_lowerCamelCase = True
try:
AutoConfig.register("""custom""" , __A )
AutoFeatureExtractor.register(__A , __A )
# If remote code is not set, the default is to use local
__a = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
__a = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=__A )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
__a = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=__A )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(not hasattr(__A , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
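    # Editor's note (added): the try/finally blocks above unregister the custom
    # config and feature extractor so a failing assertion cannot leak registry
    # state into unrelated tests.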
| 99 |
'''simple docstring'''
from __future__ import annotations
def p_series(nth_term: int | float | str , power: int | float | str ):
    '''Return the first `nth_term` terms of the p-series 1 + 1/2**p + 1/3**p + ...'''
    if nth_term == "":
        return [""]
    nth_term = int(nth_term )
    power = int(power )
    series = []
    for temp in range(int(nth_term ) ):
        series.append(F'''1 / {pow(temp + 1 , int(power ) )}''' if series else "1" )
    return series
if __name__ == "__main__":
import doctest
doctest.testmod()
    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
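    # Editor's worked example (added): p_series(5, 2) returns the first five
    # terms of the p-series with p = 2: ["1", "1 / 4", "1 / 9", "1 / 16", "1 / 25"].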
| 18 | 0 |
import heapq
import sys
import numpy as np
_A : str = tuple[int, int]
class __snake_case :
'''simple docstring'''
def __init__( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = set()
def lowercase_ ( self ):
'''simple docstring'''
if not self.empty():
return self.elements[0][0]
else:
return float('''inf''' )
def lowercase_ ( self ):
'''simple docstring'''
return len(self.elements ) == 0
def lowercase_ ( self , A_ , A_ ):
'''simple docstring'''
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(A_ )
else:
# update
# print("update", item)
SCREAMING_SNAKE_CASE__ = []
((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def lowercase_ ( self , A_ ):
'''simple docstring'''
if item in self.set:
self.set.remove(A_ )
SCREAMING_SNAKE_CASE__ = []
((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def lowercase_ ( self ):
'''simple docstring'''
return self.elements[0][1]
def lowercase_ ( self ):
'''simple docstring'''
((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) = heapq.heappop(self.elements )
self.set.remove(A_ )
return (priority, item)
def consistent_heuristic(p: TPos, goal: TPos):
    # euclidean distance
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_2(p: TPos, goal: TPos):
    # integer division by the time variable t
    return consistent_heuristic(p, goal) // t


def heuristic_1(p: TPos, goal: TPos):
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]):
    ans = g_function[start] + Wa * heuristics[i](start, goal)
    return ans
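

# Worked example (illustrative): with p = (0, 0) and goal = (3, 4),
# consistent_heuristic returns 5.0 (Euclidean), heuristic_1 returns 7
# (Manhattan), and heuristic_2 returns 5.0 // t, shrinking as the shared
# clock t grows.
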
def do_something(back_pointer, goal: TPos, start: TPos) -> None:
    """Draw the grid, trace the found path back from goal to start, and exit."""
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()
def valid(p: TPos) -> bool:
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
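

# For the 20x20 grid used below (illustrative): valid((0, 19)) is True,
# while valid((-1, 3)) and valid((20, 0)) are both False.
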
def expand_state(
    s,
    j,
    visited,
    g_function,
    close_list_anchor,
    close_list_inad,
    open_list,
    back_pointer,
) -> None:
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= Wa * key(
                                neighbours, 0, goal, g_function
                            ):
                                open_list[j].put(
                                    neighbours, key(neighbours, var, goal, g_function)
                                )
def make_common_ground() -> list[TPos]:
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))

    for x in range(15, 20):
        some_list.append((x, 17))

    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))

    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
    (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1),
    (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
Wa = 1  # heuristic weight shared by the anchor and the inadmissible searches
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int) -> None:
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= Wa * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s)
                        expand_state(
                            get_s,
                            0,
                            visited,
                            g_function,
                            close_list_anchor,
                            close_list_inad,
                            open_list,
                            back_pointer,
                        )
                        close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
'''simple docstring'''
from ..utils import DummyObject, requires_backends
# Placeholder raised at attribute access when torch is unavailable. The
# original module repeats this stub once per torch-backed class (the class
# names were elided in this dump).
class lowerCAmelCase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
# Placeholder function stub; the original module defines one such function
# per torch-backed utility (the function names were elided in this dump).
def __a(*args, **kwargs):
    requires_backends(__a, ["torch"])
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
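

# Example mapping (illustrative): rename_key("layers.0.blocks.0.modulation.f.weight")
# returns "focalnet.encoder.stages.0.layers.0.modulation.projection_in.weight".
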
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)

    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
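
# Example invocation (illustrative; the script filename and paths are
# assumptions, not taken from the original):
#   python convert_focalnet_to_hf_format.py --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny
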
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their job links in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
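

# Illustrative call (assumes a GitHub token with actions:read scope; the run
# id and job names are made up):
#   get_job_links(4_321_987, token="ghp_...") ->
#   {"run_tests_torch": "https://github.com/huggingface/transformers/...", ...}
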
def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact to `output_dir` as `{artifact_name}.zip`."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result
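

# Each returned entry has the shape [error_line, error, failed_test, job_link],
# e.g. (illustrative values):
#   ["tests/models/bert/test_modeling_bert.py:52", "ValueError: shape mismatch",
#    "tests/models/bert/test_modeling_bert.py::BertModelTest::test_forward", None]
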
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all the downloaded artifact files."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors
def reduce_by_error(logs, error_filter=None):
    """Count the occurrences of each error."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    """Get the model name from a test method path."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None
    return test
def reduce_by_model(logs, error_filter=None):
    """Count the occurrences of each error per model."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))
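

# Illustrative use (the paths are assumptions): after downloading a DialoGPT
# fine-tuned pickle, convert_dialogpt_checkpoint("./small_ft.pkl",
# "./DialoGPT-small") writes the renamed weights to
# "./DialoGPT-small/pytorch_model.bin" (WEIGHTS_NAME).
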
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
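
    # The same denoising pattern outside the test harness (illustrative
    # sketch; `model` stands for any callable returning a noise residual):
    #   scheduler = DPMSolverSinglestepScheduler()
    #   scheduler.set_timesteps(10)
    #   for t in scheduler.timesteps:
    #       residual = model(sample, t)
    #       sample = scheduler.step(residual, t, sample).prev_sample
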
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
_lowerCAmelCase = 50
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(_lowerCAmelCase )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
_lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample
_lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2574 ) < 1E-3
def _snake_case ( self ) -> Optional[Any]:
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCAmelCase )
def _snake_case ( self ) -> List[Any]:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
_lowerCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
_lowerCAmelCase = self.full_loop(scheduler=_lowerCAmelCase )
_lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
_lowerCAmelCase = DEISMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_lowerCAmelCase = self.full_loop(scheduler=_lowerCAmelCase )
_lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def _snake_case ( self ) -> str:
self.check_over_configs(thresholding=_lowerCAmelCase )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_lowerCAmelCase , prediction_type=_lowerCAmelCase , sample_max_value=_lowerCAmelCase , algorithm_type="dpmsolver++" , solver_order=_lowerCAmelCase , solver_type=_lowerCAmelCase , )
    def _snake_case ( self ) -> Dict:
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def _snake_case ( self ) -> Union[str, Any]:
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order , solver_type=solver_type , prediction_type=prediction_type , algorithm_type=algorithm_type , )
                        sample = self.full_loop(
                            solver_order=order , solver_type=solver_type , prediction_type=prediction_type , algorithm_type=algorithm_type , )
                        assert not torch.isnan(sample ).any(), "Samples have nan numbers"
    def _snake_case ( self ) -> Optional[Any]:
        self.check_over_configs(lower_order_final=True )
        self.check_over_configs(lower_order_final=False )
def _snake_case ( self ) -> Optional[Any]:
self.check_over_configs(lambda_min_clipped=-float("inf" ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
    def _snake_case ( self ) -> str:
        self.check_over_configs(variance_type=None )
        self.check_over_configs(variance_type="learned_range" )
    def _snake_case ( self ) -> int:
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=0 )
    def _snake_case ( self ) -> Any:
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2791 ) < 1E-3
    def _snake_case ( self ) -> List[str]:
        sample = self.full_loop(use_karras_sigmas=True )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2248 ) < 1E-3
    def _snake_case ( self ) -> Union[str, Any]:
        sample = self.full_loop(prediction_type="v_prediction" )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.1453 ) < 1E-3
    def _snake_case ( self ) -> Any:
        sample = self.full_loop(prediction_type="v_prediction" , use_karras_sigmas=True )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.0649 ) < 1E-3
    def _snake_case ( self ) -> List[Any]:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True , dynamic_thresholding_ratio=0 )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        # half-precision input must stay half precision through the whole loop
        assert sample.dtype == torch.float16
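
# Hedged usage sketch (not part of the test class above): a minimal denoising loop
# over the public diffusers API these tests exercise. The "model output" below is
# plain random noise, so the result is meaningless; the point is only the
# set_timesteps -> step -> prev_sample call flow.
if __name__ == "__main__":
    demo_scheduler = DPMSolverSinglestepScheduler()
    demo_scheduler.set_timesteps(10)
    demo_sample = torch.randn(1, 3, 8, 8)
    for t in demo_scheduler.timesteps:
        noise_pred = torch.randn_like(demo_sample)  # stand-in for a real UNet call
        demo_sample = demo_scheduler.step(noise_pred, t, demo_sample).prev_sample
    print(demo_sample.shape)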
| 18 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase :
def __init__( self : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : Any=1_3 , __lowerCamelCase : int=3_0 , __lowerCamelCase : Any=2 , __lowerCamelCase : Any=3 , __lowerCamelCase : List[str]=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Tuple=3_2 , __lowerCamelCase : Optional[Any]=5 , __lowerCamelCase : Optional[int]=4 , __lowerCamelCase : Optional[Any]=3_7 , __lowerCamelCase : Optional[Any]="gelu" , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : List[Any]=0.1 , __lowerCamelCase : Tuple=1_0 , __lowerCamelCase : List[str]=0.0_2 , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : List[str]=2 , ):
"""simple docstring"""
_snake_case = parent
_snake_case = batch_size
_snake_case = image_size
_snake_case = patch_size
_snake_case = num_channels
_snake_case = is_training
_snake_case = use_labels
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = type_sequence_label_size
_snake_case = initializer_range
_snake_case = scope
_snake_case = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_snake_case = (image_size // patch_size) ** 2
_snake_case = num_patches + 1
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
_snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case = self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple ):
"""simple docstring"""
_snake_case = ViTModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_snake_case = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : Any , __lowerCamelCase : Any , __lowerCamelCase : Tuple , __lowerCamelCase : str ):
"""simple docstring"""
_snake_case = ViTForMaskedImageModeling(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_snake_case = model(__lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_snake_case = 1
_snake_case = ViTForMaskedImageModeling(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_snake_case = model(__lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Tuple ):
"""simple docstring"""
_snake_case = self.type_sequence_label_size
_snake_case = ViTForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_snake_case = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_snake_case = 1
_snake_case = ViTForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_snake_case = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        # unpack into the names the lines below actually use
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,unittest.TestCase ):
A__ : Optional[int] = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
A__ : Dict = (
{'''feature-extraction''': ViTModel, '''image-classification''': ViTForImageClassification}
if is_torch_available()
else {}
)
A__ : Optional[int] = True
A__ : str = False
A__ : Any = False
A__ : List[Any] = False
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
_snake_case = ViTModelTester(self )
_snake_case = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=3_7 )
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
pass
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(__lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_snake_case = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) )
def __UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(__lowerCamelCase )
_snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def __UpperCAmelCase ( self : str ):
"""simple docstring"""
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def __UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCamelCase )
def __UpperCAmelCase ( self : str ):
"""simple docstring"""
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def __UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = ViTModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def snake_case ( ) -> Any:
_snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
@cached_property
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None
@slow
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
_snake_case = ViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' ).to(__lowerCamelCase )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(images=__lowerCamelCase , return_tensors='''pt''' ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
_snake_case = model(**__lowerCamelCase )
# verify the logits
_snake_case = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
_snake_case = torch.tensor([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1E-4 ) )
@slow
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
_snake_case = ViTModel.from_pretrained('''facebook/dino-vits8''' ).to(__lowerCamelCase )
_snake_case = ViTImageProcessor.from_pretrained('''facebook/dino-vits8''' , size=4_8_0 )
_snake_case = prepare_img()
_snake_case = image_processor(images=__lowerCamelCase , return_tensors='''pt''' )
_snake_case = inputs.pixel_values.to(__lowerCamelCase )
# forward pass
with torch.no_grad():
_snake_case = model(__lowerCamelCase , interpolate_pos_encoding=__lowerCamelCase )
# verify the logits
_snake_case = torch.Size((1, 3_6_0_1, 3_8_4) )
self.assertEqual(outputs.last_hidden_state.shape , __lowerCamelCase )
_snake_case = torch.tensor(
[[4.2_3_4_0, 4.3_9_0_6, -6.6_6_9_2], [4.5_4_6_3, 1.8_9_2_8, -6.7_2_5_7], [4.4_4_2_9, 0.8_4_9_6, -5.8_5_8_5]] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __lowerCamelCase , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
_snake_case = ViTModel.from_pretrained('''facebook/dino-vits8''' , torch_dtype=torch.floataa , device_map='''auto''' )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(images=__lowerCamelCase , return_tensors='''pt''' )
_snake_case = inputs.pixel_values.to(__lowerCamelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
_snake_case = model(__lowerCamelCase )
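
# Hedged usage sketch (mirrors the integration test above; needs Hub access and
# reuses this file's own `snake_case` fixture helper for the input image):
if __name__ == "__main__":
    image = snake_case()
    processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
    model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    print(model.config.id2label[logits.argmax(-1).item()])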
| 103 |
'''simple docstring'''
from __future__ import annotations
def __a(nums : list ):
    '''
    Return the arithmetic mean of the numbers in ``nums``.

    >>> __a([1, 2, 3, 4])
    2.5
    >>> __a([])
    Traceback (most recent call last):
        ...
    ValueError: List is empty
    '''
    if not nums:
        raise ValueError("List is empty" )
    return sum(nums ) / len(nums )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 | 0 |
"""simple docstring"""
from functools import lru_cache
@lru_cache
def _lowerCamelCase ( num : int ) -> int:
    """
    Compute ``num!`` recursively, caching intermediate results via ``lru_cache``.

    >>> _lowerCamelCase(5)
    120
    >>> _lowerCamelCase(0)
    1
    """
    if num < 0:
        raise ValueError("Number should not be negative." )
    return 1 if num in (0, 1) else num * _lowerCamelCase(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 104 |
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_lowerCAmelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_lowerCAmelCase )
_lowerCAmelCase = -1
_lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCAmelCase )
_lowerCAmelCase = model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase )
_lowerCAmelCase = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
_lowerCAmelCase = TextStreamer(_lowerCAmelCase )
model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase , streamer=_lowerCAmelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_lowerCAmelCase = cs.out[:-1]
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
    def _snake_case ( self ) -> Union[str, Any]:
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(torch_device )
        greedy_ids = model.generate(input_ids , max_new_tokens=10 , do_sample=False )
        greedy_text = tokenizer.decode(greedy_ids[0] )
        streamer = TextIteratorStreamer(tokenizer )
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate , kwargs=generation_kwargs )
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text
        self.assertEqual(streamer_text , greedy_text )
def _snake_case ( self ) -> List[str]:
_lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_lowerCAmelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_lowerCAmelCase )
_lowerCAmelCase = -1
_lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCAmelCase )
_lowerCAmelCase = model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase )
_lowerCAmelCase = greedy_ids[:, input_ids.shape[1] :]
_lowerCAmelCase = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
_lowerCAmelCase = TextStreamer(_lowerCAmelCase , skip_prompt=_lowerCAmelCase )
model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase , streamer=_lowerCAmelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_lowerCAmelCase = cs.out[:-1]
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
def _snake_case ( self ) -> Dict:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
_lowerCAmelCase = AutoTokenizer.from_pretrained("distilgpt2" )
_lowerCAmelCase = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(_lowerCAmelCase )
_lowerCAmelCase = -1
_lowerCAmelCase = torch.ones((1, 5) , device=_lowerCAmelCase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_lowerCAmelCase = TextStreamer(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
model.generate(_lowerCAmelCase , max_new_tokens=1 , do_sample=_lowerCAmelCase , streamer=_lowerCAmelCase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_lowerCAmelCase = cs.out[:-1] # Remove the final "\n"
_lowerCAmelCase = tokenizer(_lowerCAmelCase , return_tensors="pt" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_lowerCAmelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_lowerCAmelCase )
_lowerCAmelCase = -1
_lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCAmelCase )
_lowerCAmelCase = TextIteratorStreamer(_lowerCAmelCase , timeout=0.001 )
_lowerCAmelCase = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
_lowerCAmelCase = Thread(target=model.generate , kwargs=_lowerCAmelCase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(_lowerCAmelCase ):
_lowerCAmelCase = ""
for new_text in streamer:
streamer_text += new_text
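
# Hedged usage sketch (same pattern as the iterator test above; "distilgpt2" is
# the checkpoint already used by the decode_kwargs test):
if __name__ == "__main__":
    tok = AutoTokenizer.from_pretrained("distilgpt2")
    lm = AutoModelForCausalLM.from_pretrained("distilgpt2")
    enc = tok("Streaming generation lets you", return_tensors="pt")
    live_streamer = TextIteratorStreamer(tok, skip_prompt=True, skip_special_tokens=True)
    Thread(target=lm.generate, kwargs={**enc, "max_new_tokens": 20, "streamer": live_streamer}).start()
    for chunk in live_streamer:
        print(chunk, end="", flush=True)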
| 18 | 0 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __UpperCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = old_name
if "patch_embed" in old_name:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = old_name.split('.' )
if layer == "0":
SCREAMING_SNAKE_CASE_ : Any = old_name.replace('0' , 'convolution1' )
elif layer == "1":
SCREAMING_SNAKE_CASE_ : List[str] = old_name.replace('1' , 'batchnorm_before' )
elif layer == "3":
SCREAMING_SNAKE_CASE_ : Any = old_name.replace('3' , 'convolution2' )
else:
SCREAMING_SNAKE_CASE_ : Optional[Any] = old_name.replace('4' , 'batchnorm_after' )
if "network" in old_name and re.search(R'\d\.\d' , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = R'\b\d{2}\b'
if bool(re.search(lowerCamelCase_ , lowerCamelCase_ ) ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = re.search(R'\d\.\d\d.' , lowerCamelCase_ ).group()
else:
SCREAMING_SNAKE_CASE_ : List[Any] = re.search(R'\d\.\d.' , lowerCamelCase_ ).group()
if int(match[0] ) < 6:
SCREAMING_SNAKE_CASE_ : int = old_name.replace(lowerCamelCase_ , '' )
SCREAMING_SNAKE_CASE_ : Dict = trimmed_name.replace('network' , match[0] + '.meta4D_layers.blocks.' + match[2:-1] )
SCREAMING_SNAKE_CASE_ : List[Any] = 'intermediate_stages.' + trimmed_name
else:
SCREAMING_SNAKE_CASE_ : Any = old_name.replace(lowerCamelCase_ , '' )
if int(match[2] ) < num_meta4D_last_stage:
SCREAMING_SNAKE_CASE_ : str = trimmed_name.replace('network' , 'meta4D_layers.blocks.' + match[2] )
else:
SCREAMING_SNAKE_CASE_ : Dict = str(int(match[2] ) - num_meta4D_last_stage )
SCREAMING_SNAKE_CASE_ : Optional[Any] = trimmed_name.replace('network' , 'meta3D_layers.blocks.' + layer_index )
if "norm1" in old_name:
SCREAMING_SNAKE_CASE_ : Optional[Any] = trimmed_name.replace('norm1' , 'layernorm1' )
elif "norm2" in old_name:
SCREAMING_SNAKE_CASE_ : Optional[Any] = trimmed_name.replace('norm2' , 'layernorm2' )
elif "fc1" in old_name:
SCREAMING_SNAKE_CASE_ : Optional[Any] = trimmed_name.replace('fc1' , 'linear_in' )
elif "fc2" in old_name:
SCREAMING_SNAKE_CASE_ : Any = trimmed_name.replace('fc2' , 'linear_out' )
SCREAMING_SNAKE_CASE_ : List[Any] = 'last_stage.' + trimmed_name
elif "network" in old_name and re.search(R'.\d.' , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : str = old_name.replace('network' , 'intermediate_stages' )
if "fc" in new_name:
SCREAMING_SNAKE_CASE_ : Optional[Any] = new_name.replace('fc' , 'convolution' )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
SCREAMING_SNAKE_CASE_ : List[Any] = new_name.replace('norm1' , 'batchnorm_before' )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
SCREAMING_SNAKE_CASE_ : Tuple = new_name.replace('norm2' , 'batchnorm_after' )
if "proj" in new_name:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = new_name.replace('proj' , 'projection' )
if "dist_head" in new_name:
SCREAMING_SNAKE_CASE_ : Tuple = new_name.replace('dist_head' , 'distillation_classifier' )
elif "head" in new_name:
SCREAMING_SNAKE_CASE_ : Tuple = new_name.replace('head' , 'classifier' )
elif "patch_embed" in new_name:
SCREAMING_SNAKE_CASE_ : Any = 'efficientformer.' + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
SCREAMING_SNAKE_CASE_ : Optional[int] = new_name.replace('norm' , 'layernorm' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'efficientformer.' + new_name
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'efficientformer.encoder.' + new_name
return new_name
def __UpperCAmelCase ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : str ) -> Dict:
"""simple docstring"""
for key in checkpoint.copy().keys():
SCREAMING_SNAKE_CASE_ : Optional[Any] = checkpoint.pop(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = val
return checkpoint
def __UpperCAmelCase ( ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = 'http://images.cocodataset.org/val2017/000000039769.jpg'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw )
return image
def __UpperCAmelCase ( lowerCamelCase_ : Path , lowerCamelCase_ : Path , lowerCamelCase_ : Path , lowerCamelCase_ : bool ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = torch.load(lowerCamelCase_ , map_location='cpu' )['model']
SCREAMING_SNAKE_CASE_ : Any = EfficientFormerConfig.from_json_file(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : List[Any] = EfficientFormerForImageClassificationWithTeacher(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = '_'.join(checkpoint_path.split('/' )[-1].split('.' )[0].split('_' )[:-1] )
    SCREAMING_SNAKE_CASE_ : Dict = config.depths[-1] - config.num_meta3d_blocks + 1
SCREAMING_SNAKE_CASE_ : Tuple = convert_torch_checkpoint(lowerCamelCase_ , lowerCamelCase_ )
model.load_state_dict(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE_ : Optional[int] = {
'bilinear': PILImageResampling.BILINEAR,
'bicubic': PILImageResampling.BICUBIC,
'nearest': PILImageResampling.NEAREST,
}
# prepare image
SCREAMING_SNAKE_CASE_ : List[str] = prepare_img()
SCREAMING_SNAKE_CASE_ : List[Any] = 2_56
SCREAMING_SNAKE_CASE_ : Any = 2_24
SCREAMING_SNAKE_CASE_ : Any = EfficientFormerImageProcessor(
size={'shortest_edge': image_size} , crop_size={'height': crop_size, 'width': crop_size} , resample=pillow_resamplings['bicubic'] , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = processor(images=lowerCamelCase_ , return_tensors='pt' ).pixel_values
# original processing pipeline
SCREAMING_SNAKE_CASE_ : List[str] = Compose(
[
Resize(lowerCamelCase_ , interpolation=pillow_resamplings['bicubic'] ),
CenterCrop(lowerCamelCase_ ),
ToTensor(),
Normalize(lowerCamelCase_ , lowerCamelCase_ ),
] )
SCREAMING_SNAKE_CASE_ : Any = image_transforms(lowerCamelCase_ ).unsqueeze(0 )
assert torch.allclose(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Dict = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : str = outputs.logits
SCREAMING_SNAKE_CASE_ : Any = (1, 10_00)
if "l1" in model_name:
SCREAMING_SNAKE_CASE_ : str = torch.Tensor(
[-0.1_3_1_2, 0.4_3_5_3, -1.0_4_9_9, -0.5_1_2_4, 0.4_1_8_3, -0.6_7_9_3, -1.3_7_7_7, -0.0_8_9_3, -0.7_3_5_8, -2.4_3_2_8] )
assert torch.allclose(logits[0, :10] , lowerCamelCase_ , atol=1E-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.Tensor(
[-1.3_1_5_0, -1.5_4_5_6, -1.2_5_5_6, -0.8_4_9_6, -0.7_1_2_7, -0.7_8_9_7, -0.9_7_2_8, -0.3_0_5_2, 0.3_7_5_1, -0.3_1_2_7] )
assert torch.allclose(logits[0, :10] , lowerCamelCase_ , atol=1E-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.Tensor(
[-1.0_2_8_3, -1.4_1_3_1, -0.5_6_4_4, -1.3_1_1_5, -0.5_7_8_5, -1.2_0_4_9, -0.7_5_2_8, 0.1_9_9_2, -0.3_8_2_2, -0.0_8_7_8] )
assert logits.shape == expected_shape
else:
raise ValueError(
F'Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7' )
# Save Checkpoints
Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ )
model.save_pretrained(lowerCamelCase_ )
    print(F'Checkpoint successfully converted. Model saved at {pytorch_dump_path}' )
processor.save_pretrained(lowerCamelCase_ )
    print(F'Processor successfully saved at {pytorch_dump_path}' )
if push_to_hub:
print('Pushing model to the hub...' )
model.push_to_hub(
repo_id=F'Bearnardd/{pytorch_dump_path}' , commit_message='Add model' , use_temp_dir=lowerCamelCase_ , )
processor.push_to_hub(
repo_id=F'Bearnardd/{pytorch_dump_path}' , commit_message='Add image processor' , use_temp_dir=lowerCamelCase_ , )
if __name__ == "__main__":
UpperCamelCase__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
UpperCamelCase__ : List[str] = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
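
# Hedged example invocation of this script (the script and file names below are
# placeholders for illustration, not shipped artifacts):
#
#   python convert_efficientformer_checkpoint.py \
#       --pytorch_model_path ./efficientformer_l1.pth \
#       --config_file ./efficientformer_l1_config.json \
#       --pytorch_dump_path ./efficientformer-l1-converted \
#       --no-push_to_hub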
| 105 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class lowerCAmelCase_ ( __magic_name__ ):
__lowerCamelCase : Union[str, Any] = "blenderbot-small"
__lowerCamelCase : Optional[Any] = ["past_key_values"]
__lowerCamelCase : str = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , _lowerCAmelCase=50265 , _lowerCAmelCase=512 , _lowerCAmelCase=8 , _lowerCAmelCase=2048 , _lowerCAmelCase=16 , _lowerCAmelCase=8 , _lowerCAmelCase=2048 , _lowerCAmelCase=16 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase="gelu" , _lowerCAmelCase=512 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1 , _lowerCAmelCase=False , _lowerCAmelCase=0 , _lowerCAmelCase=1 , _lowerCAmelCase=2 , _lowerCAmelCase=2 , **_lowerCAmelCase , ) -> Dict:
_lowerCAmelCase = vocab_size
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = d_model
_lowerCAmelCase = encoder_ffn_dim
_lowerCAmelCase = encoder_layers
_lowerCAmelCase = encoder_attention_heads
_lowerCAmelCase = decoder_ffn_dim
_lowerCAmelCase = decoder_layers
_lowerCAmelCase = decoder_attention_heads
_lowerCAmelCase = dropout
_lowerCAmelCase = attention_dropout
_lowerCAmelCase = activation_dropout
_lowerCAmelCase = activation_function
_lowerCAmelCase = init_std
_lowerCAmelCase = encoder_layerdrop
_lowerCAmelCase = decoder_layerdrop
_lowerCAmelCase = use_cache
_lowerCAmelCase = encoder_layers
_lowerCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , is_encoder_decoder=_lowerCAmelCase , decoder_start_token_id=_lowerCAmelCase , forced_eos_token_id=_lowerCAmelCase , **_lowerCAmelCase , )
class lowerCAmelCase_ ( __magic_name__ ):
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
_lowerCAmelCase = {0: "batch"}
_lowerCAmelCase = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
_lowerCAmelCase = {0: "batch", 1: "decoder_sequence"}
_lowerCAmelCase = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(_lowerCAmelCase , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
_lowerCAmelCase = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
_lowerCAmelCase , _lowerCAmelCase = self.num_layers
for i in range(_lowerCAmelCase ):
_lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"}
_lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"}
else:
_lowerCAmelCase = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase = super().outputs
else:
_lowerCAmelCase = super(_lowerCAmelCase , self ).outputs
if self.use_past:
_lowerCAmelCase , _lowerCAmelCase = self.num_layers
for i in range(_lowerCAmelCase ):
_lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"}
_lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]:
_lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Generate decoder inputs
_lowerCAmelCase = seq_length if not self.use_past else 1
_lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
_lowerCAmelCase = dict(**_lowerCAmelCase , **_lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
_lowerCAmelCase , _lowerCAmelCase = common_inputs["input_ids"].shape
_lowerCAmelCase = common_inputs["decoder_input_ids"].shape[1]
_lowerCAmelCase , _lowerCAmelCase = self.num_attention_heads
_lowerCAmelCase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowerCAmelCase = decoder_seq_length + 3
_lowerCAmelCase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_lowerCAmelCase = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(_lowerCAmelCase , _lowerCAmelCase )] , dim=1 )
_lowerCAmelCase = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_lowerCAmelCase , _lowerCAmelCase = self.num_layers
_lowerCAmelCase = min(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = max(_lowerCAmelCase , _lowerCAmelCase ) - min_num_layers
_lowerCAmelCase = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(_lowerCAmelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(_lowerCAmelCase ),
torch.zeros(_lowerCAmelCase ),
torch.zeros(_lowerCAmelCase ),
torch.zeros(_lowerCAmelCase ),
) )
# TODO: test this.
_lowerCAmelCase = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(_lowerCAmelCase , _lowerCAmelCase ):
common_inputs["past_key_values"].append((torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) )
return common_inputs
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]:
_lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
_lowerCAmelCase , _lowerCAmelCase = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
_lowerCAmelCase = seqlen + 2
_lowerCAmelCase , _lowerCAmelCase = self.num_layers
_lowerCAmelCase , _lowerCAmelCase = self.num_attention_heads
_lowerCAmelCase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowerCAmelCase = common_inputs["attention_mask"].dtype
_lowerCAmelCase = torch.cat(
[common_inputs["attention_mask"], torch.ones(_lowerCAmelCase , _lowerCAmelCase , dtype=_lowerCAmelCase )] , dim=1 )
_lowerCAmelCase = [
(torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) for _ in range(_lowerCAmelCase )
]
return common_inputs
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_lowerCAmelCase = compute_effective_axis_dimension(
_lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_lowerCAmelCase = tokenizer.num_special_tokens_to_add(_lowerCAmelCase )
_lowerCAmelCase = compute_effective_axis_dimension(
_lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowerCAmelCase )
# Generate dummy inputs according to compute batch and sequence
_lowerCAmelCase = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
_lowerCAmelCase = dict(tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase ) )
return common_inputs
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase )
elif self.task == "causal-lm":
_lowerCAmelCase = self._generate_dummy_inputs_for_causal_lm(
_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase )
else:
_lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase )
return common_inputs
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase = super()._flatten_past_key_values_(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
_lowerCAmelCase = super(_lowerCAmelCase , self )._flatten_past_key_values_(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
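
# Hedged usage sketch: the definitions above mirror transformers' public
# BlenderbotSmallConfig, so the defaults can be inspected like this.
if __name__ == "__main__":
    from transformers import BlenderbotSmallConfig

    cfg = BlenderbotSmallConfig()
    print(cfg.model_type, cfg.d_model, cfg.encoder_layers)  # blenderbot-small 512 8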
| 18 | 0 |
from __future__ import annotations
__snake_case :Union[str, Any] =tuple[int, int, int]
__snake_case :Tuple =tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
__snake_case :Tuple ='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# -------------------------- default selection --------------------------
# rotors --------------------------
__snake_case :List[str] ='EGZWVONAHDCLFQMSIPJBYUKXTR'
__snake_case :Any ='FOBHMDKEXQNRAULPGSJVTYICZW'
__snake_case :Union[str, Any] ='ZJXESIUQLHAVRMDOYGTNFWPBKC'
# reflector --------------------------
__snake_case :List[str] ={
'A': 'N',
'N': 'A',
'B': 'O',
'O': 'B',
'C': 'P',
'P': 'C',
'D': 'Q',
'Q': 'D',
'E': 'R',
'R': 'E',
'F': 'S',
'S': 'F',
'G': 'T',
'T': 'G',
'H': 'U',
'U': 'H',
'I': 'V',
'V': 'I',
'J': 'W',
'W': 'J',
'K': 'X',
'X': 'K',
'L': 'Y',
'Y': 'L',
'M': 'Z',
'Z': 'M',
}
# -------------------------- extra rotors --------------------------
__snake_case :List[Any] ='RMDJXFUWGISLHVTCQNKYPBEZOA'
__snake_case :Tuple ='SGLCPQWZHKXAREONTFBVIYJUDM'
__snake_case :str ='HVSICLTYKQUBXDWAJZOMFGPREN'
__snake_case :int ='RZWQHFMVDBKICJLNTUXAGYPSOE'
__snake_case :Dict ='LFKIJODBEGAMQPXVUHYSTCZRWN'
__snake_case :Any ='KOAEGVDHXPQZMLFTYWJNBRCIUS'
def lowerCamelCase_ ( lowerCAmelCase__ : RotorPositionT , lowerCAmelCase__ : RotorSelectionT , lowerCAmelCase__ : str ) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
'''simple docstring'''
if (unique_rotsel := len(set(lowerCAmelCase__ ) )) < 3:
A = F'''Please use 3 unique rotors (not {unique_rotsel})'''
raise Exception(lowerCAmelCase__ )
# Checks if rotor positions are valid
A , A , A = rotpos
if not 0 < rotorposa <= len(lowerCAmelCase__ ):
        A = F'''First rotor position is not within range of 1..26 ({rotorposa})'''
raise ValueError(lowerCAmelCase__ )
if not 0 < rotorposa <= len(lowerCAmelCase__ ):
A = F'''Second rotor position is not within range of 1..26 ({rotorposa})'''
raise ValueError(lowerCAmelCase__ )
if not 0 < rotorposa <= len(lowerCAmelCase__ ):
A = F'''Third rotor position is not within range of 1..26 ({rotorposa})'''
raise ValueError(lowerCAmelCase__ )
# Validates string and returns dict
A = _plugboard(lowerCAmelCase__ )
return rotpos, rotsel, pbdict
def lowerCamelCase_ ( lowerCAmelCase__ : str ) -> dict[str, str]:
'''simple docstring'''
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
A = F'''Plugboard setting isn\'t type string ({type(lowerCAmelCase__ )})'''
raise TypeError(lowerCAmelCase__ )
elif len(lowerCAmelCase__ ) % 2 != 0:
A = F'''Odd number of symbols ({len(lowerCAmelCase__ )})'''
raise Exception(lowerCAmelCase__ )
elif pbstring == "":
return {}
    # str.replace returns a new string, so the stripped result must be rebound
    pbstring = pbstring.replace(' ' , '' )
# Checks if all characters are unique
A = set()
for i in pbstring:
if i not in abc:
A = F'''\'{i}\' not in list of symbols'''
raise Exception(lowerCAmelCase__ )
elif i in tmppbl:
A = F'''Duplicate symbol ({i})'''
raise Exception(lowerCAmelCase__ )
else:
tmppbl.add(lowerCAmelCase__ )
del tmppbl
# Created the dictionary
A = {}
for j in range(0 , len(lowerCAmelCase__ ) - 1 , 2 ):
A = pbstring[j + 1]
A = pbstring[j]
return pb
def lowerCamelCase_ ( lowerCAmelCase__ : str , lowerCAmelCase__ : RotorPositionT , lowerCAmelCase__ : RotorSelectionT = (rotora, rotora, rotora) , lowerCAmelCase__ : str = "" , ) -> str:
'''simple docstring'''
A = text.upper()
A , A , A = _validator(
lowerCAmelCase__ , lowerCAmelCase__ , plugb.upper() )
A , A , A = rotor_position
A , A , A = rotor_selection
rotorposa -= 1
rotorposa -= 1
rotorposa -= 1
A = []
# encryption/decryption process --------------------------
for symbol in text:
if symbol in abc:
# 1st plugboard --------------------------
if symbol in plugboard:
A = plugboard[symbol]
# rotor ra --------------------------
A = abc.index(lowerCAmelCase__ ) + rotorposa
A = rotora[index % len(lowerCAmelCase__ )]
# rotor rb --------------------------
A = abc.index(lowerCAmelCase__ ) + rotorposa
A = rotora[index % len(lowerCAmelCase__ )]
# rotor rc --------------------------
A = abc.index(lowerCAmelCase__ ) + rotorposa
A = rotora[index % len(lowerCAmelCase__ )]
# reflector --------------------------
# this is the reason you don't need another machine to decipher
A = reflector[symbol]
# 2nd rotors
A = abc[rotora.index(lowerCAmelCase__ ) - rotorposa]
A = abc[rotora.index(lowerCAmelCase__ ) - rotorposa]
A = abc[rotora.index(lowerCAmelCase__ ) - rotorposa]
# 2nd plugboard
if symbol in plugboard:
A = plugboard[symbol]
# moves/resets rotor positions
rotorposa += 1
if rotorposa >= len(lowerCAmelCase__ ):
A = 0
rotorposa += 1
if rotorposa >= len(lowerCAmelCase__ ):
A = 0
rotorposa += 1
if rotorposa >= len(lowerCAmelCase__ ):
A = 0
# else:
# pass
# Error could be also raised
# raise ValueError(
# 'Invalid symbol('+repr(symbol)+')')
result.append(lowerCAmelCase__ )
return "".join(lowerCAmelCase__ )
if __name__ == "__main__":
__snake_case :Optional[Any] ='This is my Python script that emulates the Enigma machine from WWII.'
__snake_case :Any =(1, 1, 1)
__snake_case :Optional[int] ='pictures'
__snake_case :Dict =(rotora, rotora, rotora)
__snake_case :Union[str, Any] =enigma(message, rotor_pos, rotor_sel, pb)
print('Encrypted message:', en)
print('Decrypted message:', enigma(en, rotor_pos, rotor_sel, pb)) | 106 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
_SCREAMING_SNAKE_CASE = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"
_SCREAMING_SNAKE_CASE = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n"
_SCREAMING_SNAKE_CASE = "\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
def _snake_case ( self ) -> List[str]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , reference_urls=[] , )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=False , ) -> str:
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
_lowerCAmelCase = np.array([re.sub(_lowerCAmelCase , "" , _lowerCAmelCase ) for x in predictions] )
_lowerCAmelCase = np.array([re.sub(_lowerCAmelCase , "" , _lowerCAmelCase ) for x in references] )
else:
_lowerCAmelCase = np.asarray(_lowerCAmelCase )
_lowerCAmelCase = np.asarray(_lowerCAmelCase )
if ignore_case:
_lowerCAmelCase = np.char.lower(_lowerCAmelCase )
_lowerCAmelCase = np.char.lower(_lowerCAmelCase )
if ignore_punctuation:
_lowerCAmelCase = string.punctuation.maketrans("" , "" , string.punctuation )
_lowerCAmelCase = np.char.translate(_lowerCAmelCase , table=_lowerCAmelCase )
_lowerCAmelCase = np.char.translate(_lowerCAmelCase , table=_lowerCAmelCase )
if ignore_numbers:
_lowerCAmelCase = string.digits.maketrans("" , "" , string.digits )
_lowerCAmelCase = np.char.translate(_lowerCAmelCase , table=_lowerCAmelCase )
_lowerCAmelCase = np.char.translate(_lowerCAmelCase , table=_lowerCAmelCase )
_lowerCAmelCase = predictions == references
return {"exact_match": np.mean(_lowerCAmelCase ) * 100}
| 18 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class lowercase_ :
"""simple docstring"""
def __init__( self : Optional[int], UpperCamelCase__ : Dict, UpperCamelCase__ : Dict=13, UpperCamelCase__ : Optional[Any]=7, UpperCamelCase__ : List[str]=True, UpperCamelCase__ : Union[str, Any]=True, UpperCamelCase__ : Optional[int]=True, UpperCamelCase__ : Optional[Any]=True, UpperCamelCase__ : Dict=99, UpperCamelCase__ : Dict=32, UpperCamelCase__ : Any=2, UpperCamelCase__ : Optional[int]=4, UpperCamelCase__ : Tuple=37, UpperCamelCase__ : Union[str, Any]="gelu", UpperCamelCase__ : Optional[Any]=0.1, UpperCamelCase__ : Any=0.1, UpperCamelCase__ : Union[str, Any]=5_12, UpperCamelCase__ : Optional[Any]=16, UpperCamelCase__ : List[str]=2, UpperCamelCase__ : List[Any]=0.02, UpperCamelCase__ : List[str]=3, UpperCamelCase__ : Optional[Any]=4, UpperCamelCase__ : Optional[Any]=None, UpperCamelCase__ : Union[str, Any]=0, ) -> str:
_A = parent
_A = batch_size
_A = seq_length
_A = is_training
_A = use_input_mask
_A = use_token_type_ids
_A = use_labels
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = type_sequence_label_size
_A = initializer_range
_A = num_labels
_A = num_choices
_A = scope
_A = projection_dim
def __UpperCAmelCase ( self : Any ) -> Optional[Any]:
_A = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
_A = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
_A = random_attention_mask([self.batch_size, self.seq_length] )
_A = None
if self.use_token_type_ids:
_A = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
_A = None
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size], self.type_sequence_label_size )
_A = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
_A = ids_tensor([self.batch_size], self.num_choices )
_A = BertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=UpperCamelCase__, initializer_range=self.initializer_range, )
_A = DPRConfig(projection_dim=self.projection_dim, **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self : Tuple, UpperCamelCase__ : Any, UpperCamelCase__ : Any, UpperCamelCase__ : Tuple, UpperCamelCase__ : List[Any], UpperCamelCase__ : Any, UpperCamelCase__ : List[Any], UpperCamelCase__ : Union[str, Any] ) -> int:
_A = TFDPRContextEncoder(config=UpperCamelCase__ )
_A = model(UpperCamelCase__, attention_mask=UpperCamelCase__, token_type_ids=UpperCamelCase__ )
_A = model(UpperCamelCase__, token_type_ids=UpperCamelCase__ )
_A = model(UpperCamelCase__ )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size) )
def __UpperCAmelCase ( self : Dict, UpperCamelCase__ : int, UpperCamelCase__ : List[Any], UpperCamelCase__ : List[str], UpperCamelCase__ : List[Any], UpperCamelCase__ : Tuple, UpperCamelCase__ : str, UpperCamelCase__ : str ) -> int:
_A = TFDPRQuestionEncoder(config=UpperCamelCase__ )
_A = model(UpperCamelCase__, attention_mask=UpperCamelCase__, token_type_ids=UpperCamelCase__ )
_A = model(UpperCamelCase__, token_type_ids=UpperCamelCase__ )
_A = model(UpperCamelCase__ )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size) )
def __UpperCAmelCase ( self : int, UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : List[str], UpperCamelCase__ : Tuple, UpperCamelCase__ : List[str], UpperCamelCase__ : Tuple, UpperCamelCase__ : Optional[int], UpperCamelCase__ : Optional[int] ) -> Any:
_A = TFDPRReader(config=UpperCamelCase__ )
_A = model(UpperCamelCase__, attention_mask=UpperCamelCase__ )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,) )
def __UpperCAmelCase ( self : Dict ) -> Dict:
        config_and_inputs = self.prepare_config_and_inputs()
        # unpack in the order returned by prepare_config_and_inputs above
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids}
        return config, inputs_dict
@require_tf
class lowercase_ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
__lowerCAmelCase = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
_A = TFDPRModelTester(self )
_A = ConfigTester(self, config_class=UpperCamelCase__, hidden_size=37 )
def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : List[str] ) -> List[str]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*UpperCamelCase__ )
def __UpperCAmelCase ( self : int ) -> List[str]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*UpperCamelCase__ )
def __UpperCAmelCase ( self : int ) -> Tuple:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*UpperCamelCase__ )
@slow
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = TFDPRContextEncoder.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = TFDPRContextEncoder.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = TFDPRQuestionEncoder.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = TFDPRReader.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
@require_tf
class TFDPRModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
@slow
def test_inference_no_head( self ):
model = TFDPRQuestionEncoder.from_pretrained('facebook/dpr-question_encoder-single-nq-base' )
input_ids = tf.constant(
[[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]] ) # [CLS] hello, is my dog cute? [SEP]
output = model(input_ids )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
expected_slice = tf.constant(
[
[
0.03236253,
0.12753335,
0.16818509,
0.00279786,
0.3896933,
0.24264945,
0.2178971,
-0.02335227,
-0.08481959,
-0.14324117,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4 ) )
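# --- Illustrative usage sketch (added for clarity; not part of the test above) ---
# Minimal end-to-end use of the question encoder exercised by the integration
# test: tokenize a question and read the pooled embedding. Assumes the
# `transformers` TF extras are installed and the checkpoint can be downloaded.
from transformers import DPRQuestionEncoderTokenizer, TFDPRQuestionEncoder

tokenizer = DPRQuestionEncoderTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
encoder = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
question = tokenizer("hello, is my dog cute?", return_tensors="tf")
embedding = encoder(**question).pooler_output  # shape (1, 768), the slice checked above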
| 107 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class YolosFeatureExtractor( YolosImageProcessor ):
def __init__( self , *args , **kwargs ) -> None:
warnings.warn(
"The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use YolosImageProcessor instead." , FutureWarning , )
super().__init__(*args , **kwargs )
| 18 | 0 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest( unittest.TestCase ):
'''simple docstring'''
def setUp( self ):
"""simple docstring"""
self.checkpoint = "laion/clap-htsat-unfused"
self.tmpdirname = tempfile.mkdtemp()
def get_tokenizer( self , **kwargs ):
"""simple docstring"""
return RobertaTokenizer.from_pretrained(self.checkpoint , **kwargs )
def get_feature_extractor( self , **kwargs ):
"""simple docstring"""
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **kwargs )
def tearDown( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def test_save_load_pretrained_default( self ):
"""simple docstring"""
tokenizer = self.get_tokenizer()
feature_extractor = self.get_feature_extractor()
processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
processor.save_pretrained(self.tmpdirname )
processor = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , RobertaTokenizerFast )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , ClapFeatureExtractor )
def test_save_load_pretrained_additional_features( self ):
"""simple docstring"""
processor = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False , padding_value=1.0 )
processor = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=False , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , RobertaTokenizerFast )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , ClapFeatureExtractor )
def test_feature_extractor( self ):
"""simple docstring"""
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
raw_speech = floats_list((3, 1000) )
input_feat_extract = feature_extractor(raw_speech , return_tensors="np" )
input_processor = processor(audios=raw_speech , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def test_tokenizer( self ):
"""simple docstring"""
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
input_str = "This is a test string"
encoded_processor = processor(text=input_str )
encoded_tok = tokenizer(input_str )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def test_tokenizer_decode( self ):
"""simple docstring"""
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
decoded_processor = processor.batch_decode(predicted_ids )
decoded_tok = tokenizer.batch_decode(predicted_ids )
self.assertListEqual(decoded_tok , decoded_processor )
def test_model_input_names( self ):
"""simple docstring"""
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="`processor` and `feature_extractor` model input names do not match" , ) | 108 |
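# --- Illustrative usage sketch ---
# What ClapProcessor routes where, mirroring the tests above: `text` goes to the
# Roberta tokenizer, `audios` to the CLAP feature extractor. A sketch assuming
# `transformers` is installed; random samples stand in for real audio.
import numpy as np
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
fake_audio = np.random.randn(48_000).astype(np.float32)  # ~1 s at CLAP's 48 kHz default
batch = processor(text=["a dog barking"], audios=[fake_audio], return_tensors="np")
# batch now holds the tokenizer outputs (input_ids, ...) plus the extractor's input_features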
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
"tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class FalconConfig( PretrainedConfig ):
model_type = "falcon"
keys_to_ignore_at_inference = ["past_key_values"]
def __init__( self , vocab_size=65024 , hidden_size=4544 , num_hidden_layers=32 , num_attention_heads=71 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , hidden_dropout=0.0 , attention_dropout=0.0 , num_kv_heads=None , alibi=False , new_decoder_architecture=False , multi_query=True , parallel_attn=True , bias=False , bos_token_id=11 , eos_token_id=11 , **kwargs , ):
self.vocab_size = vocab_size
# Backward compatibility with n_embed kwarg
n_embed = kwargs.pop("n_embed" , None )
self.hidden_size = hidden_size if n_embed is None else n_embed
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.use_cache = use_cache
self.hidden_dropout = hidden_dropout
self.attention_dropout = attention_dropout
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
self.alibi = alibi
self.new_decoder_architecture = new_decoder_architecture
self.multi_query = multi_query # Ignored when new_decoder_architecture is True
self.parallel_attn = parallel_attn
self.bias = bias
super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
@property
def head_dim( self ):
return self.hidden_size // self.num_attention_heads
@property
def rotary( self ):
return not self.alibi
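# --- Illustrative usage sketch ---
# Quick check of the derived properties above with the default hyperparameters,
# assuming the released FalconConfig matches this file.
from transformers import FalconConfig

config = FalconConfig()                      # hidden_size=4544, num_attention_heads=71
assert config.head_dim == 4544 // 71         # 64 dims per attention head
assert config.rotary == (not config.alibi)   # rotary embeddings unless alibi is used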
| 18 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a = logging.get_logger(__name__)
class PoolFormerImageProcessor( BaseImageProcessor ):
model_input_names = ['pixel_values']
def __init__( self : Dict ,lowerCamelCase : bool = True ,lowerCamelCase : Dict[str, int] = None ,lowerCamelCase : int = 0.9 ,lowerCamelCase : PILImageResampling = PILImageResampling.BICUBIC ,lowerCamelCase : bool = True ,lowerCamelCase : Dict[str, int] = None ,lowerCamelCase : Union[int, float] = 1 / 255 ,lowerCamelCase : bool = True ,lowerCamelCase : bool = True ,lowerCamelCase : Optional[Union[float, List[float]]] = None ,lowerCamelCase : Optional[Union[float, List[float]]] = None ,**lowerCamelCase : Union[str, Any] ,):
'''simple docstring'''
super().__init__(**lowerCamelCase )
__SCREAMING_SNAKE_CASE = size if size is not None else {"""shortest_edge""": 224}
__SCREAMING_SNAKE_CASE = get_size_dict(lowerCamelCase ,default_to_square=lowerCamelCase )
__SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
__SCREAMING_SNAKE_CASE = get_size_dict(lowerCamelCase ,param_name="""crop_size""" )
__SCREAMING_SNAKE_CASE = do_resize
__SCREAMING_SNAKE_CASE = size
__SCREAMING_SNAKE_CASE = crop_pct
__SCREAMING_SNAKE_CASE = resample
__SCREAMING_SNAKE_CASE = do_center_crop
__SCREAMING_SNAKE_CASE = crop_size
__SCREAMING_SNAKE_CASE = do_rescale
__SCREAMING_SNAKE_CASE = rescale_factor
__SCREAMING_SNAKE_CASE = do_normalize
__SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__SCREAMING_SNAKE_CASE = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCAmelCase__ ( self : Optional[int] ,lowerCamelCase : np.ndarray ,lowerCamelCase : Dict[str, int] ,lowerCamelCase : Optional[float] = None ,lowerCamelCase : PILImageResampling = PILImageResampling.BICUBIC ,lowerCamelCase : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase : Any ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = get_size_dict(lowerCamelCase ,default_to_square=lowerCamelCase )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(f"""size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
if crop_pct is not None:
if "shortest_edge" in size:
__SCREAMING_SNAKE_CASE = int(size["""shortest_edge"""] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
__SCREAMING_SNAKE_CASE = int(size["""height"""] / crop_pct )
else:
__SCREAMING_SNAKE_CASE = (int(size["""height"""] / crop_pct ), int(size["""width"""] / crop_pct ))
else:
raise ValueError("""Invalid size for resize: {}""".format(lowerCamelCase ) )
__SCREAMING_SNAKE_CASE = get_resize_output_image_size(lowerCamelCase ,size=lowerCamelCase ,default_to_square=lowerCamelCase )
else:
if "shortest_edge" in size:
__SCREAMING_SNAKE_CASE = get_resize_output_image_size(lowerCamelCase ,size=size["""shortest_edge"""] ,default_to_square=lowerCamelCase )
elif "height" in size and "width" in size:
__SCREAMING_SNAKE_CASE = (size["""height"""], size["""width"""])
else:
raise ValueError("""Invalid size for resize: {}""".format(lowerCamelCase ) )
return resize(lowerCamelCase ,size=lowerCamelCase ,resample=lowerCamelCase ,data_format=lowerCamelCase ,**lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[int] ,lowerCamelCase : np.ndarray ,lowerCamelCase : Dict[str, int] ,lowerCamelCase : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase : int ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = get_size_dict(lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"""size must contain 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(lowerCamelCase ,size=(size["""height"""], size["""width"""]) ,data_format=lowerCamelCase ,**lowerCamelCase )
def UpperCAmelCase__ ( self : Any ,lowerCamelCase : np.ndarray ,lowerCamelCase : Union[int, float] ,lowerCamelCase : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase : List[str] ,):
'''simple docstring'''
return rescale(lowerCamelCase ,scale=lowerCamelCase ,data_format=lowerCamelCase ,**lowerCamelCase )
def UpperCAmelCase__ ( self : Any ,lowerCamelCase : np.ndarray ,lowerCamelCase : Union[float, List[float]] ,lowerCamelCase : Union[float, List[float]] ,lowerCamelCase : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase : int ,):
'''simple docstring'''
return normalize(lowerCamelCase ,mean=lowerCamelCase ,std=lowerCamelCase ,data_format=lowerCamelCase ,**lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] ,lowerCamelCase : ImageInput ,lowerCamelCase : bool = None ,lowerCamelCase : Dict[str, int] = None ,lowerCamelCase : int = None ,lowerCamelCase : PILImageResampling = None ,lowerCamelCase : bool = None ,lowerCamelCase : Dict[str, int] = None ,lowerCamelCase : bool = None ,lowerCamelCase : float = None ,lowerCamelCase : bool = None ,lowerCamelCase : Optional[Union[float, List[float]]] = None ,lowerCamelCase : Optional[Union[float, List[float]]] = None ,lowerCamelCase : Optional[Union[str, TensorType]] = None ,lowerCamelCase : ChannelDimension = ChannelDimension.FIRST ,**lowerCamelCase : Optional[int] ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = do_resize if do_resize is not None else self.do_resize
__SCREAMING_SNAKE_CASE = crop_pct if crop_pct is not None else self.crop_pct
__SCREAMING_SNAKE_CASE = resample if resample is not None else self.resample
__SCREAMING_SNAKE_CASE = do_center_crop if do_center_crop is not None else self.do_center_crop
__SCREAMING_SNAKE_CASE = do_rescale if do_rescale is not None else self.do_rescale
__SCREAMING_SNAKE_CASE = rescale_factor if rescale_factor is not None else self.rescale_factor
__SCREAMING_SNAKE_CASE = do_normalize if do_normalize is not None else self.do_normalize
__SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else self.image_mean
__SCREAMING_SNAKE_CASE = image_std if image_std is not None else self.image_std
__SCREAMING_SNAKE_CASE = size if size is not None else self.size
__SCREAMING_SNAKE_CASE = get_size_dict(lowerCamelCase ,default_to_square=lowerCamelCase )
__SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else self.crop_size
__SCREAMING_SNAKE_CASE = get_size_dict(lowerCamelCase ,param_name="""crop_size""" )
__SCREAMING_SNAKE_CASE = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_pct is None:
raise ValueError("""Crop_pct must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
__SCREAMING_SNAKE_CASE = [to_numpy_array(lowerCamelCase ) for image in images]
if do_resize:
__SCREAMING_SNAKE_CASE = [self.resize(image=lowerCamelCase ,size=lowerCamelCase ,crop_pct=lowerCamelCase ,resample=lowerCamelCase ) for image in images]
if do_center_crop:
__SCREAMING_SNAKE_CASE = [self.center_crop(image=lowerCamelCase ,size=lowerCamelCase ) for image in images]
if do_rescale:
__SCREAMING_SNAKE_CASE = [self.rescale(image=lowerCamelCase ,scale=lowerCamelCase ) for image in images]
if do_normalize:
__SCREAMING_SNAKE_CASE = [self.normalize(image=lowerCamelCase ,mean=lowerCamelCase ,std=lowerCamelCase ) for image in images]
__SCREAMING_SNAKE_CASE = [to_channel_dimension_format(lowerCamelCase ,lowerCamelCase ) for image in images]
__SCREAMING_SNAKE_CASE = {"""pixel_values""": images}
return BatchFeature(data=lowerCamelCase ,tensor_type=lowerCamelCase )
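# --- Illustrative sketch of the crop_pct logic above ---
# With crop_pct set, the image is first resized *larger* so that the later
# center crop of `size` keeps roughly crop_pct of the content. Pure-Python
# check of the arithmetic used in `resize` (no dependencies).
def resize_target(shortest_edge: int, crop_pct: float) -> int:
    return int(shortest_edge / crop_pct)  # matches int(size["shortest_edge"] / crop_pct)

assert resize_target(224, 0.9) == 248  # resize shortest edge to 248, then center-crop 224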
| 109 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"facebook/deit-base-distilled-patch16-224": (
"https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig( PretrainedConfig ):
model_type = "deit"
def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , encoder_stride=16 , **kwargs , ):
super().__init__(**kwargs )
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.qkv_bias = qkv_bias
self.encoder_stride = encoder_stride
class DeiTOnnxConfig( OnnxConfig ):
torch_onnx_minimum_version = version.parse("1.11" )
@property
def inputs( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def atol_for_validation( self ) -> float:
return 1e-4
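# --- Illustrative usage sketch ---
# Reading the ONNX export metadata declared above: one dynamic `pixel_values`
# input and a 1e-4 validation tolerance. Assumes the classes are exported as
# DeiTConfig / DeiTOnnxConfig in transformers.
from transformers import DeiTConfig
from transformers.models.deit.configuration_deit import DeiTOnnxConfig

onnx_config = DeiTOnnxConfig(DeiTConfig())
print(onnx_config.inputs)                # OrderedDict([('pixel_values', {0: 'batch', ...})])
print(onnx_config.atol_for_validation)   # 1e-04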
| 18 | 0 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
def convert_weight_and_push(hidden_sizes ,name ,config ,save_directory ,push_to_hub = True ):
print(F'''Converting {name}...''' )
with torch.no_grad():
if hidden_sizes == 128:
if name[-1] == "S":
from_model = timm.create_model('levit_128s' ,pretrained=True )
else:
from_model = timm.create_model('levit_128' ,pretrained=True )
if hidden_sizes == 192:
from_model = timm.create_model('levit_192' ,pretrained=True )
if hidden_sizes == 256:
from_model = timm.create_model('levit_256' ,pretrained=True )
if hidden_sizes == 384:
from_model = timm.create_model('levit_384' ,pretrained=True )
from_model.eval()
our_model = LevitForImageClassificationWithTeacher(config ).eval()
huggingface_weights = OrderedDict()
weights = from_model.state_dict()
og_keys = list(from_model.state_dict().keys() )
new_keys = list(our_model.state_dict().keys() )
print(len(og_keys ) ,len(new_keys ) )
for i in range(len(og_keys ) ):
huggingface_weights[new_keys[i]] = weights[og_keys[i]]
our_model.load_state_dict(huggingface_weights )
x = torch.randn((2, 3, 224, 224) )
from_model_logits = from_model(x )
our_model_logits = our_model(x ).logits
assert torch.allclose(from_model_logits ,our_model_logits ), "The model logits don't match the original one."
checkpoint_name = name
print(checkpoint_name )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
image_processor = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(F'''Pushed {checkpoint_name}''' )
def convert_weights_and_push(save_directory ,model_name = None ,push_to_hub = True ):
filename = 'imagenet-1k-id2label.json'
num_labels = 1000
expected_shape = (1, num_labels)
repo_id = 'huggingface/label-files'
num_labels = num_labels
id2label = json.load(open(hf_hub_download(repo_id ,filename ,repo_type='dataset' ) ,'r' ) )
id2label = {int(k ): v for k, v in id2label.items()}
id2label = id2label
label2id = {v: k for k, v in id2label.items()}
ImageNetPreTrainedConfig = partial(LevitConfig ,num_labels=num_labels ,id2label=id2label ,label2id=label2id )
names_to_hidden_sizes = {
'levit-128S': 128,
'levit-128': 128,
'levit-192': 192,
'levit-256': 256,
'levit-384': 384,
}
names_to_config = {
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] ,num_attention_heads=[4, 6, 8] ,depths=[2, 3, 4] ,key_dim=[16, 16, 16] ,drop_path_rate=0 ,),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] ,num_attention_heads=[4, 8, 12] ,depths=[4, 4, 4] ,key_dim=[16, 16, 16] ,drop_path_rate=0 ,),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] ,num_attention_heads=[3, 5, 6] ,depths=[4, 4, 4] ,key_dim=[32, 32, 32] ,drop_path_rate=0 ,),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] ,num_attention_heads=[4, 6, 8] ,depths=[4, 4, 4] ,key_dim=[32, 32, 32] ,drop_path_rate=0 ,),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] ,num_attention_heads=[6, 9, 12] ,depths=[4, 4, 4] ,key_dim=[32, 32, 32] ,drop_path_rate=0.1 ,),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] ,model_name ,names_to_config[model_name] ,save_directory ,push_to_hub )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] ,model_name ,config ,save_directory ,push_to_hub )
return config, expected_shape
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
args = parser.parse_args()
pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
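# --- Illustrative usage sketch ---
# Building the smallest variant's config directly, exactly as `names_to_config`
# does above (a sketch; assumes LevitConfig accepts these fields as used here).
from transformers import LevitConfig

levit_128s_config = LevitConfig(
    hidden_sizes=[128, 256, 384],
    num_attention_heads=[4, 6, 8],
    depths=[2, 3, 4],
    key_dim=[16, 16, 16],
    drop_path_rate=0,
)
# The script itself is CLI-driven, e.g. (script filename here is hypothetical):
#   python convert_levit.py --model_name levit-128S --pytorch_dump_folder_path levit-dump-folder/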
| 110 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
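# --- Illustrative sketch of the lazy-import pattern above ---
# `_LazyModule` defers the heavy submodule imports until an attribute is first
# accessed. A minimal standalone version of the idea (not transformers' actual
# implementation), built on importlib:
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {module_name: [exported names]} into {name: module_name}
        self._name_to_module = {n: m for m, names in import_structure.items() for n in names}

    def __getattr__(self, attr):
        module = importlib.import_module(self._name_to_module[attr])  # resolved on first use
        return getattr(module, attr)

lazy = LazyModule("demo", {"json": ["loads", "dumps"]})
assert lazy.loads("{}") == {}  # the json module is resolved here, on first attribute access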
| 18 | 0 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest( unittest.TestCase ):
'''simple docstring'''
def setUp( self ):
self.checkpoint = "ylacombe/bark-small"
self.tmpdirname = tempfile.mkdtemp()
self.voice_preset = "en_speaker_1"
self.input_string = "This is a test string"
self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
self.speaker_embeddings_directory = "speaker_embeddings"
def get_tokenizer( self , **kwargs ):
return AutoTokenizer.from_pretrained(self.checkpoint , **kwargs )
def tearDown( self ):
shutil.rmtree(self.tmpdirname )
def test_save_load_pretrained_default( self ):
tokenizer = self.get_tokenizer()
processor = BarkProcessor(tokenizer=tokenizer )
processor.save_pretrained(self.tmpdirname )
processor = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def test_save_load_pretrained_additional_features( self ):
processor = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
processor = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="(BOS)" , eos_token="(EOS)" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def test_speaker_embeddings( self ):
processor = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
seq_len = 35
nb_codebooks_coarse = 2
nb_codebooks_total = 8
voice_preset = {
"semantic_prompt": np.ones(seq_len ),
"coarse_prompt": np.ones((nb_codebooks_coarse, seq_len) ),
"fine_prompt": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
inputs = processor(text=self.input_string , voice_preset=voice_preset )
processed_voice_preset = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(key , np.array([] ) ).tolist() )
# test loading voice preset from npz file
tmpfilename = os.path.join(self.tmpdirname , "file.npz" )
np.savez(tmpfilename , **voice_preset )
inputs = processor(text=self.input_string , voice_preset=tmpfilename )
processed_voice_preset = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(key , np.array([] ) ).tolist() )
# test loading voice preset from the hub
inputs = processor(text=self.input_string , voice_preset=self.voice_preset )
def test_tokenizer( self ):
tokenizer = self.get_tokenizer()
processor = BarkProcessor(tokenizer=tokenizer )
encoded_processor = processor(text=self.input_string )
encoded_tok = tokenizer(
self.input_string , padding="max_length" , max_length=256 , add_special_tokens=False , return_attention_mask=True , return_token_type_ids=False , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
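# --- Illustrative usage sketch ---
# Driving BarkProcessor with a named voice preset, as the tests above do.
# Assumes `transformers` is installed and the checkpoint/preset can be fetched.
from transformers import BarkProcessor

processor = BarkProcessor.from_pretrained("ylacombe/bark-small")
inputs = processor(text=["This is a test string"], voice_preset="en_speaker_1")
history = inputs["history_prompt"]  # semantic/coarse/fine prompt arrays for the preset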
| 533 |
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class LevitImageProcessor( BaseImageProcessor ):
model_input_names = ["pixel_values"]
def __init__( self , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = PILImageResampling.BICUBIC , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = True , _lowerCAmelCase = 1 / 255 , _lowerCAmelCase = True , _lowerCAmelCase = IMAGENET_DEFAULT_MEAN , _lowerCAmelCase = IMAGENET_DEFAULT_STD , **_lowerCAmelCase , ) -> None:
super().__init__(**_lowerCAmelCase )
_lowerCAmelCase = size if size is not None else {"shortest_edge": 224}
_lowerCAmelCase = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
_lowerCAmelCase = crop_size if crop_size is not None else {"height": 224, "width": 224}
_lowerCAmelCase = get_size_dict(_lowerCAmelCase , param_name="crop_size" )
_lowerCAmelCase = do_resize
_lowerCAmelCase = size
_lowerCAmelCase = resample
_lowerCAmelCase = do_center_crop
_lowerCAmelCase = crop_size
_lowerCAmelCase = do_rescale
_lowerCAmelCase = rescale_factor
_lowerCAmelCase = do_normalize
_lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_lowerCAmelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = PILImageResampling.BICUBIC , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> np.ndarray:
_lowerCAmelCase = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
_lowerCAmelCase = int((256 / 224) * size["shortest_edge"] )
_lowerCAmelCase = get_resize_output_image_size(_lowerCAmelCase , size=_lowerCAmelCase , default_to_square=_lowerCAmelCase )
_lowerCAmelCase = {"height": output_size[0], "width": output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f'''Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}''' )
return resize(
_lowerCAmelCase , size=(size_dict["height"], size_dict["width"]) , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> np.ndarray:
_lowerCAmelCase = get_size_dict(_lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size dict must have keys \'height\' and \'width\'. Got {size.keys()}''' )
return center_crop(_lowerCAmelCase , size=(size["height"], size["width"]) , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> np.ndarray:
return rescale(_lowerCAmelCase , scale=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> np.ndarray:
return normalize(_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = ChannelDimension.FIRST , **_lowerCAmelCase , ) -> BatchFeature:
_lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase = resample if resample is not None else self.resample
_lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_lowerCAmelCase = image_mean if image_mean is not None else self.image_mean
_lowerCAmelCase = image_std if image_std is not None else self.image_std
_lowerCAmelCase = size if size is not None else self.size
_lowerCAmelCase = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
_lowerCAmelCase = crop_size if crop_size is not None else self.crop_size
_lowerCAmelCase = get_size_dict(_lowerCAmelCase , param_name="crop_size" )
_lowerCAmelCase = make_list_of_images(_lowerCAmelCase )
if not valid_images(_lowerCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
_lowerCAmelCase = [to_numpy_array(_lowerCAmelCase ) for image in images]
if do_resize:
_lowerCAmelCase = [self.resize(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) for image in images]
if do_center_crop:
_lowerCAmelCase = [self.center_crop(_lowerCAmelCase , _lowerCAmelCase ) for image in images]
if do_rescale:
_lowerCAmelCase = [self.rescale(_lowerCAmelCase , _lowerCAmelCase ) for image in images]
if do_normalize:
_lowerCAmelCase = [self.normalize(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) for image in images]
_lowerCAmelCase = [to_channel_dimension_format(_lowerCAmelCase , _lowerCAmelCase ) for image in images]
_lowerCAmelCase = {"pixel_values": images}
return BatchFeature(data=_lowerCAmelCase , tensor_type=_lowerCAmelCase )
| 18 | 0 |
"""simple docstring"""
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool( PipelineTool ):
description = (
"This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
"It takes two arguments named `image` which should be the original image, and `label` which should be a text "
"describing the elements what should be identified in the segmentation mask. The tool returns the mask."
)
default_checkpoint = "CIDAS/clipseg-rd64-refined"
name = "image_segmenter"
model_class = CLIPSegForImageSegmentation
inputs = ["image", "text"]
outputs = ["image"]
def __init__( self , *args , **kwargs ):
requires_backends(self , ["vision"] )
super().__init__(*args , **kwargs )
def encode( self , image , label ):
return self.pre_processor(text=[label] , images=[image] , padding=True , return_tensors="pt" )
def forward( self , inputs ):
with torch.no_grad():
logits = self.model(**inputs ).logits
return logits
def decode( self , outputs ):
array = outputs.cpu().detach().numpy()
array[array <= 0] = 0
array[array > 0] = 1
return Image.fromarray((array * 255).astype(np.uint8 ) )
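# --- Illustrative usage sketch ---
# The encode/forward/decode pipeline above, written directly against the
# underlying CLIPSeg classes (a sketch; assumes network access and a local RGB
# image at the hypothetical path "cat.png").
import numpy as np
import torch
from PIL import Image
from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
image = Image.open("cat.png").convert("RGB")
inputs = processor(text=["cat"], images=[image], padding=True, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
mask = (logits.squeeze().cpu().numpy() > 0).astype(np.uint8) * 255  # binarize as decode() does
Image.fromarray(mask).save("cat_mask.png")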
| 633 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class DonutSwinConfig( PretrainedConfig ):
model_type = "donut-swin"
attribute_map = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1e-5 , **kwargs , ):
super().__init__(**kwargs )
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.embed_dim = embed_dim
self.depths = depths
self.num_layers = len(depths )
self.num_heads = num_heads
self.window_size = window_size
self.mlp_ratio = mlp_ratio
self.qkv_bias = qkv_bias
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.drop_path_rate = drop_path_rate
self.hidden_act = hidden_act
self.use_absolute_embeddings = use_absolute_embeddings
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
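# --- Illustrative sketch of the hidden_size derivation above ---
# Each Swin stage doubles the channel dimension, so the channel width after the
# last stage is embed_dim * 2 ** (num_stages - 1). Check with the defaults:
embed_dim = 96
depths = [2, 2, 6, 2]  # four stages
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
assert hidden_size == 768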
| 18 | 0 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '''\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'''
_DESCRIPTION = '''\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n'''
_KWARGS_DESCRIPTION = '''\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {\'mse\': 0.6123724356957945}\n\n If you\'re using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mse\': array([0.41666667, 1. ])}\n'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse( datasets.Metric ):
def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
] , )
def _get_feature_types( self ):
'''simple docstring'''
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
def _compute( self , predictions , references , sample_weight=None , multioutput="uniform_average" , squared=True ):
'''simple docstring'''
mse = mean_squared_error(
predictions , references , sample_weight=sample_weight , multioutput=multioutput , squared=squared )
return {"mse": mse}
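# --- Illustrative sketch ---
# The metric defers to sklearn's mean_squared_error; the docstring example's
# numbers also fall out of a direct numpy computation:
import numpy as np

predictions = np.array([2.5, 0.0, 2.0, 8.0])
references = np.array([3.0, -0.5, 2.0, 7.0])
mse = float(np.mean((predictions - references) ** 2))
assert abs(mse - 0.375) < 1e-12  # matches the {'mse': 0.375} example above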
| 694 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class Swinv2Config( PretrainedConfig ):
model_type = "swinv2"
attribute_map = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1e-5 , encoder_stride=32 , **kwargs , ):
super().__init__(**kwargs )
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.embed_dim = embed_dim
self.depths = depths
self.num_layers = len(depths )
self.num_heads = num_heads
self.window_size = window_size
self.mlp_ratio = mlp_ratio
self.qkv_bias = qkv_bias
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.drop_path_rate = drop_path_rate
self.hidden_act = hidden_act
self.use_absolute_embeddings = use_absolute_embeddings
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.encoder_stride = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
self.pretrained_window_sizes = (0, 0, 0, 0)
| 18 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, Swinv2Config, Swinv2ForImageClassification
def get_swinv2_config(swinv2_name ):
config = Swinv2Config()
name_split = swinv2_name.split("_" )
model_size = name_split[1]
if "to" in name_split[3]:
img_size = int(name_split[3][-3:] )
else:
img_size = int(name_split[3] )
if "to" in name_split[2]:
window_size = int(name_split[2][-2:] )
else:
window_size = int(name_split[2][6:] )
if model_size == "tiny":
embed_dim = 96
depths = (2, 2, 6, 2)
num_heads = (3, 6, 12, 24)
elif model_size == "small":
embed_dim = 96
depths = (2, 2, 18, 2)
num_heads = (3, 6, 12, 24)
elif model_size == "base":
embed_dim = 128
depths = (2, 2, 18, 2)
num_heads = (4, 8, 16, 32)
else:
embed_dim = 192
depths = (2, 2, 18, 2)
num_heads = (6, 12, 24, 48)
if "to" in swinv2_name:
config.pretrained_window_sizes = (12, 12, 12, 6)
if ("22k" in swinv2_name) and ("to" not in swinv2_name):
num_classes = 21841
repo_id = "huggingface/label-files"
filename = "imagenet-22k-id2label.json"
id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
id2label = {int(k): v for k, v in id2label.items()}
config.id2label = id2label
config.label2id = {v: k for k, v in id2label.items()}
else:
num_classes = 1000
repo_id = "huggingface/label-files"
filename = "imagenet-1k-id2label.json"
id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
id2label = {int(k): v for k, v in id2label.items()}
config.id2label = id2label
config.label2id = {v: k for k, v in id2label.items()}
config.image_size = img_size
config.num_labels = num_classes
config.embed_dim = embed_dim
config.depths = depths
config.num_heads = num_heads
config.window_size = window_size
return config
def rename_key(name ):
if "patch_embed.proj" in name:
name = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
name = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
name = "encoder." + name
if "attn.proj" in name:
name = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
name = name.replace("attn" , "attention.self" )
if "norm1" in name:
name = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
name = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
name = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
name = name.replace("mlp.fc2" , "output.dense" )
if "q_bias" in name:
name = name.replace("q_bias" , "query.bias" )
if "k_bias" in name:
name = name.replace("k_bias" , "key.bias" )
if "v_bias" in name:
name = name.replace("v_bias" , "value.bias" )
if "cpb_mlp" in name:
name = name.replace("cpb_mlp" , "continuous_position_bias_mlp" )
if name == "norm.weight":
name = "layernorm.weight"
if name == "norm.bias":
name = "layernorm.bias"
if "head" in name:
name = name.replace("head" , "classifier" )
else:
name = "swinv2." + name
return name
def convert_state_dict(orig_state_dict , model ):
for key in orig_state_dict.copy().keys():
val = orig_state_dict.pop(key )
if "mask" in key:
continue
elif "qkv" in key:
key_split = key.split("." )
layer_num = int(key_split[1] )
block_num = int(key_split[3] )
dim = model.swinv2.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
orig_state_dict[f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"] = val[:dim, :]
orig_state_dict[f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"] = val[dim : dim * 2, :]
orig_state_dict[f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"] = val[-dim:, :]
else:
orig_state_dict[f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"] = val[:dim]
orig_state_dict[f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"] = val[
dim : dim * 2
]
orig_state_dict[f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"] = val[-dim:]
else:
orig_state_dict[rename_key(key )] = val
return orig_state_dict
def convert_swinv2_checkpoint(swinv2_name , pytorch_dump_folder_path ):
timm_model = timm.create_model(swinv2_name , pretrained=True )
timm_model.eval()
config = get_swinv2_config(swinv2_name )
model = Swinv2ForImageClassification(config )
model.eval()
new_state_dict = convert_state_dict(timm_model.state_dict() , model )
model.load_state_dict(new_state_dict )
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinv2_name.replace("_" , "-" ) ) )
image = Image.open(requests.get(url , stream=True ).raw )
inputs = image_processor(images=image , return_tensors="pt" )
timm_outs = timm_model(inputs["pixel_values"] )
hf_outs = model(**inputs ).logits
assert torch.allclose(timm_outs , hf_outs , atol=1e-3 )
print(F"Saving model {swinv2_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(pytorch_dump_folder_path )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(pytorch_dump_folder_path )
model.push_to_hub(
repo_path_or_name=Path(pytorch_dump_folder_path , swinv2_name ) , organization="nandwalritik" , commit_message="Add model" , )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swinv2_name",
default="swinv2_tiny_patch4_window8_256",
type=str,
help="Name of the Swinv2 timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
args = parser.parse_args()
convert_swinv2_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
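# --- Illustrative sketch of the fused-qkv split above ---
# timm stores attention projections as one fused qkv matrix; convert_state_dict
# slices it into separate query/key/value tensors. Standalone check of the
# slicing with a toy tensor:
import torch

dim = 4  # per-projection width (all_head_size)
qkv_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
q = qkv_weight[:dim, :]           # val[:dim, :]
k = qkv_weight[dim : dim * 2, :]  # val[dim : dim * 2, :]
v = qkv_weight[-dim:, :]          # val[-dim:, :]
assert torch.equal(torch.cat([q, k, v]), qkv_weight)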
| 346 |
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests( ModelTesterMixin ,UNetTesterMixin ,unittest.TestCase ):
model_class = AutoencoderKL
main_input_name = "sample"
base_precision = 1e-2
@property
def dummy_input( self ):
batch_size = 4
num_channels = 3
sizes = (32, 32)
image = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
return {"sample": image}
@property
def input_shape( self ):
return (3, 32, 32)
@property
def output_shape( self ):
return (3, 32, 32)
def prepare_init_args_and_inputs_for_common( self ):
init_dict = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 4,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_forward_signature( self ):
pass
def test_training( self ):
pass
@unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" )
def test_gradient_checkpointing( self ):
# enable deterministic behavior for gradient checkpointing
init_dict , inputs_dict = self.prepare_init_args_and_inputs_for_common()
model = self.model_class(**init_dict )
model.to(torch_device )
assert not model.is_gradient_checkpointing and model.training
out = model(**inputs_dict ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
labels = torch.randn_like(out )
loss = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
model_2 = self.model_class(**init_dict )
# clone model
model_2.load_state_dict(model.state_dict() )
model_2.to(torch_device )
model_2.enable_gradient_checkpointing()
assert model_2.is_gradient_checkpointing and model_2.training
out_2 = model_2(**inputs_dict ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_2.zero_grad()
loss_2 = (out_2 - labels).mean()
loss_2.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_2).abs() < 1e-5 )
named_params = dict(model.named_parameters() )
named_params_2 = dict(model_2.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_2[name].grad.data , atol=5e-5 ) )
def test_from_pretrained_hub( self ):
model , loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=True )
self.assertIsNotNone(model )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(torch_device )
image = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def test_output_pretrained( self ):
model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" )
model = model.to(torch_device )
model.eval()
if torch_device == "mps":
generator = torch.manual_seed(0 )
else:
generator = torch.Generator(device=torch_device ).manual_seed(0 )
image = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
image = image.to(torch_device )
with torch.no_grad():
output = model(image , sample_posterior=True , generator=generator ).sample
output_slice = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
expected_output_slice = torch.tensor(
[
-4.0078e-01,
-3.8323e-04,
-1.2681e-01,
-1.1462e-01,
2.0095e-01,
1.0893e-01,
-8.8247e-02,
-3.0361e-01,
-9.8644e-03,
] )
elif torch_device == "cpu":
expected_output_slice = torch.tensor(
[-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
else:
expected_output_slice = torch.tensor(
[-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1e-2 ) )
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]:
return f'''gaussian_noise_s={seed}_shape={'_'.join([str(_lowerCAmelCase ) for s in shape] )}.npy'''
def _snake_case ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self , _lowerCAmelCase=0 , _lowerCAmelCase=(4, 3, 512, 512) , _lowerCAmelCase=False ) -> Tuple:
_lowerCAmelCase = torch.floataa if fpaa else torch.floataa
_lowerCAmelCase = torch.from_numpy(load_hf_numpy(self.get_file_format(_lowerCAmelCase , _lowerCAmelCase ) ) ).to(_lowerCAmelCase ).to(_lowerCAmelCase )
return image
def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
revision = "fp16" if fp16 else None
torch_dtype = torch.float16 if fp16 else torch.float32
model = AutoencoderKL.from_pretrained(
model_id, subfolder="vae", torch_dtype=torch_dtype, revision=revision, )
model.to(torch_device).eval()
return model
def get_generator(self, seed=0):
if torch_device == "mps":
return torch.manual_seed(seed)
return torch.Generator(device=torch_device).manual_seed(seed)
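# Each parameterized case below supplies a seed plus reference output slices recorded
# on CUDA (and, where a third list is present, on "mps").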
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
model = self.get_sd_vae_model()
image = self.get_sd_image(seed)
generator = self.get_generator(seed)
with torch.no_grad():
sample = model(image, generator=generator, sample_posterior=True).sample
assert sample.shape == image.shape
output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)
assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
def test_stable_diffusion_fp16(self, seed, expected_slice):
model = self.get_sd_vae_model(fp16=True)
image = self.get_sd_image(seed, fp16=True)
generator = self.get_generator(seed)
with torch.no_grad():
sample = model(image, generator=generator, sample_posterior=True).sample
assert sample.shape == image.shape
output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
expected_output_slice = torch.tensor(expected_slice)
assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
model = self.get_sd_vae_model()
image = self.get_sd_image(seed)
with torch.no_grad():
sample = model(image).sample
assert sample.shape == image.shape
output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)
assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
def test_stable_diffusion_decode(self, seed, expected_slice):
model = self.get_sd_vae_model()
encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))
with torch.no_grad():
sample = model.decode(encoding).sample
assert list(sample.shape) == [3, 3, 512, 512]
output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
expected_output_slice = torch.tensor(expected_slice)
assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
model = self.get_sd_vae_model(fp16=True)
encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)
with torch.no_grad():
sample = model.decode(encoding).sample
assert list(sample.shape) == [3, 3, 512, 512]
output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
expected_output_slice = torch.tensor(expected_slice)
assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
model = self.get_sd_vae_model(fp16=True)
encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)
with torch.no_grad():
sample = model.decode(encoding).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
sample_2 = model.decode(encoding).sample
assert list(sample.shape) == [3, 3, 512, 512]
assert torch_all_close(sample, sample_2, atol=1e-1)
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
model = self.get_sd_vae_model()
encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))
with torch.no_grad():
sample = model.decode(encoding).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
sample_2 = model.decode(encoding).sample
assert list(sample.shape) == [3, 3, 512, 512]
assert torch_all_close(sample, sample_2, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
def test_stable_diffusion_encode_sample(self, seed, expected_slice):
model = self.get_sd_vae_model()
image = self.get_sd_image(seed)
generator = self.get_generator(seed)
with torch.no_grad():
dist = model.encode(image).latent_dist
sample = dist.sample(generator=generator)
assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
expected_output_slice = torch.tensor(expected_slice)
tolerance = 3e-3 if torch_device != "mps" else 1e-2
assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
| 18 | 0 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)
UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
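# UMT5 reuses the T5 hyper-parameter set (d_model, d_kv, d_ff, relative attention buckets)
# with its own defaults; the gated activation is parsed out of `feed_forward_proj`.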
class UMT5Config(PretrainedConfig):
model_type = "umt5"
keys_to_ignore_at_inference = ["past_key_values"]
def __init__( self , vocab_size=250_112 , d_model=512 , d_kv=64 , d_ff=1_024 , num_layers=8 , num_decoder_layers=None , num_heads=6 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1e-6 , initializer_factor=1.0 , feed_forward_proj="gated-gelu" , is_encoder_decoder=True , use_cache=True , tokenizer_class="T5Tokenizer" , tie_word_embeddings=True , pad_token_id=0 , eos_token_id=1 , decoder_start_token_id=0 , **kwargs , ):
super().__init__(
is_encoder_decoder=is_encoder_decoder , tokenizer_class=tokenizer_class , tie_word_embeddings=tie_word_embeddings , pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
self.vocab_size = vocab_size
self.d_model = d_model
self.d_kv = d_kv
self.d_ff = d_ff
self.num_layers = num_layers
self.num_decoder_layers = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
self.num_heads = num_heads
self.relative_attention_num_buckets = relative_attention_num_buckets
self.relative_attention_max_distance = relative_attention_max_distance
self.dropout_rate = dropout_rate
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_factor = initializer_factor
self.feed_forward_proj = feed_forward_proj
self.use_cache = use_cache
act_info = self.feed_forward_proj.split('-')
self.dense_act_fn = act_info[-1]
self.is_gated_act = act_info[0] == 'gated'
if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
if feed_forward_proj == "gated-gelu":
self.dense_act_fn = 'gelu_new'
@property
def hidden_size(self):
return self.d_model
@property
def num_attention_heads(self):
return self.num_heads
@property
def num_hidden_layers(self):
return self.num_layers
class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def inputs(self) -> Mapping[str, Mapping[int, str]]:
common_inputs = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
common_inputs['attention_mask'][1] = 'past_encoder_sequence + sequence'
common_inputs['decoder_input_ids'] = {0: 'batch'}
common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(common_inputs , direction='inputs' )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def default_onnx_opset(self) -> int:
return 13
@property
def atol_for_validation(self) -> float:
return 5e-4
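# Hypothetical usage sketch (the attribute names restored above follow the standard
# transformers convention):
# config = UMT5Config(d_model=256, num_heads=4)
# assert config.hidden_size == 256 and config.num_attention_heads == 4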
| 148 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class GPTBigCodeConfig(PretrainedConfig):
model_type = "gpt_bigcode"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , vocab_size=50257 , n_positions=1024 , n_embd=768 , n_layer=12 , n_head=12 , n_inner=None , activation_function="gelu_pytorch_tanh" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=50256 , eos_token_id=50256 , attention_softmax_in_fp32=True , scale_attention_softmax_in_fp32=True , multi_query=True , **kwargs , ):
self.vocab_size = vocab_size
self.n_positions = n_positions
self.n_embd = n_embd
self.n_layer = n_layer
self.n_head = n_head
self.n_inner = n_inner
self.activation_function = activation_function
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.attn_pdrop = attn_pdrop
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.scale_attn_weights = scale_attn_weights
self.use_cache = use_cache
self.attention_softmax_in_fp32 = attention_softmax_in_fp32
self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
self.multi_query = multi_query
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
| 18 | 0 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
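# Project Euler 33: a "digit cancelling" fraction like 49/98 keeps its value when the
# shared digit is naively cancelled (49/98 = 4/8).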
def is_digit_cancelling(num: int, den: int) -> bool:
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def fraction_list(digit_len: int) -> list[str]:
solutions = []
den = 11
last_digit = int("1" + "0" * digit_len)
for num in range(den, last_digit):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(num, den):
solutions.append(f"{num}/{den}")
den += 1
num += 1
den = 10
return solutions
def solution(n: int = 2) -> int:
result = 1.0
for fraction in fraction_list(n):
frac = Fraction(fraction)
result *= frac.denominator / frac.numerator
return int(result)
if __name__ == "__main__":
print(solution())
| 50 |
'''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class Data2VecAudioConfig(PretrainedConfig):
model_type = "data2vec-audio"
def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1e-5 , feat_extract_activation="gelu" , conv_dim=(512, 512, 512, 512, 512, 512, 512) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embedding_groups=16 , conv_pos_kernel_size=19 , num_conv_pos_embeddings=5 , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , ctc_loss_reduction="sum" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , tdnn_dim=(512, 512, 512, 512, 1500) , tdnn_kernel=(5, 3, 3, 1, 1) , tdnn_dilation=(1, 2, 3, 1, 1) , xvector_output_dim=512 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , add_adapter=False , adapter_kernel_size=3 , adapter_stride=2 , num_adapter_layers=3 , output_hidden_size=None , **kwargs , ):
super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
self.hidden_size = hidden_size
self.feat_extract_activation = feat_extract_activation
self.conv_dim = list(conv_dim )
self.conv_stride = list(conv_stride )
self.conv_kernel = list(conv_kernel )
self.conv_bias = conv_bias
self.num_conv_pos_embeddings = num_conv_pos_embeddings
self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
self.conv_pos_kernel_size = conv_pos_kernel_size
self.num_feat_extract_layers = len(self.conv_dim )
self.num_hidden_layers = num_hidden_layers
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.num_attention_heads = num_attention_heads
self.hidden_dropout = hidden_dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.feat_proj_dropout = feat_proj_dropout
self.final_dropout = final_dropout
self.layerdrop = layerdrop
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.vocab_size = vocab_size
self.use_weighted_layer_sum = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
self.mask_time_prob = mask_time_prob
self.mask_time_length = mask_time_length
self.mask_time_min_masks = mask_time_min_masks
self.mask_feature_prob = mask_feature_prob
self.mask_feature_length = mask_feature_length
self.mask_feature_min_masks = mask_feature_min_masks
# ctc loss
self.ctc_loss_reduction = ctc_loss_reduction
self.ctc_zero_infinity = ctc_zero_infinity
# adapter
self.add_adapter = add_adapter
self.adapter_kernel_size = adapter_kernel_size
self.adapter_stride = adapter_stride
self.num_adapter_layers = num_adapter_layers
self.output_hidden_size = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
self.classifier_proj_size = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
self.tdnn_dim = list(tdnn_dim )
self.tdnn_kernel = list(tdnn_kernel )
self.tdnn_dilation = list(tdnn_dilation )
self.xvector_output_dim = xvector_output_dim
@property
def inputs_to_logits_ratio(self):
return math.prod(self.conv_stride )
| 18 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/markuplm-base': 'https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json',
'microsoft/markuplm-large': 'https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json',
}
class MarkupLMConfig(PretrainedConfig):
model_type = "markuplm"
def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , bos_token_id=0 , eos_token_id=2 , max_xpath_tag_unit_embeddings=256 , max_xpath_subs_unit_embeddings=1024 , tag_pad_id=216 , subs_pad_id=1001 , xpath_unit_hidden_size=32 , max_depth=50 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
super().__init__(
pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
self.classifier_dropout = classifier_dropout
# additional properties
self.max_depth = max_depth
self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
self.tag_pad_id = tag_pad_id
self.subs_pad_id = subs_pad_id
self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 353 |
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
scheduler_classes = (DDPMParallelScheduler,)
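# Base scheduler kwargs shared by every test; individual tests override fields via **kwargs.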
def get_scheduler_config(self, **kwargs):
config = {
"num_train_timesteps": 1000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**kwargs)
return config
def test_timesteps(self):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=timesteps)
def test_betas(self):
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
def test_schedules(self):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=schedule )
def test_variance_type(self):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=variance )
def test_clip_sample(self):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=clip_sample )
def test_thresholding(self):
self.check_over_configs(thresholding=False )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )
def test_prediction_type(self):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=prediction_type )
def test_time_indices(self):
for t in [0, 500, 999]:
self.check_over_forward(time_step=t )
def test_variance(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
def test_batch_step_no_noise(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
num_trained_timesteps = len(scheduler )
model = self.dummy_model()
sample1 = self.dummy_sample_deter
sample2 = self.dummy_sample_deter + 0.1
sample3 = self.dummy_sample_deter - 0.1
per_sample_batch = sample1.shape[0]
samples = torch.stack([sample1, sample2, sample3] , dim=0 )
timesteps = torch.arange(num_trained_timesteps )[0:3, None].repeat(1 , per_sample_batch )
residual = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
pred_prev_sample = scheduler.batch_step_no_noise(residual , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
result_sum = torch.sum(torch.abs(pred_prev_sample ) )
result_mean = torch.mean(torch.abs(pred_prev_sample ) )
assert abs(result_sum.item() - 1153.1833 ) < 1e-2
assert abs(result_mean.item() - 0.5005 ) < 1e-3
def test_full_loop_no_noise(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
num_trained_timesteps = len(scheduler )
model = self.dummy_model()
sample = self.dummy_sample_deter
generator = torch.manual_seed(0 )
for t in reversed(range(num_trained_timesteps ) ):
# 1. predict noise residual
residual = model(sample , t )
# 2. predict previous mean of sample x_t-1
pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
sample = pred_prev_sample
result_sum = torch.sum(torch.abs(sample ) )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def test_full_loop_with_v_prediction(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(prediction_type="v_prediction" )
scheduler = scheduler_class(**scheduler_config )
num_trained_timesteps = len(scheduler )
model = self.dummy_model()
sample = self.dummy_sample_deter
generator = torch.manual_seed(0 )
for t in reversed(range(num_trained_timesteps ) ):
# 1. predict noise residual
residual = model(sample , t )
# 2. predict previous mean of sample x_t-1
pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
sample = pred_prev_sample
result_sum = torch.sum(torch.abs(sample ) )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def test_custom_timesteps(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
timesteps = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=timesteps )
scheduler_timesteps = scheduler.timesteps
for i, timestep in enumerate(scheduler_timesteps ):
if i == len(scheduler_timesteps ) - 1:
expected_prev_t = -1
else:
expected_prev_t = timesteps[i + 1]
prev_t = scheduler.previous_timestep(timestep )
prev_t = prev_t.item()
self.assertEqual(prev_t , expected_prev_t )
def test_custom_timesteps_increasing_order(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
timesteps = [100, 87, 50, 51, 0]
with self.assertRaises(ValueError , msg="`custom_timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=timesteps )
def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
timesteps = [100, 87, 50, 1, 0]
num_inference_steps = len(timesteps )
with self.assertRaises(ValueError , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )
def test_custom_timesteps_too_large(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
timesteps = [scheduler.config.num_train_timesteps]
with self.assertRaises(
ValueError ,
msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
scheduler.set_timesteps(timesteps=timesteps )
| 18 | 0 |
"""simple docstring"""
import os
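# Project Euler 82: minimal path sum from the left column to the right column of the
# matrix, moving right, up, or down, computed column by column.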
def solution(filename: str = "input.txt") -> int:
with open(os.path.join(os.path.dirname(__file__) , filename ) ) as input_file:
matrix = [
[int(element ) for element in line.split("," )]
for line in input_file.readlines()
]
rows = len(matrix )
cols = len(matrix[0] )
minimal_path_sums = [[-1 for _ in range(cols )] for _ in range(rows )]
for i in range(rows ):
minimal_path_sums[i][0] = matrix[i][0]
for j in range(1 , cols ):
for i in range(rows ):
minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , rows ):
minimal_path_sums[i][j] = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
minimal_path_sums[i][j] = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 528 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
}
}
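# CPM tokenizer: segments Chinese text with jieba, then applies an XLNet-style
# SentencePiece model; spaces and newlines are round-tripped through U+2582/U+2583.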
class CpmTokenizer(PreTrainedTokenizer):
def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , sp_model_kwargs = None , **kwargs , ) -> None:
mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
self._pad_token_type_id = 3
self.do_lower_case = do_lower_case
self.remove_space = remove_space
self.keep_accents = keep_accents
self.vocab_file = vocab_file
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(vocab_file )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
"See https://pypi.org/project/jieba/ for installation." )
self.jieba = jieba
self.translator = str.maketrans(" \n" , "\u2582\u2583" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def vocab_size(self):
return len(self.sp_model )
def get_vocab(self):
vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
state = self.__dict__.copy()
state["sp_model"] = None
return state
def __setstate__( self , d ):
self.__dict__ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
self.sp_model_kwargs = {}
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def preprocess_text(self, inputs):
if self.remove_space:
outputs = " ".join(inputs.strip().split() )
else:
outputs = inputs
outputs = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
outputs = unicodedata.normalize("NFKD" , outputs )
outputs = "".join([c for c in outputs if not unicodedata.combining(c )] )
if self.do_lower_case:
outputs = outputs.lower()
return outputs
def _tokenize(self, text):
text = self.preprocess_text(text )
pieces = self.sp_model.encode(text , out_type=str )
new_pieces = []
for piece in pieces:
if len(piece ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
cur_pieces = cur_pieces[1:]
else:
cur_pieces[0] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(cur_pieces )
else:
new_pieces.append(piece )
return new_pieces
def _convert_token_to_id(self, token):
return self.sp_model.PieceToId(token )
def _convert_id_to_token(self, index):
return self.sp_model.IdToPiece(index )
def convert_tokens_to_string(self, tokens):
out_string = "".join(tokens ).replace(SPIECE_UNDERLINE , " " ).strip()
return out_string
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1 = None) -> List[int]:
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return token_ids_0 + sep + cls
return token_ids_0 + sep + token_ids_1 + sep + cls
def get_special_tokens_mask(self, token_ids_0, token_ids_1 = None, already_has_special_tokens = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
if token_ids_1 is not None:
return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1, 1]
return ([0] * len(token_ids_0 )) + [1, 1]
def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1 = None) -> List[int]:
sep = [self.sep_token_id]
cls_segment_id = [2]
if token_ids_1 is None:
return len(token_ids_0 + sep ) * [0] + cls_segment_id
return len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1] + cls_segment_id
def save_vocabulary(self, save_directory, filename_prefix = None) -> Tuple[str]:
if not os.path.isdir(save_directory ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
out_vocab_file = os.path.join(
save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , out_vocab_file )
elif not os.path.isfile(self.vocab_file ):
with open(out_vocab_file , "wb" ) as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model )
return (out_vocab_file,)
def _decode(self, *args, **kwargs):
text = super()._decode(*args , **kwargs )
text = text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" )
return text
| 18 | 0 |
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
'word_embeddings_layernorm.weight',
'word_embeddings_layernorm.bias',
'input_layernorm.weight',
'input_layernorm.bias',
'post_attention_layernorm.weight',
'post_attention_layernorm.bias',
'self_attention.dense.bias',
'mlp.dense_4h_to_h.bias',
'ln_f.weight',
'ln_f.bias',
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
'mlp.dense_4h_to_h.weight',
'self_attention.dense.weight',
]
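# Maps Megatron-DeepSpeed parameter names onto the transformers Bloom layout; transformer
# block indices are shifted by 3 because the first layer files hold the embeddings.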
def layer_name_mapping(key, file):
layer_rename_map = {
"word_embeddings.weight": "word_embeddings.weight",
"word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
"word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
"weight": "ln_f.weight",
"bias": "ln_f.bias",
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
layer_number = int(re.match(r".*layer_(\d*).*" , file )[1] )
layer_number -= 3
return f'''h.{layer_number}.''' + key
def get_dtype_size(dtype):
if dtype == torch.bool:
return 1 / 8
bit_search = re.search(r"[^\d](\d+)$" , str(dtype ) )
if bit_search is None:
raise ValueError(f'''`dtype` is not a valid dtype: {dtype}.''' )
bit_size = int(bit_search.groups()[0] )
return bit_size // 8
def convert_bloom_checkpoint_to_pytorch(bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp):
if bloom_config_file == "":
lowercase_ = BloomConfig()
else:
lowercase_ = BloomConfig.from_json_file(SCREAMING_SNAKE_CASE_ )
if shard_model:
lowercase_ = os.listdir(SCREAMING_SNAKE_CASE_ )
lowercase_ = sorted(filter(lambda UpperCAmelCase__ : s.startswith("""layer""" ) and "model_00" in s , SCREAMING_SNAKE_CASE_ ) )
lowercase_ = {"""weight_map""": {}, """metadata""": {}}
lowercase_ = 0
lowercase_ = None
lowercase_ = BloomConfig()
for j, file in enumerate(SCREAMING_SNAKE_CASE_ ):
print("""Processing file: {}""".format(SCREAMING_SNAKE_CASE_ ) )
lowercase_ = None
for i in range(SCREAMING_SNAKE_CASE_ ):
# load all TP files
lowercase_ = file.replace("""model_00""" , F'''model_0{i}''' )
lowercase_ = torch.load(os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , map_location="""cpu""" )
# Rename keys in the transformers names
lowercase_ = list(temp.keys() )
for key in keys:
lowercase_ = temp.pop(SCREAMING_SNAKE_CASE_ )
if tensors is None:
lowercase_ = temp
else:
for key in tensors.keys():
if any(key.endswith(SCREAMING_SNAKE_CASE_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
lowercase_ = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
lowercase_ = torch.cat([tensors[key], temp[key]] , dim=SCREAMING_SNAKE_CASE_ )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(SCREAMING_SNAKE_CASE_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
lowercase_ = tensors[key] / pretraining_tp
torch.save(
SCREAMING_SNAKE_CASE_ , os.path.join(
SCREAMING_SNAKE_CASE_ , """pytorch_model_{}-of-{}.bin""".format(str(j + 1 ).zfill(5 ) , str(len(SCREAMING_SNAKE_CASE_ ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
lowercase_ = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
lowercase_ = """pytorch_model_{}-of-{}.bin""".format(
str(j + 1 ).zfill(5 ) , str(len(SCREAMING_SNAKE_CASE_ ) ).zfill(5 ) )
lowercase_ = BloomConfig()
lowercase_ = pytorch_dump_folder_path + """/""" + CONFIG_NAME
lowercase_ = total_size
with open(SCREAMING_SNAKE_CASE_ , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
with open(os.path.join(SCREAMING_SNAKE_CASE_ , WEIGHTS_NAME + """.index.json""" ) , """w""" , encoding="""utf-8""" ) as f:
lowercase_ = json.dumps(SCREAMING_SNAKE_CASE_ , indent=2 , sort_keys=SCREAMING_SNAKE_CASE_ ) + """\n"""
f.write(SCREAMING_SNAKE_CASE_ )
else:
lowercase_ = BloomModel(SCREAMING_SNAKE_CASE_ )
lowercase_ = os.listdir(SCREAMING_SNAKE_CASE_ )
lowercase_ = sorted(filter(lambda UpperCAmelCase__ : s.startswith("""layer""" ) and "model_00" in s , SCREAMING_SNAKE_CASE_ ) )
lowercase_ = None
for i, file in enumerate(SCREAMING_SNAKE_CASE_ ):
lowercase_ = None
for i in range(SCREAMING_SNAKE_CASE_ ):
# load all TP files
lowercase_ = file.replace("""model_00""" , F'''model_0{i}''' )
lowercase_ = torch.load(os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , map_location="""cpu""" )
# Rename keys in the transformers names
lowercase_ = list(temp.keys() )
for key in keys:
lowercase_ = temp.pop(SCREAMING_SNAKE_CASE_ )
if tensors is None:
lowercase_ = temp
else:
for key in tensors.keys():
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(SCREAMING_SNAKE_CASE_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
lowercase_ = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
lowercase_ = torch.cat([tensors[key], temp[key]] , dim=SCREAMING_SNAKE_CASE_ )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(SCREAMING_SNAKE_CASE_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
lowercase_ = tensors[key] / pretraining_tp
lowercase_ = model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ )
assert not other_keys.unexpected_keys, F'''The keys {other_keys.unexpected_keys} are unexpected'''
if missing_keys is None:
lowercase_ = set(other_keys.missing_keys )
else:
lowercase_ = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, F'''The keys {missing_keys} are missing'''
# Save pytorch-model
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
lowercase_ = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
lowercase_ = pytorch_dump_folder_path + """/""" + CONFIG_NAME
print(F'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''' )
if config.torch_dtype is not None:
lowercase_ = model.to(config.torch_dtype )
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE_ )
print(F'''Save configuration file to {pytorch_config_dump_path}''' )
with open(SCREAMING_SNAKE_CASE_ , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bloom_checkpoint_path',
default=None,
type=str,
required=True,
help='Path to the Megatron-LM checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--bloom_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--shard_model',
action='store_true',
help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint',
)
parser.add_argument(
'--pretraining_tp',
default=4,
type=int,
help='Pretraining TP rank that has been used when training the model in Megatron-LM \n',
)
args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 412 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
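# Thin `datasets` metric wrapper around sklearn's mean_squared_error; pass squared=False
# to get RMSE instead of MSE.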
_CITATION = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
_DESCRIPTION = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n"
_KWARGS_DESCRIPTION = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. ])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
] , )
def _get_feature_types(self):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("float" ) ),
"references": datasets.Sequence(datasets.Value("float" ) ),
}
else:
return {
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
}
def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
mse = mean_squared_error(
references , predictions , sample_weight=sample_weight , multioutput=multioutput , squared=squared )
return {"mse": mse}
| 18 | 0 |
"""simple docstring"""
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = PegasusTokenizer
rust_tokenizer_class = PegasusTokenizerFast
test_rust_tokenizer = True
test_sentencepiece = True
def setUp(self):
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = PegasusTokenizer(SAMPLE_VOCAB )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _large_tokenizer(self):
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def get_tokenizer(self, **kwargs):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **kwargs )
def get_input_output_texts(self, tokenizer):
return ("This is a test", "This is a test")
def test_convert_token_and_id(self):
token = """</s>"""
token_id = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def test_get_vocab(self):
vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(vocab_keys ) , 1_103 )
def test_vocab_size(self):
self.assertEqual(self.get_tokenizer().vocab_size , 1_103 )
def test_mask_tokens_rust_pegasus(self):
rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
raw_input_str = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
self.assertListEqual(py_ids , rust_ids )
def test_large_mask_tokens(self):
tokenizer = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
raw_input_str = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
desired_result = [2, 413, 615, 114, 3, 1_971, 113, 1_679, 10_710, 107, 1]
ids = tokenizer([raw_input_str] , return_tensors=None ).input_ids[0]
self.assertListEqual(desired_result , ids )
def test_large_tokenizer_settings(self):
tokenizer = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96_103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1_024
raw_input_str = """To ensure a smooth flow of bank resolutions."""
desired_result = [413, 615, 114, 2_291, 1_971, 113, 1_679, 10_710, 107, 1]
ids = tokenizer([raw_input_str] , return_tensors=None ).input_ids[0]
self.assertListEqual(desired_result , ids )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def test_large_seq2seq_truncation(self):
src_texts = ["""This is going to be way too long.""" * 150, """short example"""]
tgt_texts = ["""not super long but more than 5 tokens""", """tiny"""]
batch = self._large_tokenizer(src_texts , padding=True , truncation=True , return_tensors="""pt""" )
targets = self._large_tokenizer(
text_target=tgt_texts , max_length=5 , padding=True , truncation=True , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1_024)
assert batch.attention_mask.shape == (2, 1_024)
assert targets["input_ids"].shape == (2, 5)
assert len(batch ) == 2 # input_ids, attention_mask.
@slow
def test_tokenizer_integration(self):
# fmt: off
UpperCAmelCase = {"""input_ids""": [[38_979, 143, 18_485, 606, 130, 26_669, 87_686, 121, 54_189, 1_129, 111, 26_669, 87_686, 121, 9_114, 14_787, 121, 13_249, 158, 592, 956, 121, 14_621, 31_576, 143, 62_613, 108, 9_688, 930, 43_430, 11_562, 62_613, 304, 108, 11_443, 897, 108, 9_314, 17_415, 63_399, 108, 11_443, 7_614, 18_316, 118, 4_284, 7_148, 12_430, 143, 1_400, 25_703, 158, 111, 4_284, 7_148, 11_772, 143, 21_297, 1_064, 158, 122, 204, 3_506, 1_754, 1_133, 14_787, 1_581, 115, 33_224, 4_482, 111, 1_355, 110, 29_173, 317, 50_833, 108, 20_147, 94_665, 111, 77_198, 107, 1], [110, 62_613, 117, 638, 112, 1_133, 121, 20_098, 1_355, 79_050, 13_872, 135, 1_596, 53_541, 1_352, 141, 13_039, 5_542, 124, 302, 518, 111, 268, 2_956, 115, 149, 4_427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1_235, 2_799, 18_289, 17_780, 204, 109, 9_474, 1_296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = PegasusTokenizer
rust_tokenizer_class = PegasusTokenizerFast
test_rust_tokenizer = True
test_sentencepiece = True
def setUp(self):
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = PegasusTokenizer(SAMPLE_VOCAB , offset=0 , mask_token_sent=None , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _large_tokenizer(self):
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def get_tokenizer(self, **kwargs):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **kwargs )
def get_input_output_texts(self, tokenizer):
return ("This is a test", "This is a test")
def test_mask_tokens_rust_pegasus(self):
rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
raw_input_str = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
self.assertListEqual(py_ids , rust_ids )
@require_torch
def test_large_seq2seq_truncation(self):
src_texts = ["""This is going to be way too long.""" * 1_000, """short example"""]
tgt_texts = ["""not super long but more than 5 tokens""", """tiny"""]
batch = self._large_tokenizer(src_texts , padding=True , truncation=True , return_tensors="""pt""" )
targets = self._large_tokenizer(
text_target=tgt_texts , max_length=5 , padding=True , truncation=True , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4_096)
assert batch.attention_mask.shape == (2, 4_096)
assert targets["input_ids"].shape == (2, 5)
assert len(batch ) == 2 # input_ids, attention_mask.
def test_equivalence_to_orig_tokenizer(self):
raw_input_str = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
token_ids = self._large_tokenizer(raw_input_str ).input_ids
self.assertListEqual(
token_ids , [182, 117, 142, 587, 4_211, 120, 117, 263, 112, 804, 109, 856, 25_016, 3_137, 464, 109, 26_955, 3_137, 1] , )
| 341 |
'''simple docstring'''
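# Two integers have opposite signs exactly when XOR-ing them sets the sign bit,
# i.e. (a ^ b) < 0.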
def different_signs(num1: int, num2: int) -> bool:
return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 | 0 |
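# An isogram is a word with no repeating letters, e.g. "uncopyrightable".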
def is_isogram(string: str) -> bool:
if not all(x.isalpha() for x in string ):
raise ValueError('String must only contain alphabetic characters.' )
letters = sorted(string.lower() )
return len(letters ) == len(set(letters ) )
if __name__ == "__main__":
input_str = input('''Enter a string ''').strip()
isogram = is_isogram(input_str)
print(F'''{input_str} is {"an" if isogram else "not an"} isogram.''')
| 576 |
'''simple docstring'''
from __future__ import annotations
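# The P-series is 1 + 1/2^p + 1/3^p + ... + 1/n^p; each term is rendered as a string,
# e.g. "1 / 8" for the n=2 term with p=3.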
def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
if nth_term == "":
return [""]
nth_term = int(nth_term )
power = int(power )
series: list[str] = []
for temp in range(int(nth_term ) ):
series.append(f'''1 / {pow(temp + 1 , int(power ) )}''' if series else "1" )
return series
if __name__ == "__main__":
import doctest
doctest.testmod()
nth_term = int(input("Enter the last number (nth term) of the P-Series"))
power = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
| 18 | 0 |
'''simple docstring'''
__all__ = [
'Audio',
'Array2D',
'Array3D',
'Array4D',
'Array5D',
'ClassLabel',
'Features',
'Sequence',
'Value',
'Image',
'Translation',
'TranslationVariableLanguages',
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 533 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
# NOTE: the original diffusers class names were anonymized in this dump and are not
# recoverable; the _DummyTorchClassN placeholder names below are hypothetical stand-ins.
# Every dummy class follows the same pattern: it raises unless torch is available.
class _DummyTorchClass0(metaclass=DummyObject ):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self , ["torch"] )
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls , ["torch"] )
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls , ["torch"] )
class _DummyTorchClass1(metaclass=DummyObject ):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self , ["torch"] )
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls , ["torch"] )
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls , ["torch"] )
class _DummyTorchClass2(metaclass=DummyObject ):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self , ["torch"] )
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls , ["torch"] )
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls , ["torch"] )
class _DummyTorchClass3(metaclass=DummyObject ):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self , ["torch"] )
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls , ["torch"] )
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls , ["torch"] )
class _DummyTorchClass4(metaclass=DummyObject ):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self , ["torch"] )
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls , ["torch"] )
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls , ["torch"] )
class _DummyTorchClass5(metaclass=DummyObject ):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self , ["torch"] )
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls , ["torch"] )
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls , ["torch"] )
class _DummyTorchClass6(metaclass=DummyObject ):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self , ["torch"] )
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls , ["torch"] )
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls , ["torch"] )
class _DummyTorchClass7(metaclass=DummyObject ):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self , ["torch"] )
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls , ["torch"] )
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls , ["torch"] )
class _DummyTorchClass8(metaclass=DummyObject ):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self , ["torch"] )
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls , ["torch"] )
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls , ["torch"] )
class _DummyTorchClass9(metaclass=DummyObject ):
_backends = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : int ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : str ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : List[Any] ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : List[str] ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : List[str] ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : str ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Dict ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Dict = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Optional[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Any = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Optional[int] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[str] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Any = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[str] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[str] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Optional[int] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Optional[int] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : str = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Tuple = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[str] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Any = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : int = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Tuple = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : str = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Dict = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Any = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[str] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : int = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Dict = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : str = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : int = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[str] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Tuple = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : str = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
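# Illustrative sketch (added; not from the original file) of the pattern above:
# every placeholder class uses a DummyObject-style metaclass (imported at the top
# of this module) so that merely touching it raises when the "torch" backend is
# unavailable. A minimal self-contained stand-in, assuming only the standard library:
class _DummyBackendMeta(type):
    def __getattr__(cls, name):
        raise ImportError(f"{cls.__name__} requires the torch backend, which is not installed.")


class _TorchOnlyPlaceholder(metaclass=_DummyBackendMeta):
    def __init__(self, *args, **kwargs):
        raise ImportError(f"{type(self).__name__} requires the torch backend, which is not installed.")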
| 18 | 0 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 650, """eval_accuracy""": 0.6, """eval_loss""": 0.9},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.3, """eval_loss""": 0.9},
},
] )
class SingleNodeTest(unittest.TestCase):
    def setUp(self) -> None:
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1) -> "HuggingFace":
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name) -> None:
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self) -> None:
        # create estimator
        estimator = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 99_9999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 633 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their job links in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Action artifact from a URL.

    The URL can't be used to download directly; we need to get a redirect URL first.
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files."""
    errors = []

    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors
def reduce_by_error(logs, error_filter=None):
    """Count the occurrences of each error."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    """Get the model name from a test method path."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None

    return test
def reduce_by_model(logs, error_filter=None):
    """Map each model to its error counts."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)

    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)

    return "\n".join(lines)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
_SCREAMING_SNAKE_CASE = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
_SCREAMING_SNAKE_CASE = get_job_links(args.workflow_run_id, token=args.token)
_SCREAMING_SNAKE_CASE = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
_SCREAMING_SNAKE_CASE = k.find(" / ")
_SCREAMING_SNAKE_CASE = k[index + len(" / ") :]
_SCREAMING_SNAKE_CASE = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
_SCREAMING_SNAKE_CASE = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
_SCREAMING_SNAKE_CASE = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
_SCREAMING_SNAKE_CASE = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
_SCREAMING_SNAKE_CASE = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
_SCREAMING_SNAKE_CASE = reduce_by_error(errors)
_SCREAMING_SNAKE_CASE = reduce_by_model(errors)
_SCREAMING_SNAKE_CASE = make_github_table(reduced_by_error)
_SCREAMING_SNAKE_CASE = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
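# Hedged usage sketch (added; not in the original file). The run id below is
# hypothetical, and the filename assumes this module is saved as
# get_ci_error_statistics.py:
#
#     python get_ci_error_statistics.py \
#         --workflow_run_id 1234567890 \
#         --output_dir ci_reports \
#         --token $GITHUB_TOKEN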
| 18 | 0 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}
class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with the Jieba segmentation tool. It is used in CPM models."""

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs=None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
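# Hedged usage sketch (added; not in the original file), assuming sentencepiece
# and jieba are installed and the checkpoint referenced above is reachable:
#
#     tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
#     ids = tokenizer("今天天气真好", return_tensors="pt").input_ids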
| 694 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        # build a default scheduler only when none was passed in, so that
        # callers such as test_switch can exercise their own instance
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2574) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="dpmsolver++",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras_sigmas(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.0649) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
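# Hedged follow-up sketch (added; not in the original test file): the from_config
# round trip exercised in test_switch above is also how this scheduler is swapped
# into a real pipeline; the model id below is illustrative only.
#
#     from diffusers import DiffusionPipeline, DPMSolverSinglestepScheduler
#
#     pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#     pipe.scheduler = DPMSolverSinglestepScheduler.from_config(pipe.scheduler.config)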
| 18 | 0 |
"""simple docstring"""
__version__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
        AltDiffusionImg2ImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
        LDMTextToImagePipeline,
        PaintByExamplePipeline,
        SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
        ShapEPipeline,
        StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
        StableDiffusionControlNetInpaintPipeline,
        StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
        StableUnCLIPPipeline,
        TextToVideoSDPipeline,
        TextToVideoZeroPipeline,
        UnCLIPImageVariationPipeline,
        UnCLIPPipeline,
        UniDiffuserModel,
        UniDiffuserPipeline,
        UniDiffuserTextDecoder,
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
        VideoToVideoSDPipeline,
        VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
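# The guarded-import pattern used throughout this __init__, in miniature. This is a hedged
# sketch: `is_foo_available`, `dummy_foo_objects`, and `FooPipeline` are hypothetical names.
#
# try:
#     if not is_foo_available():
#         raise OptionalDependencyNotAvailable()
# except OptionalDependencyNotAvailable:
#     from .utils.dummy_foo_objects import *  # noqa F403  (placeholders that raise a clear error on use)
# else:
#     from .pipelines import FooPipeline  # the real import, only when the dependency is installed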
| 346 |
'''simple docstring'''
from __future__ import annotations
def mean(nums: list) -> float:
    """Return the arithmetic mean of the numbers in a non-empty list."""
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
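# Usage sketch for mean() as fixed above (values illustrative):
# >>> mean([3, 6, 9, 12, 15, 18, 21])
# 12.0
# >>> mean([])
# ValueError: List is empty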
| 18 | 0 |
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list:
    # Swap two randomly chosen positions, once per element of the list.
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 148 |
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self) -> None:
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self) -> None:
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self) -> None:
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self) -> None:
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self) -> None:
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
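# For reference, a minimal streaming sketch outside the test harness (model name illustrative):
# tok = AutoTokenizer.from_pretrained("distilgpt2")
# lm = AutoModelForCausalLM.from_pretrained("distilgpt2")
# lm.generate(**tok("Hello", return_tensors="pt"), max_new_tokens=20, streamer=TextStreamer(tok))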
| 18 | 0 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n: int = 100, character: str = " "):
    """Split the text every ``n``-th occurrence of ``character``."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]
def split_documents(documents: dict) -> dict:
    """Split documents into passages."""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def A__ ( __lowerCAmelCase : "RagExampleArguments" , __lowerCAmelCase : "ProcessingArguments" , __lowerCAmelCase : "IndexHnswArguments" , ):
logger.info("""Step 1 - Create the dataset""" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("""Step 2 - Index the dataset""" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
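# Hypothetical CLI sketch (script and file names are illustrative, not from the source):
#   python use_own_knowledge_dataset.py --csv_path ./my_docs.tsv --output_dir ./my_kb --batch_size 16
# The saved `my_knowledge_dataset` folder and the `.faiss` index can then back a RAG retriever.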
| 50 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=50265, max_position_embeddings=512, encoder_layers=8, encoder_ffn_dim=2048, encoder_attention_heads=16, decoder_layers=8, decoder_ffn_dim=2048, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=1, scale_embedding=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, forced_eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs)


class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
| 18 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'
),
'distilbert-base-uncased-finetuned-sst-2-english': (
'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'
),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(self, vocab_size=30522, max_position_embeddings=512, sinusoidal_pos_embds=False, n_layers=6, n_heads=12, dim=768, hidden_dim=4 * 768, dropout=0.1, attention_dropout=0.1, activation="gelu", initializer_range=0.02, qa_dropout=0.1, seq_classif_dropout=0.2, pad_token_id=0, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
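# Usage sketch (assumed behavior of the classes fixed above):
# config = DistilBertConfig(n_layers=3)
# onnx_config = DistilBertOnnxConfig(config, task="multiple-choice")
# list(onnx_config.inputs)  # ["input_ids", "attention_mask"], each with a dynamic "choice" axis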
| 353 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
_SCREAMING_SNAKE_CASE = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"
_SCREAMING_SNAKE_CASE = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n"
_SCREAMING_SNAKE_CASE = "\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(self, predictions, references, regexes_to_ignore=None, ignore_case=False, ignore_punctuation=False, ignore_numbers=False):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
| 18 | 0 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
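# Quick sketch (assumed usage): a cosine-schedule beta table and its cumulative alphas.
# betas = betas_for_alpha_bar(1000)                   # shape (1000,)
# alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)  # the alpha-bar_t curve the scheduler tracks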
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, beta_start: float = 0.00085, beta_end: float = 0.012, beta_schedule: str = "linear", trained_betas=None, prediction_type: str = "epsilon", use_karras_sigmas: bool = False, clip_sample: bool = False, clip_sample_range: float = 1.0, timestep_spacing: str = "linspace", steps_offset: int = 0):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas
def a__ ( self , lowerCamelCase , lowerCamelCase=None ):
if schedule_timesteps is None:
__a = self.timesteps
__a = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__a = 1 if len(_lowerCAmelCase ) > 1 else 0
else:
__a = timestep.cpu().item() if torch.is_tensor(_lowerCAmelCase ) else timestep
__a = self._index_counter[timestep_int]
return indices[pos].item()
@property
def a__ ( self ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def a__ ( self , lowerCamelCase , lowerCamelCase , ):
__a = self.index_for_timestep(_lowerCAmelCase )
__a = self.sigmas[step_index]
__a = sample / ((sigma**2 + 1) ** 0.5)
return sample
def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , ):
__a = num_inference_steps
__a = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__a = np.linspace(0 , num_train_timesteps - 1 , _lowerCAmelCase , dtype=_lowerCAmelCase )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__a = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__a = (np.arange(0 , _lowerCAmelCase ) * step_ratio).round()[::-1].copy().astype(_lowerCAmelCase )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__a = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__a = (np.arange(_lowerCAmelCase , 0 , -step_ratio )).round().copy().astype(_lowerCAmelCase )
timesteps -= 1
else:
raise ValueError(
F"{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'." )
__a = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__a = np.log(_lowerCAmelCase )
__a = np.interp(_lowerCAmelCase , np.arange(0 , len(_lowerCAmelCase ) ) , _lowerCAmelCase )
if self.config.use_karras_sigmas:
__a = self._convert_to_karras(in_sigmas=_lowerCAmelCase , num_inference_steps=self.num_inference_steps )
__a = np.array([self._sigma_to_t(_lowerCAmelCase , _lowerCAmelCase ) for sigma in sigmas] )
__a = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
__a = torch.from_numpy(_lowerCAmelCase ).to(device=_lowerCAmelCase )
__a = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
__a = torch.from_numpy(_lowerCAmelCase )
__a = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(_lowerCAmelCase ).startswith("mps" ):
# mps does not support float64
__a = timesteps.to(_lowerCAmelCase , dtype=torch.floataa )
else:
__a = timesteps.to(device=_lowerCAmelCase )
# empty dt and derivative
__a = None
__a = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__a = defaultdict(_lowerCAmelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase ):
# get log sigma
__a = np.log(_lowerCAmelCase )
# get distribution
__a = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
__a = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
__a = low_idx + 1
__a = log_sigmas[low_idx]
__a = log_sigmas[high_idx]
# interpolate sigmas
__a = (low - log_sigma) / (low - high)
__a = np.clip(_lowerCAmelCase , 0 , 1 )
# transform interpolation to time range
__a = (1 - w) * low_idx + w * high_idx
__a = t.reshape(sigma.shape )
return t
def a__ ( self , lowerCamelCase , lowerCamelCase ):
__a = in_sigmas[-1].item()
__a = in_sigmas[0].item()
__a = 7.0 # 7.0 is the value used in the paper
__a = np.linspace(0 , 1 , _lowerCAmelCase )
__a = sigma_min ** (1 / rho)
__a = sigma_max ** (1 / rho)
__a = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def a__ ( self ):
return self.dt is None
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = True , ):
__a = self.index_for_timestep(_lowerCAmelCase )
# advance index counter by 1
__a = timestep.cpu().item() if torch.is_tensor(_lowerCAmelCase ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__a = self.sigmas[step_index]
__a = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
__a = self.sigmas[step_index - 1]
__a = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__a = 0
__a = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__a = sigma_hat if self.state_in_first_order else sigma_next
__a = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__a = sigma_hat if self.state_in_first_order else sigma_next
__a = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
__a = model_output
else:
raise ValueError(
F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" )
if self.config.clip_sample:
__a = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__a = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__a = sigma_next - sigma_hat
# store for 2nd order step
__a = derivative
__a = dt
__a = sample
else:
# 2. 2nd order / Heun's method
__a = (sample - pred_original_sample) / sigma_next
__a = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
__a = self.dt
__a = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
__a = None
__a = None
__a = None
__a = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_lowerCAmelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__a = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(_lowerCAmelCase ):
# mps does not support float64
__a = self.timesteps.to(original_samples.device , dtype=torch.floataa )
__a = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
__a = self.timesteps.to(original_samples.device )
__a = timesteps.to(original_samples.device )
__a = [self.index_for_timestep(_lowerCAmelCase , _lowerCAmelCase ) for t in timesteps]
__a = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__a = sigma.unsqueeze(-1 )
__a = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
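# Hypothetical denoising-loop sketch for the scheduler above (`unet` and `shape` are illustrative):
# scheduler = HeunDiscreteScheduler()
# scheduler.set_timesteps(50)
# sample = torch.randn(shape) * scheduler.init_noise_sigma
# for t in scheduler.timesteps:
#     model_input = scheduler.scale_model_input(sample, t)
#     noise_pred = unet(model_input, t).sample
#     sample = scheduler.step(noise_pred, t, sample).prev_sample  # alternates 1st/2nd-order sub-steps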
| 528 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 18 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/deit-base-distilled-patch16-224': (
'https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json'
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, encoder_stride=16, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
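# Usage sketch (assumed behavior of the classes fixed above):
# config = DeiTConfig(image_size=384)
# onnx_config = DeiTOnnxConfig(config)
# list(onnx_config.inputs)          # ["pixel_values"], with batch/channel/height/width dynamic axes
# onnx_config.atol_for_validation   # 1e-4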
| 412 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
"tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(self, vocab_size=65024, hidden_size=4544, num_hidden_layers=32, num_attention_heads=71, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, hidden_dropout=0.0, attention_dropout=0.0, num_kv_heads=None, alibi=False, new_decoder_architecture=False, multi_query=True, parallel_attn=True, bias=False, bos_token_id=11, eos_token_id=11, **kwargs):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
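# Quick sketch of the derived properties fixed above (defaults from this config):
# config = FalconConfig()
# config.head_dim  # 4544 // 71 == 64
# config.rotary    # True, because alibi defaults to False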
| 18 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_mctct""": ["""MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MCTCTConfig"""],
"""feature_extraction_mctct""": ["""MCTCTFeatureExtractor"""],
"""processing_mctct""": ["""MCTCTProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
"""MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MCTCTForCTC""",
"""MCTCTModel""",
"""MCTCTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
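# Sketch of the lazy-import behavior configured above (import path illustrative):
# import transformers.models.deprecated.mctct as mctct
# mctct.MCTCTConfig  # the submodule is only imported at this first attribute access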
| 341 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/deit-base-distilled-patch16-224": (
"https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, encoder_stride=16, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 18 | 0 |
import random
def rabin_miller(num: int) -> bool:
    # Write num - 1 as 2**t * s with s odd, then run 5 rounds of Miller-Rabin.
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
def is_prime_low_num(num: int) -> bool:
    if num < 2:
        return False
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71,
        73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151,
        157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233,
        239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317,
        331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419,
        421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503,
        509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607,
        613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
        709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811,
        821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911,
        919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997,
    ]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)
def generate_large_prime(keysize: int = 1024) -> int:
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
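# Sanity-check sketch (expected behavior of the functions above):
# is_prime_low_num(97)    # True  -- found in low_primes
# is_prime_low_num(1105)  # False -- 5 * 13 * 17; trial division catches this Carmichael number
# generate_large_prime(16)  # a random 16-bit probable prime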
| 576 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 18 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'
),
}
class DPRConfig(PretrainedConfig):
    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
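# Usage sketch (illustrative, not part of the original module): the config can
# be instantiated directly with keyword overrides for any field.
if __name__ == "__main__":
    config = DPRConfig(projection_dim=128)
    print(config.hidden_size)     # 768 (default)
    print(config.projection_dim)  # 128 (overridden)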
| 533 |
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class LevitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
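# Usage sketch (illustrative; assumes Pillow is installed and `img` is any RGB
# PIL image). `return_tensors="np"` keeps the example free of torch.
if __name__ == "__main__":
    from PIL import Image

    img = Image.new("RGB", (640, 480))
    processor = LevitImageProcessor()
    batch = processor.preprocess(img, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224) after resize + center crop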
| 18 | 0 |
"""simple docstring"""
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D
class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
            )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()
        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
            )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)
        return hidden_states
class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)
        return hidden_states
class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
        ]
        attentions = []
        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels, n_heads=self.num_attention_heads, d_head=self.in_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
            )
            attentions.append(attn_block)
            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        return hidden_states
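# Usage sketch (illustrative only): initializing a plain down block and running
# a dummy NHWC tensor through it. The shapes and the time-embedding width are
# assumptions chosen for the example, not values taken from this module.
if __name__ == "__main__":
    import jax

    block = FlaxDownBlock2D(in_channels=32, out_channels=64)
    x = jnp.zeros((1, 16, 16, 32))      # (batch, height, width, channels)
    temb = jnp.zeros((1, 128))          # time embedding; width is arbitrary here
    params = block.init(jax.random.PRNGKey(0), x, temb)
    y, skips = block.apply(params, x, temb)
    print(y.shape)  # expected (1, 8, 8, 64) after the stride-2 downsample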
| 633 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
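# Quick sanity check (illustrative): with the defaults above, the channel
# dimension after the last stage is embed_dim * 2**(num_stages - 1).
if __name__ == "__main__":
    config = DonutSwinConfig()
    assert config.hidden_size == 96 * 2 ** 3 == 768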
| 18 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '''\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'''
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
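# Quick check (illustrative): with the default scale_factor of 8, a 768x768
# request maps back to latent-compatible dimensions that are multiples of 8:
#
#     >>> downscale_height_and_width(768, 768)
#     (96, 96)
#
# i.e. 768 // 8**2 = 12 latent cells per side, re-scaled by 8 to 96.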
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_channels_latents = self.movq.config.latent_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler,
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False,
            )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator,
            )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 694 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
| 18 | 0 |
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child: str, genes: list[str]) -> str:
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child) - 1)] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 1_0_0) + 1
    child_n = 1_0 if child_n >= 1_0 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
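# Tiny demo of the operators above (illustrative; concrete outputs depend on
# the RNG state):
#
#     child_1, child_2 = crossover("aaaa", "bbbb")  # e.g. ("aabb", "bbaa")
#     child_1 = mutate(child_1, genes=["a", "b", "c"])
#
# Each child keeps one parent's prefix and the other's suffix; `mutate` then
# rewrites at most one character with probability MUTATION_PROBABILITY.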
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))
    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)
        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 1_0 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
| 346 |
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        # enable deterministic behavior for gradient checkpointing
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)
        assert not model.is_gradient_checkpointing and model.training
        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()
        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()
        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()
        assert model_2.is_gradient_checkpointing and model_2.training
        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()
        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))

    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        image = model(**self.dummy_input)
        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()
        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)
        image = torch.randn(
            1, model.config.in_channels, model.config.sample_size, model.config.sample_size, generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32
        model = AutoencoderKL.from_pretrained(
            model_id, subfolder="vae", torch_dtype=torch_dtype, revision=revision,
        )
        model.to(torch_device).eval()
        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)
        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)
        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        with torch.no_grad():
            sample = model(image).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))
        with torch.no_grad():
            sample = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)
        with torch.no_grad():
            sample = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)
        with torch.no_grad():
            sample = model.decode(encoding).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        assert torch_all_close(sample, sample_2, atol=1e-1)
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))
        with torch.no_grad():
            sample = model.decode(encoding).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        assert torch_all_close(sample, sample_2, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)
        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)
        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
| 18 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 148 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
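# Usage sketch (illustrative): any field can be overridden at construction, and
# the attribute_map exposes the standard transformers aliases.
if __name__ == "__main__":
    config = GPTBigCodeConfig(n_layer=24)
    print(config.multi_query)         # True (default multi-query attention)
    print(config.num_hidden_layers)   # 24, aliased to n_layer via attribute_map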
| 18 | 0 |
'''simple docstring'''
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)
MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
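# Example of the resulting header value (illustrative; the exact version
# numbers and session id depend on the local environment):
#
#     >>> http_user_agent({"pipeline_class": "StableDiffusionPipeline"})
#     'diffusers/...; python/3.10.12; session_id/...; torch/2.0.1; pipeline_class/StableDiffusionPipeline'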
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None) -> str:
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )
    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return
    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)
    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en", license="apache-2.0", library_name="diffusers", tags=[], datasets=args.dataset_name, metrics=[],
        ), template_path=MODEL_CARD_TEMPLATE_PATH, model_name=model_name, repo_name=repo_name, dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None, learning_rate=args.learning_rate, train_batch_size=args.train_batch_size, eval_batch_size=args.eval_batch_size, gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ), adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None, adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None, adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None, adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None, lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None, lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None, ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None, ema_power=args.ema_power if hasattr(args, "ema_power") else None, ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None, mixed_precision=args.mixed_precision,
    )
    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
with open(cache_version_file) as f:
try:
            cache_version = int(f.read())
except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
            trace = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
F'There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
F'There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '
'the directory exists and can be written to.'
)
def _add_variant( weights_name : str , variant : Optional[str] = None ):
    if variant is not None:
        splits = weights_name.split("." )
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits )
    return weights_name
def A__ ( pretrained_model_name_or_path , *,
    weights_name , subfolder , cache_dir , force_download , proxies , resume_download , local_files_only , use_auth_token , user_agent , revision , commit_hash=None , ):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path )
    if os.path.isfile(pretrained_model_name_or_path ):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path ):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path , weights_name ) ):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path , weights_name )
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path , subfolder , weights_name ) ):
            model_file = os.path.join(pretrained_model_name_or_path , subfolder , weights_name )
            return model_file
else:
raise EnvironmentError(
F'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''' )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__ ).base_version ) >= version.parse("0.20.0" )  # installed diffusers version, imported at the top of the file
):
try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path , filename=_add_variant(weights_name , revision ) , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
                warnings.warn(
                    f'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.''' , FutureWarning , )
return model_file
except: # noqa: E722
                warnings.warn(
                    f'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name , revision )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(weights_name , revision )}\' so that the correct variant file can be added.''' , FutureWarning , )
try:
# 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path , filename=weights_name , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '''
"""listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a """
"""token having permission to this repo with `use_auth_token` or log in with `huggingface-cli """
"""login`.""" )
except RevisionNotFoundError:
raise EnvironmentError(
F'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '''
"""this model name. Check the model page at """
F'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' )
except EntryNotFoundError:
raise EnvironmentError(
F'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' )
except HTTPError as err:
raise EnvironmentError(
F'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' )
except ValueError:
raise EnvironmentError(
F'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'''
F''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'''
F''' directory containing a file named {weights_name} or'''
""" \nCheckout your internet connection or see how to run the library in"""
""" offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'.""" )
except EnvironmentError:
raise EnvironmentError(
F'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '''
"""'https://huggingface.co/models', make sure you don't have a local directory with the same name. """
F'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '''
F'''containing a file named {weights_name}''' )
| 50 |
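# A minimal, illustrative usage sketch for the `_add_variant` helper above: it splices
# a variant tag (e.g. "fp16") in front of the file extension. The expected filenames
# are assumptions derived from the split/join logic, not output captured from the library.
weights = "diffusion_pytorch_model.bin"
assert _add_variant(weights, "fp16") == "diffusion_pytorch_model.fp16.bin"
assert _add_variant(weights, None) == "diffusion_pytorch_model.bin"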
'''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class lowerCAmelCase_ ( PretrainedConfig ):
    model_type = "data2vec-audio"

    def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1E-5 , feat_extract_activation="gelu" , conv_dim=(512, 512, 512, 512, 512, 512, 512) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embedding_groups=16 , conv_pos_kernel_size=19 , num_conv_pos_embeddings=5 , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , ctc_loss_reduction="sum" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , tdnn_dim=(512, 512, 512, 512, 1500) , tdnn_kernel=(5, 3, 3, 1, 1) , tdnn_dilation=(1, 2, 3, 1, 1) , xvector_output_dim=512 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , add_adapter=False , adapter_kernel_size=3 , adapter_stride=2 , num_adapter_layers=3 , output_hidden_size=None , **kwargs , ):
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
                f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim )
        self.tdnn_kernel = list(tdnn_kernel )
        self.tdnn_dilation = list(tdnn_dilation )
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio( self ):
        return math.prod(self.conv_stride )
| 18 | 0 |
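# Worked example of the `inputs_to_logits_ratio` property above: the feature extractor
# downsamples the raw waveform by the product of the conv strides. The 320x figure is
# an assumption derived from the default strides, not a value quoted from the library.
import math

default_conv_stride = (5, 2, 2, 2, 2, 2, 2)
assert math.prod(default_conv_stride) == 320  # 320 input samples per output frame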
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class __magic_name__ ( PretrainedConfig ):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__( self , **kwargs ):
        """simple docstring"""
        super().__init__(**kwargs )
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop('''encoder''' )
        encoder_model_type = encoder_config.pop('''model_type''' )
        decoder_config = kwargs.pop('''decoder''' )
        decoder_model_type = decoder_config.pop('''model_type''' )
        from ..auto.configuration_auto import AutoConfig
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs( cls , encoder_config , decoder_config , **kwargs ) -> PretrainedConfig:
        """simple docstring"""
        logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )

    def to_dict( self ):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output['''encoder'''] = self.encoder.to_dict()
        output['''decoder'''] = self.decoder.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
| 353 |
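# Illustrative sketch (plain dicts, no transformers dependency) of the nesting scheme
# the config above implements: each sub-config is serialized with a "model_type" tag,
# and deserialization pops that tag to route to the matching config class. The dict
# contents here are made-up examples.
encoder = {"model_type": "bert", "hidden_size": 768}
decoder = {"model_type": "gpt2", "is_decoder": True, "add_cross_attention": True}
composed = {"model_type": "encoder-decoder", "encoder": dict(encoder), "decoder": dict(decoder)}
sub_config = dict(composed["encoder"])
assert sub_config.pop("model_type") == "bert"  # the tag selects the concrete config class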
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase_ ( SchedulerCommonTest ):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config( self , **kwargs ):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs )
        return config

    def test_timesteps( self ):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )

    def test_betas( self ):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )

    def test_schedules( self ):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )

    def test_variance_type( self ):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance )

    def test_clip_sample( self ):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )

    def test_thresholding( self ):
        self.check_over_configs(thresholding=False )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )

    def test_prediction_type( self ):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )

    def test_time_indices( self ):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t )
    def test_variance( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5

    def test_batch_step_no_noise( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        samplea = self.dummy_sample_deter
        sampleb = self.dummy_sample_deter + 0.1
        samplec = self.dummy_sample_deter - 0.1
        per_sample_batch = samplea.shape[0]
        samples = torch.stack([samplea, sampleb, samplec] , dim=0 )
        timesteps = torch.arange(num_trained_timesteps )[0:3, None].repeat(1 , per_sample_batch )
        residual = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
        pred_prev_sample = scheduler.batch_step_no_noise(residual , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
        result_sum = torch.sum(torch.abs(pred_prev_sample ) )
        result_mean = torch.mean(torch.abs(pred_prev_sample ) )
        assert abs(result_sum.item() - 1153.1833 ) < 1E-2
        assert abs(result_mean.item() - 0.5005 ) < 1E-3
    def test_full_loop_no_noise( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 258.9606 ) < 1E-2
        assert abs(result_mean.item() - 0.3372 ) < 1E-3

    def test_full_loop_with_v_prediction( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction" )
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 202.0296 ) < 1E-2
        assert abs(result_mean.item() - 0.2631 ) < 1E-3
    def test_custom_timesteps( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps )
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps ):
            if i == len(scheduler_timesteps ) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep )
            prev_t = prev_t.item()
            self.assertEqual(prev_t , expected_prev_t )

    def test_custom_timesteps_increasing_order( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError , msg="`custom_timesteps` must be in descending order." ):
            scheduler.set_timesteps(timesteps=timesteps )

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps )
        with self.assertRaises(ValueError , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )

    def test_custom_timesteps_too_large( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
            scheduler.set_timesteps(timesteps=timesteps )
| 18 | 0 |
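# Self-contained sketch of the `previous_timestep` bookkeeping exercised by the
# custom-timesteps test above. It mirrors only the expected behavior; it is not the
# scheduler's actual implementation.
def previous_timestep_sketch(custom_timesteps, t):
    # For a descending schedule, the previous timestep is the next list entry, or -1 at the end.
    index = custom_timesteps.index(t)
    return custom_timesteps[index + 1] if index < len(custom_timesteps) - 1 else -1

assert previous_timestep_sketch([100, 87, 50, 1, 0], 87) == 50
assert previous_timestep_sketch([100, 87, 50, 1, 0], 0) == -1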
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__:Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Any = {
"""microsoft/swinv2-tiny-patch4-window8-256""": (
"""https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"""
),
}
class snake_case__ ( PretrainedConfig ):
    model_type = "swinv2"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1E-5 , encoder_stride=32 , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.pretrained_window_sizes = (0, 0, 0, 0)
| 528 |
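# Worked example of the hidden_size formula in the config above: the channel width
# doubles at each of the len(depths) - 1 downsampling stages, so the default 96-dim
# patch embedding ends at 96 * 2**3 = 768 channels after the last stage.
embed_dim, depths = 96, [2, 2, 6, 2]
assert int(embed_dim * 2 ** (len(depths) - 1)) == 768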
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
}
}
class lowerCAmelCase_ ( PreTrainedTokenizer ):
    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , sp_model_kwargs = None , **kwargs , ) -> None:
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation." )
        self.jieba = jieba
        self.translator = str.maketrans(" \n" , "\u2582\u2583" )
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size( self ):
        return len(self.sp_model )

    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def preprocess_text( self , inputs ):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace("``" , "\"" ).replace("''" , "\"" )
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD" , outputs )
            outputs = "".join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs

    def _tokenize( self , text ):
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text , out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , "" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces
    def _convert_token_to_id( self , token ):
        return self.sp_model.PieceToId(token )

    def _convert_id_to_token( self , index ):
        return self.sp_model.IdToPiece(index )

    def convert_tokens_to_string( self , tokens ):
        out_string = "".join(tokens ).replace(SPIECE_UNDERLINE , " " ).strip()
        return out_string

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1, 1]
        return ([0] * len(token_ids_0 )) + [1, 1]

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep ) * [0] + cls_segment_id
        return len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1] + cls_segment_id

    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)

    def _decode( self , *args , **kwargs ):
        text = super()._decode(*args , **kwargs )
        text = text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" )
        return text
| 18 | 0 |
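# Round-trip sketch of the whitespace encoding used by the tokenizer above: spaces and
# newlines are mapped onto the block characters \u2582/\u2583 before SentencePiece sees
# the text, and `_decode` maps them back. The sample string is arbitrary.
translator = str.maketrans(" \n", "\u2582\u2583")
encoded = "你好 世界\n".translate(translator)
assert "\u2582" in encoded and "\u2583" in encoded
assert encoded.replace("\u2582", " ").replace("\u2583", "\n") == "你好 世界\n"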
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
a = logging.get_logger(__name__)
class UpperCamelCase__ ( LayoutLMvaImageProcessor ):
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        warnings.warn(
            """The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use LayoutLMv2ImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 412 |
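# Generic sketch of the deprecation-shim pattern above: keep the old class name as a
# thin subclass that emits a FutureWarning and then delegates to the replacement.
# `NewImageProcessor` / `OldFeatureExtractor` are made-up names for illustration.
import warnings

class NewImageProcessor:
    def __init__(self, *args, **kwargs):
        pass

class OldFeatureExtractor(NewImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.", FutureWarning)
        super().__init__(*args, **kwargs)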
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
_SCREAMING_SNAKE_CASE = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
_SCREAMING_SNAKE_CASE = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n"
_SCREAMING_SNAKE_CASE = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. ])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
] , )
    def _get_feature_types( self ):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("float" ) ),
"references": datasets.Sequence(datasets.Value("float" ) ),
}
else:
return {
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
}
    def _compute( self , predictions , references , sample_weight=None , multioutput="uniform_average" , squared=True ):
        mse = mean_squared_error(
            references , predictions , sample_weight=sample_weight , multioutput=multioutput , squared=squared )
        return {"mse": mse}
| 18 | 0 |
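# Quick numeric check of the metric above, calling sklearn directly with the values
# from the docstring example (note the (references, predictions) argument order).
from sklearn.metrics import mean_squared_error

predictions = [2.5, 0.0, 2, 8]
references = [3, -0.5, 2, 7]
assert mean_squared_error(references, predictions) == 0.375
print(mean_squared_error(references, predictions, squared=False))  # RMSE, ~0.6124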
"""simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCamelCase__ :
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , scope=None , encoder_stride=2 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.num_patches = num_patches
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config( self ):
        return ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = ViTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_masked_image_modeling( self , config , pixel_values , labels ):
        model = ViTForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )

    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class lowerCamelCase__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = ViTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTConfig , has_text_modality=False , hidden_size=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""ViT does not use inputs_embeds""" )
    def test_inputs_embeds( self ):
        pass

    def test_model_common_attributes( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )

    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_masked_image_modeling( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )

    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    """simple docstring"""
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class lowerCamelCase__ ( unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""" ) if is_vision_available() else None

    @slow
    def test_inference_image_classification_head( self ):
        model = ViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )

    @slow
    def test_inference_interpolate_pos_encoding( self ):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher resolution images.
        model = ViTModel.from_pretrained("""facebook/dino-vits8""" ).to(torch_device )
        image_processor = ViTImageProcessor.from_pretrained("""facebook/dino-vits8""" , size=480 )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" )
        pixel_values = inputs.pixel_values.to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values , interpolate_pos_encoding=True )
        # verify the logits
        expected_shape = torch.Size((1, 3_601, 384) )
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ) )

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16( self ):
        model = ViTModel.from_pretrained("""facebook/dino-vits8""" , torch_dtype=torch.float16 , device_map="""auto""" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" )
        pixel_values = inputs.pixel_values.to(torch_device )
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            outputs = model(pixel_values )
| 341 |
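# Worked example of the sequence-length arithmetic the tester above relies on: with the
# tester defaults (a 30x30 image and 2x2 patches) there are 225 patches plus one [CLS]
# token, so seq_length is 226.
image_size, patch_size = 30, 2
num_patches = (image_size // patch_size) ** 2
assert (num_patches, num_patches + 1) == (225, 226)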
'''simple docstring'''
def __a(numa : int , numb : int ):
    '''simple docstring'''
    # Two integers have different signs iff the sign bit of their XOR is set.
    return numa ^ numb < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 | 0 |
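# Demonstration of why the XOR test above works: in two's complement, x ^ y is negative
# exactly when the sign bits of x and y differ, so `numa ^ numb < 0` detects opposite signs.
assert __a(5, -3) is True    # opposite signs
assert __a(-5, -3) is False  # both negative
assert __a(5, 3) is False    # both positive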
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_distilbert''': [
'''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''DistilBertConfig''',
'''DistilBertOnnxConfig''',
],
'''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_distilbert_fast'''] = ['''DistilBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_distilbert'''] = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_distilbert'''] = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_distilbert'''] = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 576 |
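# Minimal sketch of the lazy-import idea behind `_LazyModule` above, using module-level
# __getattr__ (PEP 562): a submodule is imported only when one of its exported names is
# first accessed, instead of at package import time. This is a simplified stand-in, not
# the transformers implementation.
import importlib

_lazy_structure = {"tokenization_distilbert": ["DistilBertTokenizer"]}

def __getattr__(name):
    for module_name, exported_names in _lazy_structure.items():
        if name in exported_names:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")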
'''simple docstring'''
from __future__ import annotations
def p_series(nth_term : int | float | str , power : int | float | str ):
    '''simple docstring'''
    if nth_term == "":
        return [""]
    nth_term = int(nth_term )
    power = int(power )
    series = []
    for temp in range(int(nth_term ) ):
        series.append(f'''1 / {pow(temp + 1 , int(power ) )}''' if series else "1" )
    return series
if __name__ == "__main__":
import doctest
doctest.testmod()
_SCREAMING_SNAKE_CASE = int(input("Enter the last number (nth term) of the P-Series"))
_SCREAMING_SNAKE_CASE = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
| 18 | 0 |
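# Example output of `p_series` above: the first term is rendered as "1" and each later
# term as 1 / n**p.
assert p_series(5, 2) == ["1", "1 / 4", "1 / 9", "1 / 16", "1 / 25"]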
'''simple docstring'''
from math import pow
def backtrack( needed_sum , power , current_number , current_sum , solutions_count ):
    """simple docstring"""
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count
    i_to_n = int(pow(current_number , power ) )
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum , solutions_count = backtrack(
            needed_sum , power , current_number + 1 , current_sum , solutions_count )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum , solutions_count = backtrack(
            needed_sum , power , current_number + 1 , current_sum , solutions_count )
    return current_sum, solutions_count


def snake_case_ ( needed_sum , power ):
    """simple docstring"""
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            """Invalid input\n"""
            """needed_sum must be between 1 and 1000, power between 2 and 10.""" )
    return backtrack(needed_sum , power , 1 , 0 , 0 )[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 533 |
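# Usage sketch for the backtracking counter above, via the `snake_case_` entry point.
# The expected counts come from enumerating sums of distinct powers by hand:
# 13 = 2**2 + 3**2 is the only decomposition, while 100 has three
# (10**2; 6**2 + 8**2; 1 + 9 + 16 + 25 + 49).
assert snake_case_(13, 2) == 1
assert snake_case_(100, 2) == 3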
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : int = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : int = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Dict = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : int = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Optional[int] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Optional[int] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : int ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : str ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : List[Any] ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : List[str] ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : List[str] ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : str ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Dict ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Dict = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
| 18 | 0 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    """Map an original GroupViT checkpoint key to the Transformers naming scheme."""
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")
    return name
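# Example of the mapping above (traced by hand): a fused vision attention key
#   "img_encoder.layers.0.blocks.0.attn.qkv.weight"
# becomes
#   "vision_model.encoder.stages.0.layers.0.self_attn.qkv.weight"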
def convert_state_dict(orig_state_dict, config):
    """Rename and, where needed, split the original checkpoint tensors in place."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias"
                ] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
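# Shape sanity check for the qkv split above, with illustrative sizes (384 is the
# GroupViT vision hidden size assumed here): a fused projection of shape
# (3 * dim, dim) yields three (dim, dim) matrices, e.g.
#   val = torch.randn(3 * 384, 384)
#   q, k, v = val[:384, :], val[384 : 2 * 384, :], val[-384:, :]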
def prepare_img():
    """Download the image of two cats commonly used for sanity checks."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    """Copy/paste/tweak the original model's weights into the Transformers design."""
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
a : str = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''')
parser.add_argument(
'''--model_name''',
default='''groupvit-gccy-fcc''',
type=str,
help='''Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''',
)
a : Union[str, Any] = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
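# Example invocation (file paths below are placeholders):
#   python convert_groupvit_checkpoint.py \
#       --checkpoint_path ./group_vit_gcc_yfcc_checkpoint.pth \
#       --pytorch_dump_folder_path ./groupvit-gcc-yfcc \
#       --model_name groupvit-gcc-yfcc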
| 633 |
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their links from a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
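# Pagination sketch: the GitHub API returns at most 100 jobs per page, so a run with,
# say, total_count == 250 needs math.ceil((250 - 100) / 100) == 2 extra requests
# (pages 2 and 3) beyond the first one.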
def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact; the API URL redirects to the actual download location."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result
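# Example: a `failures_line.txt` line such as
#   "ValueError: Model name groupvit not supported."
# splits into error_line == "ValueError" and error == "Model name groupvit not supported."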
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files found in `artifact_dir`."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]

    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors
def reduce_by_error(logs, error_filter=None):
    """Count each error's occurrences and collect the failed tests it appeared in."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
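# Resulting shape (illustrative values):
#   {"ValueError: ...": {"count": 3, "failed_tests": [(job_link, error_line), ...]}, ...}
# sorted so the most frequent error comes first.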
def get_model(test):
    """Get the model name from a test method path."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        model = test.split("/")[2]
    else:
        model = None

    return model
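# Example: "tests/models/albert/test_modeling_albert.py::AlbertModelTest::test_forward"
# -> split on "::" gives "tests/models/albert/test_modeling_albert.py"
# -> split on "/" gives ["tests", "models", "albert", ...], so the model is "albert".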
def reduce_by_model(logs, error_filter=None):
    """Group errors by the model (test module) in which they occurred."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    """Render the per-error summary as a GitHub-flavored markdown table."""
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)

    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    """Render the per-model summary as a GitHub-flavored markdown table."""
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)

    return "\n".join(lines)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
_SCREAMING_SNAKE_CASE = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
_SCREAMING_SNAKE_CASE = get_job_links(args.workflow_run_id, token=args.token)
_SCREAMING_SNAKE_CASE = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
_SCREAMING_SNAKE_CASE = k.find(" / ")
_SCREAMING_SNAKE_CASE = k[index + len(" / ") :]
_SCREAMING_SNAKE_CASE = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
_SCREAMING_SNAKE_CASE = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
_SCREAMING_SNAKE_CASE = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
_SCREAMING_SNAKE_CASE = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
_SCREAMING_SNAKE_CASE = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
_SCREAMING_SNAKE_CASE = reduce_by_error(errors)
_SCREAMING_SNAKE_CASE = reduce_by_model(errors)
_SCREAMING_SNAKE_CASE = make_github_table(reduced_by_error)
_SCREAMING_SNAKE_CASE = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
| 18 | 0 |
'''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    """A CLIPTokenizer that can expand one placeholder token into several learned sub-tokens."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent."
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
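# Hedged usage sketch (model id and placeholder string are illustrative):
#   tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
#   tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
#   # "<cat-toy>" now expands to "<cat-toy>_0 <cat-toy>_1 <cat-toy>_2 <cat-toy>_3"
#   ids = tokenizer("a photo of <cat-toy>", vector_shuffle=True)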
| 694 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }

        config.update(**kwargs)
        return config
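    # e.g. get_scheduler_config(solver_order=3) returns the defaults above with
    # "solver_order" overridden to 3.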
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(output, t, sample, **kwargs).prev_sample
                new_output = new_scheduler.step(new_output, t, sample, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        # Only build a default scheduler when none is passed in, so callers such as
        # `test_switch` can exercise a scheduler they constructed themselves.
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2574) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="dpmsolver++",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.0649) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
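# Standalone sketch of the denoising loop these tests exercise (illustrative only):
#   scheduler = DPMSolverSinglestepScheduler()
#   scheduler.set_timesteps(10)
#   sample = torch.randn(1, 3, 8, 8)
#   for t in scheduler.timesteps:
#       residual = torch.zeros_like(sample)  # stand-in for a model prediction
#       sample = scheduler.step(residual, t, sample).prev_sample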
| 18 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
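# With the defaults above, FalconConfig().head_dim == 4544 // 71 == 64 and
# FalconConfig().rotary is True (rotary embeddings are used whenever alibi is off).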
| 346 |
'''simple docstring'''
from __future__ import annotations
def mean(nums: list) -> float:
    """Return the arithmetic mean of the values in `nums`."""
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
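# Examples:
#   mean([3, 6, 9, 12, 15, 18, 21]) == 12.0
#   mean([5, 10, 15, 20, 25, 30, 35]) == 20.0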
| 18 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs
    def _A ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
        '''Flatten past_key_values with the seq2seq layout for default/seq2seq-lm tasks and the decoder-only layout otherwise.'''
if self.task in ["default", "seq2seq-lm"]:
__SCREAMING_SNAKE_CASE = super()._flatten_past_key_values_(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
__SCREAMING_SNAKE_CASE = super(_lowerCAmelCase , self )._flatten_past_key_values_(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
| 148 |
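# Illustrative sketch (not part of the dataset row above): the shape logic the
# snippet implements when pre-filling past_key_values for ONNX seq2seq export.
# All dimensions below are hypothetical placeholder values.
import torch

batch, num_heads, past_len, head_dim = 2, 16, 10, 32
decoder_shape = (batch, num_heads, past_len, head_dim)
encoder_shape = (batch, num_heads, 7, head_dim)  # encoder sequence length differs
# One (decoder key, decoder value, encoder key, encoder value) tuple per layer:
past_key_values = [
    (torch.zeros(decoder_shape), torch.zeros(decoder_shape),
     torch.zeros(encoder_shape), torch.zeros(encoder_shape))
    for _ in range(8)
]
assert past_key_values[0][0].shape == torch.Size(decoder_shape)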
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_lowerCAmelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_lowerCAmelCase )
_lowerCAmelCase = -1
_lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCAmelCase )
_lowerCAmelCase = model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase )
_lowerCAmelCase = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
_lowerCAmelCase = TextStreamer(_lowerCAmelCase )
model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase , streamer=_lowerCAmelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_lowerCAmelCase = cs.out[:-1]
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_lowerCAmelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_lowerCAmelCase )
_lowerCAmelCase = -1
_lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCAmelCase )
_lowerCAmelCase = model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase )
_lowerCAmelCase = tokenizer.decode(greedy_ids[0] )
_lowerCAmelCase = TextIteratorStreamer(_lowerCAmelCase )
_lowerCAmelCase = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
_lowerCAmelCase = Thread(target=model.generate , kwargs=_lowerCAmelCase )
thread.start()
_lowerCAmelCase = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
def _snake_case ( self ) -> List[str]:
_lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_lowerCAmelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_lowerCAmelCase )
_lowerCAmelCase = -1
_lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCAmelCase )
_lowerCAmelCase = model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase )
_lowerCAmelCase = greedy_ids[:, input_ids.shape[1] :]
_lowerCAmelCase = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
_lowerCAmelCase = TextStreamer(_lowerCAmelCase , skip_prompt=_lowerCAmelCase )
model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase , streamer=_lowerCAmelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_lowerCAmelCase = cs.out[:-1]
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
def _snake_case ( self ) -> Dict:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
_lowerCAmelCase = AutoTokenizer.from_pretrained("distilgpt2" )
_lowerCAmelCase = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(_lowerCAmelCase )
_lowerCAmelCase = -1
_lowerCAmelCase = torch.ones((1, 5) , device=_lowerCAmelCase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_lowerCAmelCase = TextStreamer(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
model.generate(_lowerCAmelCase , max_new_tokens=1 , do_sample=_lowerCAmelCase , streamer=_lowerCAmelCase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_lowerCAmelCase = cs.out[:-1] # Remove the final "\n"
_lowerCAmelCase = tokenizer(_lowerCAmelCase , return_tensors="pt" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_lowerCAmelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_lowerCAmelCase )
_lowerCAmelCase = -1
_lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCAmelCase )
_lowerCAmelCase = TextIteratorStreamer(_lowerCAmelCase , timeout=0.001 )
_lowerCAmelCase = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
_lowerCAmelCase = Thread(target=model.generate , kwargs=_lowerCAmelCase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(_lowerCAmelCase ):
_lowerCAmelCase = ""
for new_text in streamer:
streamer_text += new_text
| 18 | 0 |
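# A plain (non-test) version of the streaming pattern the tests above exercise;
# the tiny checkpoint name is the same one the tests use and must be reachable.
from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
inputs = tokenizer("Hello", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer)
generation_kwargs = {**inputs, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
Thread(target=model.generate, kwargs=generation_kwargs).start()
streamed_text = "".join(new_text for new_text in streamer)  # consume chunks as they arrive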
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=13 ,_lowerCAmelCase=7 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=99 ,_lowerCAmelCase=64 ,_lowerCAmelCase=32 ,_lowerCAmelCase=5 ,_lowerCAmelCase=4 ,_lowerCAmelCase=37 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=5_12 ,_lowerCAmelCase=16 ,_lowerCAmelCase=2 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=3 ,_lowerCAmelCase=4 ,_lowerCAmelCase=None ,):
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = seq_length
lowerCamelCase__ = is_training
lowerCamelCase__ = use_input_mask
lowerCamelCase__ = use_token_type_ids
lowerCamelCase__ = use_labels
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = embedding_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_act
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = type_vocab_size
lowerCamelCase__ = type_sequence_label_size
lowerCamelCase__ = initializer_range
lowerCamelCase__ = num_labels
lowerCamelCase__ = num_choices
lowerCamelCase__ = scope
def UpperCamelCase_ ( self ):
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowerCamelCase__ = None
if self.use_input_mask:
lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ = None
if self.use_token_type_ids:
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None
if self.use_labels:
lowerCamelCase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
lowerCamelCase__ = ids_tensor([self.batch_size] ,self.num_choices )
lowerCamelCase__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self ):
return MegatronBertConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,embedding_size=self.embedding_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_lowerCAmelCase ,initializer_range=self.initializer_range ,)
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = MegatronBertModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowerCamelCase__ = model(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ,token_type_ids=_lowerCAmelCase )
lowerCamelCase__ = model(_lowerCAmelCase ,token_type_ids=_lowerCAmelCase )
lowerCamelCase__ = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = MegatronBertForMaskedLM(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowerCamelCase__ = model(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ,token_type_ids=_lowerCAmelCase ,labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = MegatronBertForCausalLM(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowerCamelCase__ = model(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ,token_type_ids=_lowerCAmelCase ,labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = MegatronBertForNextSentencePrediction(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowerCamelCase__ = model(
_lowerCAmelCase ,attention_mask=_lowerCAmelCase ,token_type_ids=_lowerCAmelCase ,labels=_lowerCAmelCase ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = MegatronBertForPreTraining(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowerCamelCase__ = model(
_lowerCAmelCase ,attention_mask=_lowerCAmelCase ,token_type_ids=_lowerCAmelCase ,labels=_lowerCAmelCase ,next_sentence_label=_lowerCAmelCase ,)
self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = MegatronBertForQuestionAnswering(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowerCamelCase__ = model(
_lowerCAmelCase ,attention_mask=_lowerCAmelCase ,token_type_ids=_lowerCAmelCase ,start_positions=_lowerCAmelCase ,end_positions=_lowerCAmelCase ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = MegatronBertForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowerCamelCase__ = model(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ,token_type_ids=_lowerCAmelCase ,labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = MegatronBertForTokenClassification(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowerCamelCase__ = model(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ,token_type_ids=_lowerCAmelCase ,labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = self.num_choices
lowerCamelCase__ = MegatronBertForMultipleChoice(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowerCamelCase__ = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
lowerCamelCase__ = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
lowerCamelCase__ = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
lowerCamelCase__ = model(
_lowerCAmelCase ,attention_mask=_lowerCAmelCase ,token_type_ids=_lowerCAmelCase ,labels=_lowerCAmelCase ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class UpperCamelCase__ (a ,a ,unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
_UpperCamelCase = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCamelCase = True
# test_resize_embeddings = False
_UpperCamelCase = False
    def UpperCamelCase_ ( self ,inputs_dict ,model_class ,return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict ,model_class ,return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["""labels"""] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=torch_device )
                inputs_dict["""next_sentence_label"""] = torch.zeros(
                    self.model_tester.batch_size ,dtype=torch.long ,device=torch_device )
        return inputs_dict
def UpperCamelCase_ ( self ):
lowerCamelCase__ = MegatronBertModelTester(self )
lowerCamelCase__ = ConfigTester(self ,config_class=_lowerCAmelCase ,hidden_size=37 )
def UpperCamelCase_ ( self ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*_lowerCAmelCase )
def _long_tensor( __lowerCAmelCase : Optional[Any] ):
    return torch.tensor(
        __lowerCAmelCase , dtype=torch.long , device=torch_device , )
TOLERANCE = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
@slow
@unittest.skip("""Model is not available.""" )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """nvidia/megatron-bert-uncased-345m"""
if "MYDIR" in os.environ:
lowerCamelCase__ = os.path.join(os.environ["""MYDIR"""] ,_lowerCAmelCase )
lowerCamelCase__ = MegatronBertModel.from_pretrained(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.half()
lowerCamelCase__ = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] )
with torch.no_grad():
lowerCamelCase__ = model(_lowerCAmelCase )[0]
lowerCamelCase__ = torch.Size((1, 9, 10_24) )
self.assertEqual(output.shape ,_lowerCAmelCase )
lowerCamelCase__ = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
for ii in range(3 ):
for jj in range(3 ):
lowerCamelCase__ = output[0, ii, jj]
lowerCamelCase__ = expected[3 * ii + jj]
lowerCamelCase__ = """ii={} jj={} a={} b={}""".format(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
self.assertTrue(math.isclose(_lowerCAmelCase ,_lowerCAmelCase ,rel_tol=_lowerCAmelCase ,abs_tol=_lowerCAmelCase ) ,msg=_lowerCAmelCase )
| 50 |
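# Standalone sketch of what the tester above checks: a forward pass through a
# small MegatronBertModel yields (batch, seq_length, hidden_size) hidden states.
# The tiny hyperparameters are illustrative, not those of any real checkpoint.
import torch
from transformers import MegatronBertConfig, MegatronBertModel

config = MegatronBertConfig(vocab_size=99, hidden_size=64, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37)
model = MegatronBertModel(config).eval()
input_ids = torch.randint(0, config.vocab_size, (13, 7))
with torch.no_grad():
    last_hidden = model(input_ids).last_hidden_state
assert last_hidden.shape == (13, 7, 64)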
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class lowerCAmelCase_ ( __magic_name__ ):
__lowerCamelCase : Union[str, Any] = "blenderbot-small"
__lowerCamelCase : Optional[Any] = ["past_key_values"]
__lowerCamelCase : str = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__( self , vocab_size=50265 , max_position_embeddings=512 , encoder_layers=8 , encoder_ffn_dim=2048 , encoder_attention_heads=16 , decoder_layers=8 , decoder_ffn_dim=2048 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=512 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=1 , scale_embedding=False , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , forced_eos_token_id=2 , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
class lowerCAmelCase_ ( __magic_name__ ):
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
_lowerCAmelCase = {0: "batch"}
_lowerCAmelCase = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
_lowerCAmelCase = {0: "batch", 1: "decoder_sequence"}
_lowerCAmelCase = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(_lowerCAmelCase , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
_lowerCAmelCase = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
_lowerCAmelCase , _lowerCAmelCase = self.num_layers
for i in range(_lowerCAmelCase ):
_lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"}
_lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"}
else:
_lowerCAmelCase = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase = super().outputs
else:
_lowerCAmelCase = super(_lowerCAmelCase , self ).outputs
if self.use_past:
_lowerCAmelCase , _lowerCAmelCase = self.num_layers
for i in range(_lowerCAmelCase ):
_lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"}
_lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]:
_lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Generate decoder inputs
_lowerCAmelCase = seq_length if not self.use_past else 1
_lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
_lowerCAmelCase = dict(**_lowerCAmelCase , **_lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
_lowerCAmelCase , _lowerCAmelCase = common_inputs["input_ids"].shape
_lowerCAmelCase = common_inputs["decoder_input_ids"].shape[1]
_lowerCAmelCase , _lowerCAmelCase = self.num_attention_heads
_lowerCAmelCase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowerCAmelCase = decoder_seq_length + 3
_lowerCAmelCase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_lowerCAmelCase = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(_lowerCAmelCase , _lowerCAmelCase )] , dim=1 )
_lowerCAmelCase = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_lowerCAmelCase , _lowerCAmelCase = self.num_layers
_lowerCAmelCase = min(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = max(_lowerCAmelCase , _lowerCAmelCase ) - min_num_layers
_lowerCAmelCase = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(_lowerCAmelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(_lowerCAmelCase ),
torch.zeros(_lowerCAmelCase ),
torch.zeros(_lowerCAmelCase ),
torch.zeros(_lowerCAmelCase ),
) )
# TODO: test this.
_lowerCAmelCase = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(_lowerCAmelCase , _lowerCAmelCase ):
common_inputs["past_key_values"].append((torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) )
return common_inputs
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]:
_lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
_lowerCAmelCase , _lowerCAmelCase = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
_lowerCAmelCase = seqlen + 2
_lowerCAmelCase , _lowerCAmelCase = self.num_layers
_lowerCAmelCase , _lowerCAmelCase = self.num_attention_heads
_lowerCAmelCase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowerCAmelCase = common_inputs["attention_mask"].dtype
_lowerCAmelCase = torch.cat(
[common_inputs["attention_mask"], torch.ones(_lowerCAmelCase , _lowerCAmelCase , dtype=_lowerCAmelCase )] , dim=1 )
_lowerCAmelCase = [
(torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) for _ in range(_lowerCAmelCase )
]
return common_inputs
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_lowerCAmelCase = compute_effective_axis_dimension(
_lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_lowerCAmelCase = tokenizer.num_special_tokens_to_add(_lowerCAmelCase )
_lowerCAmelCase = compute_effective_axis_dimension(
_lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowerCAmelCase )
# Generate dummy inputs according to compute batch and sequence
_lowerCAmelCase = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
_lowerCAmelCase = dict(tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase ) )
return common_inputs
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase )
elif self.task == "causal-lm":
_lowerCAmelCase = self._generate_dummy_inputs_for_causal_lm(
_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase )
else:
_lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase )
return common_inputs
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase = super()._flatten_past_key_values_(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
_lowerCAmelCase = super(_lowerCAmelCase , self )._flatten_past_key_values_(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
| 18 | 0 |
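# How the two classes above are used upstream. In the transformers version this
# dataset was built from they are BlenderbotSmallConfig and
# BlenderbotSmallOnnxConfig (the names here are collapsed by the style
# transform); a hedged sketch:
from transformers import AutoTokenizer
from transformers.models.blenderbot_small.configuration_blenderbot_small import (
    BlenderbotSmallConfig,
    BlenderbotSmallOnnxConfig,
)

config = BlenderbotSmallConfig()
onnx_config = BlenderbotSmallOnnxConfig(config, task="default")
tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, framework=None)  # plain lists when framework is None
print(sorted(dummy_inputs))  # input_ids, attention_mask, decoder_* counterparts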
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class __magic_name__ ( SCREAMING_SNAKE_CASE__ ):
def __init__( self , A_ , A_=None , A_=True , A_=None , **A_ ) -> Optional[int]:
"""simple docstring"""
_lowercase: str = parent
_lowercase: int = config_class
_lowercase: List[Any] = has_text_modality
_lowercase: Optional[int] = kwargs
_lowercase: Optional[int] = common_properties
def lowercase_ ( self ) -> Dict:
"""simple docstring"""
_lowercase: Union[str, Any] = self.config_class(**self.inputs_dict )
_lowercase: Union[str, Any] = (
['''hidden_size''', '''num_attention_heads''', '''num_hidden_layers''']
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(['''vocab_size'''] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(_lowerCAmelCase , _lowerCAmelCase ) , msg=f'''`{prop}` does not exist''' )
# Test that config has the common properties as setter
for idx, name in enumerate(_lowerCAmelCase ):
try:
setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
self.parent.assertEqual(
getattr(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase , msg=f'''`{name} value {idx} expected, but was {getattr(_lowerCAmelCase , _lowerCAmelCase )}''' )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(_lowerCAmelCase ):
try:
_lowercase: int = self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase , msg=f'''`{name} value {idx} expected, but was {getattr(_lowerCAmelCase , _lowerCAmelCase )}''' )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def lowercase_ ( self ) -> Tuple:
"""simple docstring"""
_lowercase: Union[str, Any] = self.config_class(**self.inputs_dict )
_lowercase: str = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , _lowerCAmelCase )
def lowercase_ ( self ) -> List[str]:
"""simple docstring"""
_lowercase: Optional[int] = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase: Optional[Any] = os.path.join(_lowerCAmelCase , '''config.json''' )
config_first.to_json_file(_lowerCAmelCase )
_lowercase: Union[str, Any] = self.config_class.from_json_file(_lowerCAmelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def lowercase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowercase: str = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(_lowerCAmelCase )
_lowercase: str = self.config_class.from_pretrained(_lowerCAmelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def lowercase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowercase: Any = self.config_class(**self.inputs_dict )
_lowercase: Tuple = '''test'''
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase: List[str] = os.path.join(_lowerCAmelCase , _lowerCAmelCase )
config_first.save_pretrained(_lowerCAmelCase )
_lowercase: Any = self.config_class.from_pretrained(_lowerCAmelCase , subfolder=_lowerCAmelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def lowercase_ ( self ) -> Dict:
"""simple docstring"""
_lowercase: Optional[int] = self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
_lowercase: Optional[Any] = 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def lowercase_ ( self ) -> str:
"""simple docstring"""
if self.config_class.is_composition:
return
_lowercase: Union[str, Any] = self.config_class()
self.parent.assertIsNotNone(_lowerCAmelCase )
def lowercase_ ( self ) -> List[str]:
"""simple docstring"""
_lowercase: int = copy.deepcopy(_lowerCAmelCase )
_lowercase: Optional[Any] = self.config_class(**_lowerCAmelCase )
_lowercase: Any = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(('''torch_dtype''', config.torch_dtype, torch.floataa) )
elif getattr(_lowerCAmelCase , _lowerCAmelCase ) != value:
wrong_values.append((key, getattr(_lowerCAmelCase , _lowerCAmelCase ), value) )
if len(_lowerCAmelCase ) > 0:
_lowercase: Dict = '''\n'''.join([f'''- {v[0]}: got {v[1]} instead of {v[2]}''' for v in wrong_values] )
raise ValueError(f'''The following keys were not properly set in the config:\n{errors}''' )
def lowercase_ ( self ) -> Dict:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
| 353 |
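# The num_labels round-trip checked above relies on PretrainedConfig wiring
# id2label/label2id automatically; a standalone illustration of the same checks:
from transformers import PretrainedConfig

cfg = PretrainedConfig(num_labels=5)
assert len(cfg.id2label) == len(cfg.label2id) == 5
cfg.num_labels = 3
assert len(cfg.id2label) == len(cfg.label2id) == 3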
'''simple docstring'''
import re
import string
import numpy as np
import datasets
_SCREAMING_SNAKE_CASE = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"
_SCREAMING_SNAKE_CASE = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n"
_SCREAMING_SNAKE_CASE = "\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
def _snake_case ( self ) -> List[str]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , reference_urls=[] , )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=False , ) -> str:
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
_lowerCAmelCase = np.array([re.sub(_lowerCAmelCase , "" , _lowerCAmelCase ) for x in predictions] )
_lowerCAmelCase = np.array([re.sub(_lowerCAmelCase , "" , _lowerCAmelCase ) for x in references] )
else:
_lowerCAmelCase = np.asarray(_lowerCAmelCase )
_lowerCAmelCase = np.asarray(_lowerCAmelCase )
if ignore_case:
_lowerCAmelCase = np.char.lower(_lowerCAmelCase )
_lowerCAmelCase = np.char.lower(_lowerCAmelCase )
if ignore_punctuation:
_lowerCAmelCase = string.punctuation.maketrans("" , "" , string.punctuation )
_lowerCAmelCase = np.char.translate(_lowerCAmelCase , table=_lowerCAmelCase )
_lowerCAmelCase = np.char.translate(_lowerCAmelCase , table=_lowerCAmelCase )
if ignore_numbers:
_lowerCAmelCase = string.digits.maketrans("" , "" , string.digits )
_lowerCAmelCase = np.char.translate(_lowerCAmelCase , table=_lowerCAmelCase )
_lowerCAmelCase = np.char.translate(_lowerCAmelCase , table=_lowerCAmelCase )
_lowerCAmelCase = predictions == references
return {"exact_match": np.mean(_lowerCAmelCase ) * 100}
| 18 | 0 |
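# The core of the metric above, shown standalone with the docstring's first
# example: only one of four predictions matches its reference exactly.
import numpy as np

preds = np.array(["cat?", "theater", "yelling", "agent"])
refs = np.array(["the cat", "theater", "YELLING", "agent007"])
print(np.mean(preds == refs) * 100)  # 25.0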
"""simple docstring"""
def _lowerCamelCase( a , b ):
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive" )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int("1" in (char_a, char_b) ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 528 |
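# Example call for the helper above once its collapsed names are restored:
# 25 = 0b11001 and 32 = 0b100000, so their bitwise OR is 0b111001 (57).
print(_lowerCamelCase(25, 32))  # 0b111001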
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class lowerCAmelCase_ ( __magic_name__ ):
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> None:
warnings.warn(
"The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use YolosImageProcessor instead." , _lowerCAmelCase , )
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
| 18 | 0 |
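# The deprecated wrapper above only forwards to the new class; the equivalent
# modern usage is simply (checkpoint name is illustrative):
from transformers import YolosImageProcessor

image_processor = YolosImageProcessor.from_pretrained("hustvl/yolos-small")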
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'
    'position_salaries.csv'
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
X_train , X_test , y_train , y_test = train_test_split(X, y, test_size=0.2, random_state=0)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
def viz_polymonial( ):
    plt.scatter(X , y , color="""red""" )
    plt.plot(X , pol_reg.predict(poly_reg.fit_transform(X ) ) , color="""blue""" )
    plt.title("""Truth or Bluff (Linear Regression)""" )
    plt.xlabel("""Position level""" )
    plt.ylabel("""Salary""" )
    plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 412 |
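# Note the script above fits on the full X even though it calls
# train_test_split; a hedged sketch of actually using the held-out split
# created by the fixes above:
holdout_reg = LinearRegression().fit(poly_reg.fit_transform(X_train), y_train)
print(holdout_reg.score(poly_reg.transform(X_test), y_test))  # R^2 on unseen rows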
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
"tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class lowerCAmelCase_ ( __magic_name__ ):
__lowerCamelCase : Any = "falcon"
__lowerCamelCase : List[str] = ["past_key_values"]
    def __init__( self , vocab_size=65024 , hidden_size=4544 , num_hidden_layers=32 , num_attention_heads=71 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , use_cache=True , hidden_dropout=0.0 , attention_dropout=0.0 , num_kv_heads=None , alibi=False , new_decoder_architecture=False , multi_query=True , parallel_attn=True , bias=False , bos_token_id=11 , eos_token_id=11 , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed" , None )
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
    @property
    def head_dim( self ) -> int:
        return self.hidden_size // self.num_attention_heads
    @property
    def rotary( self ) -> bool:
        return not self.alibi
| 18 | 0 |
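# Quick check of the two derived properties above (upstream, in recent
# transformers, this is FalconConfig's head_dim and rotary) on the defaults:
from transformers import FalconConfig

cfg = FalconConfig()
print(cfg.hidden_size // cfg.num_attention_heads)  # 4544 // 71 == 64
print(not cfg.alibi)  # True: rotary embeddings are used when alibi is off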
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCamelCase = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
_UpperCamelCase = 250004
_UpperCamelCase = 250020
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( snake_case , unittest.TestCase ):
SCREAMING_SNAKE_CASE = MBartaaTokenizer
SCREAMING_SNAKE_CASE = MBartaaTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
def _UpperCamelCase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase = MBartaaTokenizer(_lowerCAmelCase ,src_lang="""en_XX""" ,tgt_lang="""ro_RO""" ,keep_accents=_lowerCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def _UpperCamelCase ( self ):
UpperCAmelCase = """<s>"""
UpperCAmelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCAmelCase ) ,_lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCAmelCase ) ,_lowerCAmelCase )
def _UpperCamelCase ( self ):
UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"""<s>""" )
self.assertEqual(vocab_keys[1] ,"""<pad>""" )
self.assertEqual(vocab_keys[-1] ,"""<mask>""" )
self.assertEqual(len(_lowerCAmelCase ) ,1_054 )
def _UpperCamelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size ,1_054 )
def _UpperCamelCase ( self ):
UpperCAmelCase = MBartaaTokenizer(_lowerCAmelCase ,src_lang="""en_XX""" ,tgt_lang="""ro_RO""" ,keep_accents=_lowerCAmelCase )
UpperCAmelCase = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(_lowerCAmelCase ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) ,[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] ,)
UpperCAmelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
_lowerCAmelCase ,[SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """."""] ,)
UpperCAmelCase = tokenizer.convert_tokens_to_ids(_lowerCAmelCase )
self.assertListEqual(
_lowerCAmelCase ,[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] ,)
UpperCAmelCase = tokenizer.convert_ids_to_tokens(_lowerCAmelCase )
self.assertListEqual(
_lowerCAmelCase ,[SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """."""] ,)
@slow
def _UpperCamelCase ( self ):
# fmt: off
UpperCAmelCase = {"""input_ids""": [[250_004, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [250_004, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250_004, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCAmelCase ,model_name="""facebook/mbart-large-50""" ,revision="""d3913889c59cd5c9e456b269c376325eabad57e2""" ,)
def _UpperCamelCase ( self ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
UpperCAmelCase = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart50""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(_lowerCAmelCase ,**_lowerCAmelCase )
UpperCAmelCase = self.tokenizer_class.from_pretrained(_lowerCAmelCase ,**_lowerCAmelCase )
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = tokenizer_r.save_pretrained(_lowerCAmelCase )
UpperCAmelCase = tokenizer_p.save_pretrained(_lowerCAmelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
UpperCAmelCase = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(_lowerCAmelCase ,_lowerCAmelCase )
# Checks everything loads correctly in the same way
UpperCAmelCase = tokenizer_r.from_pretrained(_lowerCAmelCase )
UpperCAmelCase = tokenizer_p.from_pretrained(_lowerCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowerCAmelCase ,_lowerCAmelCase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(_lowerCAmelCase )
# Save tokenizer rust, legacy_format=True
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = tokenizer_r.save_pretrained(_lowerCAmelCase ,legacy_format=_lowerCAmelCase )
UpperCAmelCase = tokenizer_p.save_pretrained(_lowerCAmelCase )
# Checks it save with the same files
self.assertSequenceEqual(_lowerCAmelCase ,_lowerCAmelCase )
# Checks everything loads correctly in the same way
UpperCAmelCase = tokenizer_r.from_pretrained(_lowerCAmelCase )
UpperCAmelCase = tokenizer_p.from_pretrained(_lowerCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowerCAmelCase ,_lowerCAmelCase ) )
shutil.rmtree(_lowerCAmelCase )
# Save tokenizer rust, legacy_format=False
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = tokenizer_r.save_pretrained(_lowerCAmelCase ,legacy_format=_lowerCAmelCase )
UpperCAmelCase = tokenizer_p.save_pretrained(_lowerCAmelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCAmelCase = tokenizer_r.from_pretrained(_lowerCAmelCase )
UpperCAmelCase = tokenizer_p.from_pretrained(_lowerCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowerCAmelCase ,_lowerCAmelCase ) )
shutil.rmtree(_lowerCAmelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( unittest.TestCase ):
SCREAMING_SNAKE_CASE = "facebook/mbart-large-50-one-to-many-mmt"
SCREAMING_SNAKE_CASE = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
SCREAMING_SNAKE_CASE = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
SCREAMING_SNAKE_CASE = [EN_CODE, 8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2]
@classmethod
def _UpperCamelCase ( cls ):
        cls.tokenizer = MBartaaTokenizer.from_pretrained(
            cls.checkpoint_name ,src_lang="""en_XX""" ,tgt_lang="""ro_RO""" )
        cls.pad_token_id = 1
        return cls
def _UpperCamelCase ( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] ,250_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] ,250_004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] ,250_020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""mr_IN"""] ,250_038 )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens ,_lowerCAmelCase )
def _UpperCamelCase ( self ):
self.assertIn(_lowerCAmelCase ,self.tokenizer.all_special_ids )
UpperCAmelCase = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
UpperCAmelCase = self.tokenizer.decode(_lowerCAmelCase ,skip_special_tokens=_lowerCAmelCase )
UpperCAmelCase = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase ,_lowerCAmelCase )
self.assertNotIn(self.tokenizer.eos_token ,_lowerCAmelCase )
def _UpperCamelCase ( self ):
UpperCAmelCase = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] ,_lowerCAmelCase )
UpperCAmelCase = 10
UpperCAmelCase = self.tokenizer(_lowerCAmelCase ,max_length=_lowerCAmelCase ,truncation=_lowerCAmelCase ).input_ids[0]
self.assertEqual(ids[0] ,_lowerCAmelCase )
self.assertEqual(ids[-1] ,2 )
self.assertEqual(len(_lowerCAmelCase ) ,_lowerCAmelCase )
def _UpperCamelCase ( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) ,[250_053, 250_001] )
    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBart50Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
    @require_torch
    def test_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True,
            max_length=len(self.expected_src_tokens), return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, 0])  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    def test_seq2seq_max_target_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt")
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)
        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR")
        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[250_004, 62, 3_034, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250_001,
            },
        )
| 341 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/deit-base-distilled-patch16-224": (
        "https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 18 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16_384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16_384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
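# --- Illustrative usage (added; not part of the upstream file) ---
# CANINE is tokenizer-free: inputs are Unicode code points, and the hash-bucket
# fields above size the character embedding. A quick round-trip of the config:
if __name__ == "__main__":
    cfg = CanineConfig(num_hash_functions=8, num_hash_buckets=16_384)
    print(cfg.downsampling_rate, cfg.local_transformer_stride)  # 4 128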
| 576 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
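# --- Illustrative note (added; not part of the upstream file) ---
# With the _LazyModule registration above, heavy submodules are imported only on
# first attribute access. A standalone sketch of the same deferred-import idea:
import importlib


def lazy_getattr(module_name: str, attr: str):
    """Import `module_name` on demand and return `attr` from it."""
    return getattr(importlib.import_module(module_name), attr)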
| 18 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}


class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50_265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
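# --- Illustrative usage (added; not part of the upstream file) ---
# TrOCRConfig describes only the text decoder; it is normally paired with an
# image encoder inside VisionEncoderDecoderModel. The attribute_map above
# aliases the generic config names onto the decoder-specific ones:
if __name__ == "__main__":
    cfg = TrOCRConfig(d_model=256, decoder_layers=4, decoder_attention_heads=8)
    print(cfg.hidden_size, cfg.num_hidden_layers)  # 256 4 (via attribute_map)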
| 533 |
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class LevitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}")
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample,
            data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
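# --- Illustrative usage (added; not part of the upstream file) ---
# Preprocess one random HWC uint8 image with the processor defined above; with
# the default config this resizes the short side to ~256, then center-crops to
# 224x224 and normalizes with the ImageNet statistics.
if __name__ == "__main__":
    processor = LevitImageProcessor()
    dummy = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)
    features = processor(dummy)
    print(features["pixel_values"][0].shape)  # (3, 224, 224)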
| 18 | 0 |
"""simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '''\\n\n'''
_DESCRIPTION = '''\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'''
_KWARGS_DESCRIPTION = '''\nArgs:\n    model_id (str): model used for calculating Perplexity\n        NOTE: Perplexity can only be calculated for causal language models.\n        This includes models such as gpt2, causal variations of bert,\n        causal versions of t5, and more (the full list can be found\n        in the AutoModelForCausalLM documentation here:\n        https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n    input_texts (list of str): input text, each separate text snippet\n        is one list entry.\n    batch_size (int): the batch size to run texts through the model. Defaults to 16.\n    add_start_token (bool): whether to add the start token to the texts,\n        so the perplexity can include the probability of the first word. Defaults to True.\n    device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n    perplexity: dictionary containing the perplexity scores for the texts\n    in the input list, as well as the mean perplexity. If one of the input texts is\n    longer than the max input length of the model, then it is truncated to the\n    max length for the perplexity computation.\nExamples:\n    Example 1:\n    >>> perplexity = datasets.load_metric("perplexity")\n    >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n    >>> results = perplexity.compute(model_id=\'gpt2\',\n    ...                              add_start_token=False,\n    ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n    >>> print(list(results.keys()))\n    [\'perplexities\', \'mean_perplexity\']\n    >>> print(round(results["mean_perplexity"], 2))\n    78.22\n    >>> print(round(results["perplexities"][0], 2))\n    11.11\n\n    Example 2:\n    >>> perplexity = datasets.load_metric("perplexity")\n    >>> input_texts = datasets.load_dataset("wikitext",\n    ...                                     "wikitext-2-raw-v1",\n    ...                                     split="test")["text"][:50] # doctest:+ELLIPSIS\n    [...]\n    >>> input_texts = [s for s in input_texts if s!=\'\']\n    >>> results = perplexity.compute(model_id=\'gpt2\',\n    ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n    >>> print(list(results.keys()))\n    [\'perplexities\', \'mean_perplexity\']\n    >>> print(round(results["mean_perplexity"], 2))\n    60.35\n    >>> print(round(results["perplexities"][0], 2))\n    81.12\n'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"
        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})
        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length
        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)
        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]
        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")
        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]
            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1)
            labels = encoded_batch
            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits
            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()
            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )
            ppls += perplexity_batch.tolist()
        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 633 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
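# --- Illustrative check (added; not part of the upstream file) ---
# With the defaults above (embed_dim=96 and four stages), the derived channel
# width of the last stage is 96 * 2**3 = 768.
if __name__ == "__main__":
    cfg = DonutSwinConfig()
    assert cfg.hidden_size == 96 * 2 ** (len(cfg.depths) - 1) == 768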
| 18 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 694 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}
class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
| 18 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
__A = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["ViTFeatureExtractor"]
__A = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 346 |
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass
    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        # enable deterministic behavior for gradient checkpointing
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)
        assert not model.is_gradient_checkpointing and model.training
        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()
        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()
        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()
        assert model_2.is_gradient_checkpointing and model_2.training
        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()
        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))
    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        image = model(**self.dummy_input)
        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()
        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)
        image = torch.randn(
            1, model.config.in_channels, model.config.sample_size, model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026])
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32
        model = AutoencoderKL.from_pretrained(
            model_id, subfolder="vae", torch_dtype=torch_dtype, revision=revision,
        )
        model.to(torch_device).eval()
        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)
        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)
        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        with torch.no_grad():
            sample = model(image).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))
        with torch.no_grad():
            sample = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)
        with torch.no_grad():
            sample = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
    @parameterized.expand([(13,), (16,), (27,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)
        with torch.no_grad():
            sample = model.decode(encoding).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        assert torch_all_close(sample, sample_2, atol=1e-1)
    @parameterized.expand([(13,), (16,), (37,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))
        with torch.no_grad():
            sample = model.decode(encoding).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        assert torch_all_close(sample, sample_2, atol=1e-2)
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)
        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)
        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
| 18 | 0 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]
    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1
    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2
    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)
    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])
    model = LukeForMaskedLM(config=config).eval()
    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]
    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)
    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")
    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")
    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")
    outputs = model(**encoding)
    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor(
            [[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}")
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}")
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")
    outputs = model(**encoding)
    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)
    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"
    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]
    data = [json.loads(line) for line in open(entity_vocab_path)]
    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
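# --- Illustrative invocation (added; all paths below are placeholders) ---
#   python convert_mluke_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./mluke/pytorch_model.bin \
#       --metadata_path ./mluke/metadata.json \
#       --entity_vocab_path ./mluke/entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./converted_mluke \
#       --model_size base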
| 148 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
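# --- Illustrative usage (added; not part of the upstream file) ---
# multi_query=True is what gives GPTBigCode its single-KV-head attention; the
# attribute_map above aliases the generic config names onto the GPT-2-style ones.
if __name__ == "__main__":
    cfg = GPTBigCodeConfig(n_embd=2048, n_layer=24)
    print(cfg.hidden_size, cfg.num_hidden_layers, cfg.multi_query)  # 2048 24 True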
| 18 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs is different for question answering and sequence classification
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )
        return inputs
| 50 |
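A quick usage sketch for the pair of classes above. It assumes the restored names (LayoutLMv3Config, LayoutLMv3OnnxConfig) and the standard transformers package layout; the checkpoint correspondence noted in the comment is the one implied by the archive map above.

# Usage sketch (illustrative; assumes the de-obfuscated class names above):
from transformers import LayoutLMv3Config
from transformers.models.layoutlmv3.configuration_layoutlmv3 import LayoutLMv3OnnxConfig

config = LayoutLMv3Config()  # defaults correspond to microsoft/layoutlmv3-base
onnx_config = LayoutLMv3OnnxConfig(config, task="question-answering")
print(onnx_config.inputs)              # axes for input_ids, attention_mask, bbox, pixel_values
print(onnx_config.default_onnx_opset)  # 12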
"""Data2VecAudio model configuration."""
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class Data2VecAudioConfig(PretrainedConfig):
    model_type = "data2vec-audio"

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1,
                 attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1,
                 initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_activation="gelu",
                 conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2),
                 conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embedding_groups=16,
                 conv_pos_kernel_size=19, num_conv_pos_embeddings=5, mask_time_prob=0.05, mask_time_length=10,
                 mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0,
                 ctc_loss_reduction="sum", ctc_zero_infinity=False, use_weighted_layer_sum=False,
                 classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1),
                 tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1,
                 eos_token_id=2, add_adapter=False, adapter_kernel_size=3, adapter_stride=2,
                 num_adapter_layers=3, output_hidden_size=None, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        # Number of raw audio samples that collapse into one logit frame
        return math.prod(self.conv_stride)
| 18 | 0 |
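For the audio config, the one derived quantity worth illustrating is inputs_to_logits_ratio: with the default strides (5, 2, 2, 2, 2, 2, 2), one logit frame covers 5 * 2**6 = 320 raw samples. A minimal sketch, assuming the restored class name matches the upstream transformers package:

# Usage sketch (illustrative):
from transformers import Data2VecAudioConfig

config = Data2VecAudioConfig()
print(config.inputs_to_logits_ratio)  # math.prod((5, 2, 2, 2, 2, 2, 2)) == 320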