import os
import random
import sys

from . import cryptomath_module as cryptomath
from . import rabin_miller

min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)

    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists.\n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)

    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
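
# A minimal companion sketch (my addition, not part of the module above):
# textbook ElGamal encryption/decryption matching the key layout produced by
# generate_key(), where public_key = (key_size, e_1, e_2, p) with
# e_2 = (e_1 ** d) ** -1 mod p and private_key = (key_size, d).
# Illustration only -- no padding, not production-safe.
import random


def elgamal_encrypt(public_key: tuple[int, int, int, int], message: int) -> tuple[int, int]:
    _, e_1, e_2, p = public_key
    ephemeral = random.randrange(2, p - 1)  # fresh per-message secret
    c_1 = pow(e_1, ephemeral, p)
    c_2 = (message * pow(e_2, ephemeral, p)) % p
    return c_1, c_2


def elgamal_decrypt(private_key: tuple[int, int], p: int, ciphertext: tuple[int, int]) -> int:
    _, d = private_key
    c_1, c_2 = ciphertext
    # c_2 * c_1^d = m * e_1^(-d*k) * e_1^(d*k) = m (mod p)
    return (c_2 * pow(c_1, d, p)) % p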
import argparse

from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import torch
from torch import nn

from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging


logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    """Wrapper that composes multiple `ControlNetModel` instances for Multi-ControlNet."""

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )
            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )
            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)
            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. "
                f"Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
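
# A short usage sketch (an assumption, not part of this module): wrap two
# pretrained ControlNets so a pipeline can condition on both signals at once.
# The checkpoint names are illustrative, and the import path for
# MultiControlNetModel may differ between diffusers versions.
from diffusers import ControlNetModel
from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel

canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
depth = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-depth")
multi = MultiControlNetModel([canny, depth])
# `multi` now accepts per-net conditioning images and scales as lists and can
# stand in for a single ControlNetModel in a StableDiffusionControlNetPipeline.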
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
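
# A self-contained sketch (my addition, not transformers code) of the lazy-import
# pattern used above: the module object maps each public symbol to the submodule
# that defines it, and only imports that submodule on first attribute access.
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._symbol_to_module = {
            symbol: submodule for submodule, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, symbol: str):
        submodule_name = self._symbol_to_module.get(symbol)
        if submodule_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {symbol!r}")
        value = getattr(importlib.import_module(f"{self.__name__}.{submodule_name}"), symbol)
        setattr(self, symbol, value)  # cache so later lookups bypass __getattr__
        return value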
from typing import List, Optional

from tokenizers import ByteLevelBPETokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}


class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" BlenderbotSmall tokenizer, backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
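
# A quick usage sketch (an assumption for illustration: the
# facebook/blenderbot_small-90M files referenced above are reachable).
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
print(tokenizer("sample text").input_ids)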
from sympy import diff, lambdify, symbols
from sympy.functions import *  # noqa: F403


def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find a root of `function` from `starting_point` by the Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")

    # Find root of polynomial
    # Find fourth Root of 5
    print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 + 5j)}")

    # Find value of e
    print(
        "The root of log(y) - 1 = 0 is ",
        f"{newton_raphson('log(y) - 1', 2, variable='y')}",
    )

    # Exponential Roots
    print(
        "The root of exp(x) - 1 = 0 is",
        f"{newton_raphson('exp(x) - 1', 10, precision=0.005)}",
    )

    # Find root of cos(x)
    print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
import argparse
import os
import re


PATH_TO_AUTO_MODULE = "src/transformers/models/auto"


# re pattern that matches mapping introductions:
#   SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
from manim import *


class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1),
            Create(cpu_right_col, run_time=1),
            Create(gpu_rect, run_time=1),
        )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))

        self.add(model)

        first_animations = []
        second_animations = []
        cpu_targs = []

        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3

            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)

            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union

import sentencepiece

from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
    },
    "spm_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_config_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/m2m100_418M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
    "m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
    "wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"]
}
# fmt: on


class M2M100Tokenizer(PreTrainedTokenizer):
    """Construct an M2M100 tokenizer, based on SentencePiece plus a vocab.json mapping."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        src_lang=None,
        tgt_lang=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        language_codes="m2m100",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        num_madeup_words=8,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            language_codes=language_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            num_madeup_words=num_madeup_words,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words

    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source language setting: prefix=[src_lang_code], suffix=[eos]."""
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target language setting: prefix=[tgt_lang_code], suffix=[eos]."""
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
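
# A short usage sketch (assumption: the facebook/m2m100_418M checkpoint is
# available). The tokenizer prefixes the source language token and appends </s>,
# mirroring set_src_lang_special_tokens() above.
from transformers import M2M100Tokenizer

tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
encoded = tokenizer("Hello world", return_tensors="pt")
print(encoded.input_ids)  # first id is the __en__ language token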
import shutil
import tempfile
import unittest

from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio

from .test_feature_extraction_clap import floats_list


@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
from __future__ import annotations


def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """
    Apply Ohm's Law to any two given electrical values (voltage, current,
    resistance) and return the name/value pair of the zero value.

    >>> ohms_law(voltage=10, resistance=5, current=0)
    {'current': 2.0}
    """
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
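
# Example calls for the function above (illustration only).
print(ohms_law(voltage=10, current=0, resistance=5))  # {'current': 2.0}
print(ohms_law(voltage=0, current=2, resistance=3))   # {'voltage': 6.0}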
import warnings

from ..trainer import Trainer
from ..utils import logging


logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse

import numpy as np
import torch

from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")


def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """
    Recursive 0/1 knapsack: maximum total value attainable within `max_weight`.

    >>> knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)
    13
    """
    if index == number_of_items:
        return 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    ans2 = 0
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
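
# Example for the function above: with a weight limit of 5 the best choice is
# the items of weight 1 and 4, worth 5 + 8 = 13.
weights = [1, 2, 4, 5]
values = [5, 4, 8, 6]
print(knapsack(weights, values, number_of_items=4, max_weight=5, index=0))  # 13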
import unittest

from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
@require_sentencepiece
@slow  # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k : empirically determined constant in [0.04, 0.06]
        window_size : size of the neighbourhood considered
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str):
        """Return the image with corners marked and the list of corner positions."""
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()

                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : List[Any] = ViTImageProcessor if is_vision_available() else None
@property
def lowerCAmelCase__ ( self : str ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: int = (3, 32, 128)
UpperCamelCase_: Optional[Any] = tempfile.mkdtemp()
# fmt: off
UpperCamelCase_: Any = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
UpperCamelCase_: Optional[Any] = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
UpperCamelCase_: str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(snake_case_ ) + """\n""" )
UpperCamelCase_: List[Any] = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 32, """width""": 128},
}
UpperCamelCase_: Any = os.path.join(self.tmpdirname , snake_case_ )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : List[Any] , **snake_case_ : List[Any] ):
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def lowerCAmelCase__ ( self : Union[str, Any] , **snake_case_ : str ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **snake_case_ )
def lowerCAmelCase__ ( self : Optional[int] ):
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: int = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )
UpperCamelCase_: Any = Image.fromarray(np.moveaxis(snake_case_ , 0 , -1 ) )
return image_input
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: int = self.get_tokenizer()
UpperCamelCase_: Any = self.get_image_processor()
UpperCamelCase_: Any = MgpstrProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
processor.save_pretrained(self.tmpdirname )
        UpperCamelCase_: Optional[int] = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=False )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , snake_case_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case_ )
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: str = self.get_tokenizer()
UpperCamelCase_: List[str] = self.get_image_processor()
UpperCamelCase_: List[str] = MgpstrProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_: List[Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        UpperCamelCase_: Optional[Any] = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        UpperCamelCase_: List[Any] = MgpstrProcessor.from_pretrained(
            self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=False , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , snake_case_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: Any = self.get_image_processor()
UpperCamelCase_: Optional[Any] = self.get_tokenizer()
UpperCamelCase_: Dict = MgpstrProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
UpperCamelCase_: Optional[int] = self.prepare_image_inputs()
UpperCamelCase_: Optional[int] = image_processor(snake_case_ , return_tensors="""np""" )
UpperCamelCase_: Union[str, Any] = processor(images=snake_case_ , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: str = self.get_image_processor()
UpperCamelCase_: Union[str, Any] = self.get_tokenizer()
UpperCamelCase_: str = MgpstrProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
UpperCamelCase_: Dict = """test"""
UpperCamelCase_: Dict = processor(text=snake_case_ )
UpperCamelCase_: Any = tokenizer(snake_case_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: Optional[Any] = self.get_image_processor()
UpperCamelCase_: List[str] = self.get_tokenizer()
UpperCamelCase_: int = MgpstrProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
UpperCamelCase_: Tuple = """test"""
UpperCamelCase_: int = self.prepare_image_inputs()
UpperCamelCase_: str = processor(text=snake_case_ , images=snake_case_ )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """labels"""] )
# test if it raises when no input is passed
with pytest.raises(snake_case_ ):
processor()
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: Union[str, Any] = self.get_image_processor()
UpperCamelCase_: Union[str, Any] = self.get_tokenizer()
UpperCamelCase_: Any = MgpstrProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
UpperCamelCase_: List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase_: Union[str, Any] = processor.char_decode(snake_case_ )
UpperCamelCase_: int = tokenizer.batch_decode(snake_case_ )
UpperCamelCase_: int = [seq.replace(""" """ , """""" ) for seq in decoded_tok]
self.assertListEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: str = self.get_image_processor()
UpperCamelCase_: Dict = self.get_tokenizer()
UpperCamelCase_: Dict = MgpstrProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
UpperCamelCase_: Optional[Any] = None
UpperCamelCase_: Optional[int] = self.prepare_image_inputs()
UpperCamelCase_: List[str] = processor(text=snake_case_ , images=snake_case_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def lowerCAmelCase__ ( self : List[Any] ):
UpperCamelCase_: str = self.get_image_processor()
UpperCamelCase_: Optional[Any] = self.get_tokenizer()
UpperCamelCase_: Union[str, Any] = MgpstrProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
UpperCamelCase_: Any = torch.randn(1 , 27 , 38 )
UpperCamelCase_: int = torch.randn(1 , 27 , 5_0257 )
UpperCamelCase_: Optional[int] = torch.randn(1 , 27 , 3_0522 )
UpperCamelCase_: List[str] = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""] )
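# Hedged note (added for illustration, not part of the upstream test): MGP-STR's
# `batch_decode` fuses the three decoding heads exercised above; the resulting
# """generated_text""" keeps, per sample, the candidate with the highest
# confidence among the char/bpe/wp predictions.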
| 704 |
import random
def A__ ( vertices_number , probability , directed = False ) -> dict:
    graph: dict = {i: [] for i in range(vertices_number )}
    # if the probability is greater than or equal to 1, generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number )
    # if the probability is less than or equal to 0, return a graph without edges
    if probability <= 0:
        return graph
    # for each pair of nodes i < j, add an edge from i to j
    # if the randomly generated number is less than the probability
    for i in range(vertices_number ):
        for j in range(i + 1 , vertices_number ):
            if random.random() < probability:
                graph[i].append(j )
                if not directed:
                    # if the graph is undirected, also add the reverse edge from j to i
                    graph[j].append(i )
    return graph
def complete_graph ( vertices_number ) -> dict:
    return {
        i: [j for j in range(vertices_number ) if i != j] for i in range(vertices_number )
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
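    # Hedged usage sketch (added for illustration): a reproducible sparse graph
    # over 5 nodes next to a complete graph over 3 nodes.
    random.seed(0 )
    print(A__(5 , 0.5 ) )
    print(complete_graph(3 ) )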
| 670 | 0 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 705 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: Optional[int] = logging.get_logger()
# the current default level is logging.WARNING
UpperCamelCase_: Dict = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(snake_case_ )
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: Union[str, Any] = logging.get_verbosity()
UpperCamelCase_: int = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
UpperCamelCase_: Union[str, Any] = """Testing 1, 2, 3"""
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(snake_case_ ) as cl:
logger.warning(snake_case_ )
self.assertEqual(cl.out , msg + """\n""" )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(snake_case_ ) as cl:
logger.warning(snake_case_ )
self.assertEqual(cl.out , """""" )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(snake_case_ ) as cl:
logger.warning(snake_case_ )
self.assertEqual(cl.out , msg + """\n""" )
# restore to the original level
logging.set_verbosity(snake_case_ )
@mockenv(TRANSFORMERS_VERBOSITY="""error""" )
def lowerCAmelCase__ ( self : Optional[int] ):
        # reset so the env var takes effect the next time a logger call is made
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
UpperCamelCase_: List[str] = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
UpperCamelCase_: str = os.getenv("""TRANSFORMERS_VERBOSITY""" , snake_case_ )
UpperCamelCase_: Any = logging.log_levels[env_level_str]
UpperCamelCase_: Dict = logging.get_verbosity()
self.assertEqual(
snake_case_ , snake_case_ , f'''TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}''' , )
# restore to the original level
UpperCamelCase_: str = """"""
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="""super-error""" )
def lowerCAmelCase__ ( self : List[Any] ):
        # reset so the env var takes effect the next time a logger call is made
transformers.utils.logging._reset_library_root_logger()
UpperCamelCase_: str = logging.logging.getLogger()
with CaptureLogger(snake_case_ ) as cl:
# this action activates the env var
logging.get_logger("""transformers.models.bart.tokenization_bart""" )
self.assertIn("""Unknown option TRANSFORMERS_VERBOSITY=super-error""" , cl.out )
# no need to restore as nothing was changed
def lowerCAmelCase__ ( self : List[Any] ):
# testing `logger.warning_advice()`
transformers.utils.logging._reset_library_root_logger()
UpperCamelCase_: List[str] = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
UpperCamelCase_: Any = """Testing 1, 2, 3"""
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""1""" ):
# nothing should be logged as env var disables this method
with CaptureLogger(snake_case_ ) as cl:
logger.warning_advice(snake_case_ )
self.assertEqual(cl.out , """""" )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""""" ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(snake_case_ ) as cl:
logger.warning_advice(snake_case_ )
self.assertEqual(cl.out , msg + """\n""" )
def A__ ( ) -> Union[str, Any]:
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
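# Hedged usage note (added for illustration): the same helpers can silence tqdm
# bars in user code, e.g. around bulk downloads in CI:
# from transformers.utils.logging import disable_progress_bar, enable_progress_bar
# disable_progress_bar()
# ...  # noisy work
# enable_progress_bar()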
| 670 | 0 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)
class SchedulerType ( Enum ):
    '''simple docstring'''

    LINEAR = """linear"""
    COSINE = """cosine"""
    COSINE_WITH_RESTARTS = """cosine_with_restarts"""
    POLYNOMIAL = """polynomial"""
    CONSTANT = """constant"""
    CONSTANT_WITH_WARMUP = """constant_with_warmup"""
    PIECEWISE_CONSTANT = """piecewise_constant"""
def get_constant_schedule ( optimizer , last_epoch = -1 ) -> LambdaLR:
    return LambdaLR(optimizer , lambda _ : 1 , last_epoch=last_epoch )
def get_constant_schedule_with_warmup ( optimizer , num_warmup_steps , last_epoch = -1 ) -> LambdaLR:
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1.0 , num_warmup_steps ) )
        return 1.0
    return LambdaLR(optimizer , lr_lambda , last_epoch=last_epoch )
def get_piecewise_constant_schedule ( optimizer , step_rules , last_epoch = -1 ) -> LambdaLR:
    rules_dict = {}
    rule_list = step_rules.split(""",""" )
    for rule_str in rule_list[:-1]:
        step_str, value_str = rule_str.split(""":""" )
        steps = int(step_str )
        value = float(value_str )
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1] )
    def create_rules_function(rules_dict , last_lr_multiple ):
        def rule_func(steps ) -> float:
            sorted_steps = sorted(rules_dict.keys() )
            for i, sorted_step in enumerate(sorted_steps ):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple
        return rule_func
    rules_func = create_rules_function(rules_dict , last_lr_multiple )
    return LambdaLR(optimizer , rules_func , last_epoch=last_epoch )
def get_linear_schedule_with_warmup ( optimizer , num_warmup_steps , num_training_steps , last_epoch=-1 ) -> LambdaLR:
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        return max(
            0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_schedule_with_warmup ( optimizer , num_warmup_steps , num_training_steps , num_cycles = 0.5 , last_epoch = -1 ) -> LambdaLR:
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(num_cycles ) * 2.0 * progress )) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_with_hard_restarts_schedule_with_warmup ( optimizer , num_warmup_steps , num_training_steps , num_cycles = 1 , last_epoch = -1 ) -> LambdaLR:
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        if progress >= 1.0:
            return 0.0
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles ) * progress) % 1.0) )) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_polynomial_decay_schedule_with_warmup ( optimizer , num_warmup_steps , num_training_steps , lr_end=1E-7 , power=1.0 , last_epoch=-1 ) -> LambdaLR:
    lr_init = optimizer.defaults["""lr"""]
    if not (lr_init > lr_end):
        raise ValueError(F'''lr_end ({lr_end}) must be smaller than initial lr ({lr_init})''' )
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init
    return LambdaLR(optimizer , lr_lambda , last_epoch )
TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler ( name , optimizer , step_rules = None , num_warmup_steps = None , num_training_steps = None , num_cycles = 1 , power = 1.0 , last_epoch = -1 , ) -> LambdaLR:
    name = SchedulerType(name )
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer , last_epoch=last_epoch )
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer , step_rules=step_rules , last_epoch=last_epoch )
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(F'''{name} requires `num_warmup_steps`, please provide that argument.''' )
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer , num_warmup_steps=num_warmup_steps , last_epoch=last_epoch )
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(F'''{name} requires `num_training_steps`, please provide that argument.''' )
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , num_cycles=num_cycles , last_epoch=last_epoch , )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , power=power , last_epoch=last_epoch , )
    return schedule_func(
        optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , last_epoch=last_epoch )
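# Hedged usage sketch (added for illustration; `model` is a placeholder):
# import torch
# optimizer = torch.optim.AdamW(model.parameters() , lr=5e-5 )
# scheduler = get_scheduler("""linear""" , optimizer , num_warmup_steps=100 , num_training_steps=1000 )
# for _ in range(1000 ):
#     optimizer.step()
#     scheduler.step()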
| 706 |
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
lowerCamelCase_ : Optional[int] = HUGGINGFACE_HUB_CACHE
lowerCamelCase_ : List[str] = """config.json"""
lowerCamelCase_ : Any = """diffusion_pytorch_model.bin"""
lowerCamelCase_ : Union[str, Any] = """diffusion_flax_model.msgpack"""
lowerCamelCase_ : Dict = """model.onnx"""
lowerCamelCase_ : List[Any] = """diffusion_pytorch_model.safetensors"""
lowerCamelCase_ : Optional[Any] = """weights.pb"""
lowerCamelCase_ : Optional[Any] = """https://huggingface.co"""
lowerCamelCase_ : Union[str, Any] = default_cache_path
lowerCamelCase_ : Tuple = """diffusers_modules"""
lowerCamelCase_ : Optional[Any] = os.getenv("""HF_MODULES_CACHE""", os.path.join(hf_cache_home, """modules"""))
lowerCamelCase_ : str = ["""fp16""", """non-ema"""]
lowerCamelCase_ : List[Any] = """.self_attn"""
| 670 | 0 |
import math
import unittest
def is_prime ( number ) -> bool:
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must be an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
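# Hedged doctest-style sketch (added for illustration):
# >>> [n for n in range(20 ) if is_prime(n )]
# [2, 3, 5, 7, 11, 13, 17, 19]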
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : int ):
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def lowerCAmelCase__ ( self : Tuple ):
with self.assertRaises(snake_case_ ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , )
self.assertFalse(
is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 707 |
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: List[Any] = inspect.getfile(accelerate.test_utils )
UpperCamelCase_: List[str] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] )
UpperCamelCase_: str = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: Any = f'''
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
'''.split()
UpperCamelCase_: Dict = [sys.executable] + distributed_args
execute_subprocess_async(snake_case_ , env=os.environ.copy() )
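# Hedged note (added for illustration): xla_spawn.py plays the role of
# torch.distributed.launch for TPUs, forking one process per core (8 above)
# before running the target script.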
| 670 | 0 |
import requests
def A__ ( lowerCamelCase , lowerCamelCase ) -> None:
UpperCamelCase_: Union[str, Any] = {"""Content-Type""": """application/json"""}
UpperCamelCase_: List[Any] = requests.post(lowerCamelCase , json={"""text""": message_body} , headers=lowerCamelCase )
if response.status_code != 2_00:
UpperCamelCase_: Optional[Any] = (
"""Request to slack returned an error """
F'''{response.status_code}, the response is:\n{response.text}'''
)
raise ValueError(lowerCamelCase )
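# Hedged note (added for illustration): Slack incoming webhooks take a JSON
# payload whose """text""" field becomes the message body; only a 200 response
# signals success, which is why anything else raises above.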
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""")
| 708 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class _UpperCamelCase ( _A , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Optional[int] = BarthezTokenizer
__UpperCamelCase : str = BarthezTokenizerFast
__UpperCamelCase : str = True
__UpperCamelCase : List[Any] = True
def lowerCAmelCase__ ( self : Optional[int] ):
super().setUp()
UpperCamelCase_: Tuple = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=snake_case_ )
UpperCamelCase_: Dict = tokenizer
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: str = """<pad>"""
UpperCamelCase_: int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(snake_case_ ) , 10_1122 )
def lowerCAmelCase__ ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 )
@require_torch
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: Dict = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
UpperCamelCase_: Union[str, Any] = [0, 57, 3018, 7_0307, 91, 2]
UpperCamelCase_: Union[str, Any] = self.tokenizer(
snake_case_ , max_length=len(snake_case_ ) , padding=snake_case_ , truncation=snake_case_ , return_tensors="""pt""" )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
UpperCamelCase_: Any = batch.input_ids.tolist()[0]
self.assertListEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : Any ):
if not self.test_rust_tokenizer:
return
UpperCamelCase_: Optional[Any] = self.get_tokenizer()
UpperCamelCase_: Union[str, Any] = self.get_rust_tokenizer()
UpperCamelCase_: str = """I was born in 92000, and this is falsé."""
UpperCamelCase_: str = tokenizer.tokenize(snake_case_ )
UpperCamelCase_: int = rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: int = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
UpperCamelCase_: int = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: List[str] = self.get_rust_tokenizer()
UpperCamelCase_: Tuple = tokenizer.encode(snake_case_ )
UpperCamelCase_: Tuple = rust_tokenizer.encode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
@slow
def lowerCAmelCase__ ( self : int ):
# fmt: off
UpperCamelCase_: Optional[Any] = {"""input_ids""": [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
UpperCamelCase_: str = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=snake_case_ , )
| 670 | 0 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : Optional[int] = ["""image_processor""", """tokenizer"""]
__UpperCamelCase : Tuple = """ViltImageProcessor"""
__UpperCamelCase : Any = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : Optional[int] , snake_case_ : str=None , snake_case_ : Optional[int]=None , **snake_case_ : str ):
UpperCamelCase_: List[str] = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , snake_case_ , )
UpperCamelCase_: List[str] = kwargs.pop("""feature_extractor""" )
UpperCamelCase_: Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(snake_case_ , snake_case_ )
UpperCamelCase_: int = self.image_processor
def __call__( self : Tuple , snake_case_ : List[str] , snake_case_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , snake_case_ : bool = True , snake_case_ : Union[bool, str, PaddingStrategy] = False , snake_case_ : Union[bool, str, TruncationStrategy] = None , snake_case_ : Optional[int] = None , snake_case_ : int = 0 , snake_case_ : Optional[int] = None , snake_case_ : Optional[bool] = None , snake_case_ : Optional[bool] = None , snake_case_ : bool = False , snake_case_ : bool = False , snake_case_ : bool = False , snake_case_ : bool = False , snake_case_ : bool = True , snake_case_ : Optional[Union[str, TensorType]] = None , **snake_case_ : int , ):
UpperCamelCase_: Any = self.tokenizer(
text=snake_case_ , add_special_tokens=snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , stride=snake_case_ , pad_to_multiple_of=snake_case_ , return_token_type_ids=snake_case_ , return_attention_mask=snake_case_ , return_overflowing_tokens=snake_case_ , return_special_tokens_mask=snake_case_ , return_offsets_mapping=snake_case_ , return_length=snake_case_ , verbose=snake_case_ , return_tensors=snake_case_ , **snake_case_ , )
# add pixel_values + pixel_mask
UpperCamelCase_: Any = self.image_processor(snake_case_ , return_tensors=snake_case_ )
encoding.update(snake_case_ )
return encoding
def lowerCAmelCase__ ( self : Optional[Any] , *snake_case_ : Dict , **snake_case_ : Tuple ):
return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )
def lowerCAmelCase__ ( self : List[Any] , *snake_case_ : Optional[int] , **snake_case_ : List[str] ):
return self.tokenizer.decode(*snake_case_ , **snake_case_ )
@property
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: int = self.tokenizer.model_input_names
UpperCamelCase_: Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowerCAmelCase__ ( self : List[Any] ):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , snake_case_ , )
return self.image_processor_class
@property
def lowerCAmelCase__ ( self : int ):
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , snake_case_ , )
return self.image_processor
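# Hedged usage sketch (added for illustration; upstream this class is
# `ViltProcessor` and the checkpoint name is just an example):
# from PIL import Image
# processor = ViltProcessor.from_pretrained("""dandelin/vilt-b32-finetuned-vqa""" )
# inputs = processor(Image.open("""cats.png""" ) , """How many cats are there?""" , return_tensors="""pt""" )
# # `inputs` pairs input_ids/attention_mask with pixel_values and pixel_mask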
| 709 |
def add ( first , second ) -> int:
    while second != 0:
        c = first & second  # carry: bits set in both operands
        first ^= second  # sum without the carry
        second = c << 1  # shift the carry into the next position
    return first
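# Worked example (added for illustration): add(5, 3) iterates
#   c = 0b101 & 0b011 = 0b001 , first = 0b110 , second = 0b010
#   c = 0b110 & 0b010 = 0b010 , first = 0b100 , second = 0b100
#   c = 0b100 & 0b100 = 0b100 , first = 0b000 , second = 0b1000
#   c = 0b000 & 0b1000 = 0 ,    first = 0b1000 , second = 0
# and returns 8.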
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input("""Enter the first number: """).strip())
    second = int(input("""Enter the second number: """).strip())
print(F"""{add(first, second) = }""")
| 670 | 0 |
def A__ ( grid ) -> int:
    if not grid or not grid[0]:
        raise TypeError("""The grid does not contain the appropriate information""" )
    for cell_n in range(1 , len(grid[0] ) ):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1 , len(grid ) ):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row , row_above )
        row_above = grid[row_n]
    return grid[-1][-1]
def fill_row ( current_row , row_above ) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1 , len(current_row ) ):
        current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
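    # Hedged demo (added for illustration): the classic 3x3 minimum-path-sum grid;
    # the cheapest route 1 -> 3 -> 1 -> 1 -> 1 costs 7.
    assert A__([[1, 3, 1], [1, 5, 1], [4, 2, 1]] ) == 7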
| 710 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
lowerCamelCase_ : List[str] = logging.getLogger(__name__)
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
__UpperCamelCase : bool = field(
default=_A , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
__UpperCamelCase : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
__UpperCamelCase : bool = field(
default=_A , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : Optional[str] = field(default=_A , metadata={"""help""": """The input training data file (a text file)."""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
__UpperCamelCase : bool = field(
default=_A , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. If passed, sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__UpperCamelCase : bool = field(
default=_A , metadata={
"""help""": (
"""Whether to pad all samples to the maximum sentence length. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch. More """
"""efficient on GPU but very bad for TPU."""
)
} , )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def lowerCAmelCase__ ( self : Dict ):
if self.train_file is not None:
UpperCamelCase_: Union[str, Any] = self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
UpperCamelCase_: Dict = self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : PreTrainedTokenizerBase
__UpperCamelCase : Union[bool, str, PaddingStrategy] = True
__UpperCamelCase : Optional[int] = None
__UpperCamelCase : Optional[int] = None
def __call__( self : Optional[int] , snake_case_ : Dict ):
UpperCamelCase_: Dict = """label""" if """label""" in features[0].keys() else """labels"""
UpperCamelCase_: int = [feature.pop(snake_case_ ) for feature in features]
UpperCamelCase_: Optional[Any] = len(snake_case_ )
UpperCamelCase_: List[str] = len(features[0]["""input_ids"""] )
UpperCamelCase_: Tuple = [
[{k: v[i] for k, v in feature.items()} for i in range(snake_case_ )] for feature in features
]
UpperCamelCase_: Any = list(chain(*snake_case_ ) )
UpperCamelCase_: List[Any] = self.tokenizer.pad(
snake_case_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
# Un-flatten
UpperCamelCase_: Tuple = {k: v.view(snake_case_ , snake_case_ , -1 ) for k, v in batch.items()}
# Add back labels
UpperCamelCase_: Optional[int] = torch.tensor(snake_case_ , dtype=torch.intaa )
return batch
def A__ ( ) -> Tuple:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase_: str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: List[str] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_swag""" , lowerCamelCase , lowerCamelCase )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase_: Dict = training_args.get_process_log_level()
logger.setLevel(lowerCamelCase )
datasets.utils.logging.set_verbosity(lowerCamelCase )
transformers.utils.logging.set_verbosity(lowerCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
UpperCamelCase_: List[str] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase_: List[str] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
UpperCamelCase_: List[str] = {}
if data_args.train_file is not None:
UpperCamelCase_: List[Any] = data_args.train_file
if data_args.validation_file is not None:
UpperCamelCase_: Optional[int] = data_args.validation_file
UpperCamelCase_: Any = data_args.train_file.split(""".""" )[-1]
UpperCamelCase_: Tuple = load_dataset(
lowerCamelCase , data_files=lowerCamelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
UpperCamelCase_: int = load_dataset(
"""swag""" , """regular""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase_: Optional[int] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase_: Union[str, Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase_: List[str] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
UpperCamelCase_: Union[str, Any] = [F'''ending{i}''' for i in range(4 )]
UpperCamelCase_: str = """sent1"""
UpperCamelCase_: List[str] = """sent2"""
if data_args.max_seq_length is None:
UpperCamelCase_: int = tokenizer.model_max_length
if max_seq_length > 10_24:
logger.warning(
"""The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"""
""" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"""
""" override this default with `--block_size xxx`.""" )
UpperCamelCase_: Optional[Any] = 10_24
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
UpperCamelCase_: Union[str, Any] = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(lowerCamelCase ):
UpperCamelCase_: Optional[Any] = [[context] * 4 for context in examples[context_name]]
UpperCamelCase_: Dict = examples[question_header_name]
UpperCamelCase_: List[str] = [
[F'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(lowerCamelCase )
]
# Flatten out
UpperCamelCase_: str = list(chain(*lowerCamelCase ) )
UpperCamelCase_: Any = list(chain(*lowerCamelCase ) )
# Tokenize
UpperCamelCase_: Any = tokenizer(
lowerCamelCase , lowerCamelCase , truncation=lowerCamelCase , max_length=lowerCamelCase , padding="""max_length""" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(lowerCamelCase ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
UpperCamelCase_: str = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
UpperCamelCase_: Union[str, Any] = min(len(lowerCamelCase ) , data_args.max_train_samples )
UpperCamelCase_: Optional[int] = train_dataset.select(range(lowerCamelCase ) )
with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
UpperCamelCase_: str = train_dataset.map(
lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
UpperCamelCase_: Dict = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
UpperCamelCase_: str = min(len(lowerCamelCase ) , data_args.max_eval_samples )
UpperCamelCase_: Tuple = eval_dataset.select(range(lowerCamelCase ) )
with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
UpperCamelCase_: str = eval_dataset.map(
lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
UpperCamelCase_: str = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=lowerCamelCase , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(lowerCamelCase ):
UpperCamelCase_, UpperCamelCase_: List[str] = eval_predictions
UpperCamelCase_: Optional[Any] = np.argmax(lowerCamelCase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
UpperCamelCase_: Union[str, Any] = Trainer(
model=lowerCamelCase , args=lowerCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowerCamelCase , data_collator=lowerCamelCase , compute_metrics=lowerCamelCase , )
# Training
if training_args.do_train:
UpperCamelCase_: List[Any] = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase_: int = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase_: str = last_checkpoint
UpperCamelCase_: Optional[Any] = trainer.train(resume_from_checkpoint=lowerCamelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
UpperCamelCase_: Tuple = train_result.metrics
UpperCamelCase_: Tuple = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase )
)
UpperCamelCase_: Optional[Any] = min(lowerCamelCase , len(lowerCamelCase ) )
trainer.log_metrics("""train""" , lowerCamelCase )
trainer.save_metrics("""train""" , lowerCamelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
UpperCamelCase_: Optional[Any] = trainer.evaluate()
UpperCamelCase_: Tuple = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase )
UpperCamelCase_: Optional[Any] = min(lowerCamelCase , len(lowerCamelCase ) )
trainer.log_metrics("""eval""" , lowerCamelCase )
trainer.save_metrics("""eval""" , lowerCamelCase )
UpperCamelCase_: Optional[int] = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """multiple-choice""",
"""dataset_tags""": """swag""",
"""dataset_args""": """regular""",
"""dataset""": """SWAG""",
"""language""": """en""",
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCamelCase )
else:
trainer.create_model_card(**lowerCamelCase )
def A__ ( lowerCamelCase ) -> Tuple:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
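# Hedged invocation sketch (added for illustration; flags mirror the dataclasses above):
# python run_swag.py --model_name_or_path bert-base-uncased --output_dir /tmp/swag_out \
#     --do_train --do_eval --max_seq_length 80 --per_device_train_batch_size 16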
| 670 | 0 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self : int ):
torch.manual_seed(0 )
UpperCamelCase_: Any = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
@property
def lowerCAmelCase__ ( self : Union[str, Any] ):
torch.manual_seed(0 )
UpperCamelCase_: Union[str, Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , )
return model
@property
def lowerCAmelCase__ ( self : Any ):
torch.manual_seed(0 )
UpperCamelCase_: List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(snake_case_ )
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: Union[str, Any] = self.dummy_uncond_unet
UpperCamelCase_: Optional[Any] = DDIMScheduler()
UpperCamelCase_: List[str] = self.dummy_vq_model
UpperCamelCase_: List[Any] = LDMPipeline(unet=snake_case_ , vqvae=snake_case_ , scheduler=snake_case_ )
ldm.to(snake_case_ )
ldm.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: str = torch.manual_seed(0 )
UpperCamelCase_: int = ldm(generator=snake_case_ , num_inference_steps=2 , output_type="""numpy""" ).images
UpperCamelCase_: Dict = torch.manual_seed(0 )
UpperCamelCase_: str = ldm(generator=snake_case_ , num_inference_steps=2 , output_type="""numpy""" , return_dict=snake_case_ )[0]
UpperCamelCase_: List[Any] = image[0, -3:, -3:, -1]
UpperCamelCase_: Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase_: str = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
UpperCamelCase_: Optional[Any] = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: Dict = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
ldm.to(snake_case_ )
ldm.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: List[str] = torch.manual_seed(0 )
UpperCamelCase_: Optional[int] = ldm(generator=snake_case_ , num_inference_steps=5 , output_type="""numpy""" ).images
UpperCamelCase_: List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCamelCase_: List[str] = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] )
UpperCamelCase_: Dict = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
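# Hedged usage sketch (added for illustration, mirroring the slow test above):
# from diffusers import LDMPipeline
# pipe = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
# image = pipe(num_inference_steps=50 ).images[0]
# image.save("""ldm_sample.png""" )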
| 711 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase_ : Union[str, Any] = logging.getLogger()
lowerCamelCase_ : List[str] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _UpperCamelCase ( _A ):
'''simple docstring'''
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : Dict ):
os.makedirs(snake_case_ , exist_ok=snake_case_ )
UpperCamelCase_: int = {"""source""": """What is love ?""", """target""": """life"""}
UpperCamelCase_: Tuple = {"""train""": 12, """val""": 2, """test""": 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
UpperCamelCase_: Tuple = """\n""".join([contents[field]] * n_lines[split] )
with open(os.path.join(snake_case_ , f'''{split}.{field}''' ) , """w""" ) as f:
f.write(snake_case_ )
def lowerCAmelCase__ ( self : Dict , snake_case_ : int , snake_case_ : str = "pytorch" ):
UpperCamelCase_: Optional[Any] = self.get_auto_remove_tmp_dir()
UpperCamelCase_: Dict = os.path.join(snake_case_ , """output""" )
UpperCamelCase_: Any = os.path.join(snake_case_ , """data""" )
self._create_dummy_data(data_dir=snake_case_ )
UpperCamelCase_: Union[str, Any] = f'''
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
'''.split()
if gpus > 0:
testargs.append(f'''--gpus={gpus}''' )
if is_apex_available():
testargs.append("""--fp16""" )
else:
testargs.append("""--gpus=0""" )
testargs.append("""--distributed_backend=ddp_cpu""" )
testargs.append("""--num_processes=2""" )
UpperCamelCase_: Optional[Any] = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(snake_case_ , env=self.get_env() )
UpperCamelCase_: Optional[int] = os.path.join(snake_case_ , """metrics.json""" )
with open(snake_case_ ) as f:
UpperCamelCase_: Any = json.load(snake_case_ )
return result
@require_torch_gpu
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: List[str] = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: Any = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_gpu
@require_ray
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: List[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
@require_ray
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: List[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
| 670 | 0 |
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
lowerCamelCase_ : Dict = """0.12""" # assumed parallelism: 8
@require_flax
@is_staging_test
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def lowerCAmelCase__ ( cls : str ):
UpperCamelCase_: Any = TOKEN
HfFolder.save_token(snake_case_ )
@classmethod
def lowerCAmelCase__ ( cls : List[Any] ):
try:
delete_repo(token=cls._token , repo_id="""test-model-flax""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-model-flax-org""" )
except HTTPError:
pass
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: Union[str, Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
UpperCamelCase_: Dict = FlaxBertModel(snake_case_ )
model.push_to_hub("""test-model-flax""" , use_auth_token=self._token )
UpperCamelCase_: Dict = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
UpperCamelCase_: List[str] = flatten_dict(unfreeze(model.params ) )
UpperCamelCase_: Any = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCamelCase_: Tuple = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(snake_case_ , 1e-3 , msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id="""test-model-flax""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(snake_case_ , repo_id="""test-model-flax""" , push_to_hub=snake_case_ , use_auth_token=self._token )
UpperCamelCase_: Tuple = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
UpperCamelCase_: int = flatten_dict(unfreeze(model.params ) )
UpperCamelCase_: List[str] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCamelCase_: str = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(snake_case_ , 1e-3 , msg=f'''{key} not identical''' )
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: Union[str, Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
UpperCamelCase_: Dict = FlaxBertModel(snake_case_ )
model.push_to_hub("""valid_org/test-model-flax-org""" , use_auth_token=self._token )
UpperCamelCase_: Any = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" )
UpperCamelCase_: List[Any] = flatten_dict(unfreeze(model.params ) )
UpperCamelCase_: Any = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCamelCase_: List[str] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(snake_case_ , 1e-3 , msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-model-flax-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
snake_case_ , repo_id="""valid_org/test-model-flax-org""" , push_to_hub=snake_case_ , use_auth_token=self._token )
UpperCamelCase_: str = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" )
UpperCamelCase_: List[str] = flatten_dict(unfreeze(model.params ) )
UpperCamelCase_: Tuple = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCamelCase_: Dict = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(snake_case_ , 1e-3 , msg=f'''{key} not identical''' )
def A__ ( lowerCamelCase , lowerCamelCase ) -> Optional[int]:
UpperCamelCase_: str = True
UpperCamelCase_: Optional[Any] = flatten_dict(modela.params )
UpperCamelCase_: int = flatten_dict(modela.params )
for key in flat_params_a.keys():
if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1E-4:
UpperCamelCase_: Union[str, Any] = False
return models_are_equal
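# Added illustrative sketch: the per-leaf comparison used by the tests and the helper
# above. `flax.traverse_util.flatten_dict` turns a nested parameter tree into a flat
# dict keyed by tuples, so two checkpoints can be diffed leaf by leaf. The trees below
# are made-up example data, not weights from this test file.
from flax.traverse_util import flatten_dict as _flatten_dict
import numpy as _np
_tree_a = {"dense": {"kernel": _np.ones((2, 2)), "bias": _np.zeros(2)}}
_tree_b = {"dense": {"kernel": _np.ones((2, 2)), "bias": _np.zeros(2)}}
_flat_a, _flat_b = _flatten_dict(_tree_a), _flatten_dict(_tree_b)
# Keys are tuples such as ("dense", "kernel"); each leaf must agree within a tolerance.
assert all(_np.abs(_flat_a[k] - _flat_b[k]).sum() <= 1e-4 for k in _flat_a)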
@require_flax
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: Optional[int] = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
UpperCamelCase_: int = FlaxBertModel(snake_case_ )
UpperCamelCase_: Any = """bert"""
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(snake_case_ , snake_case_ ) )
with self.assertRaises(snake_case_ ):
UpperCamelCase_: List[Any] = FlaxBertModel.from_pretrained(snake_case_ )
UpperCamelCase_: Any = FlaxBertModel.from_pretrained(snake_case_ , subfolder=snake_case_ )
self.assertTrue(check_models_equal(snake_case_ , snake_case_ ) )
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: int = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
UpperCamelCase_: Tuple = FlaxBertModel(snake_case_ )
UpperCamelCase_: Tuple = """bert"""
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(snake_case_ , snake_case_ ) , max_shard_size="""10KB""" )
with self.assertRaises(snake_case_ ):
UpperCamelCase_: Optional[int] = FlaxBertModel.from_pretrained(snake_case_ )
UpperCamelCase_: List[str] = FlaxBertModel.from_pretrained(snake_case_ , subfolder=snake_case_ )
self.assertTrue(check_models_equal(snake_case_ , snake_case_ ) )
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: List[str] = """bert"""
UpperCamelCase_: Optional[int] = """hf-internal-testing/tiny-random-bert-subfolder"""
with self.assertRaises(snake_case_ ):
UpperCamelCase_: int = FlaxBertModel.from_pretrained(snake_case_ )
UpperCamelCase_: List[str] = FlaxBertModel.from_pretrained(snake_case_ , subfolder=snake_case_ )
self.assertIsNotNone(snake_case_ )
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Optional[Any] = """bert"""
UpperCamelCase_: str = """hf-internal-testing/tiny-random-bert-sharded-subfolder"""
with self.assertRaises(snake_case_ ):
UpperCamelCase_: Optional[int] = FlaxBertModel.from_pretrained(snake_case_ )
UpperCamelCase_: int = FlaxBertModel.from_pretrained(snake_case_ , subfolder=snake_case_ )
self.assertIsNotNone(snake_case_ )
| 712 |
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : List[str] , snake_case_ : int , snake_case_ : Optional[Any]=None , snake_case_ : List[str]=None ):
UpperCamelCase_: List[Any] = data
UpperCamelCase_: List[Any] = previous
UpperCamelCase_: Tuple = next_node
def __str__( self : Dict ):
return f'''{self.data}'''
def lowerCAmelCase__ ( self : List[str] ):
return self.data
def lowerCAmelCase__ ( self : Any ):
return self.next
def lowerCAmelCase__ ( self : List[str] ):
return self.previous
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , snake_case_ : int ):
UpperCamelCase_: Union[str, Any] = head
def __iter__( self : Union[str, Any] ):
return self
def lowerCAmelCase__ ( self : Union[str, Any] ):
if not self.current:
raise StopIteration
else:
UpperCamelCase_: Dict = self.current.get_data()
UpperCamelCase_: Tuple = self.current.get_next()
return value
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : int ):
UpperCamelCase_: Optional[int] = None # First node in list
UpperCamelCase_: Dict = None # Last node in list
def __str__( self : Tuple ):
UpperCamelCase_: int = self.head
UpperCamelCase_: Tuple = []
while current is not None:
nodes.append(current.get_data() )
UpperCamelCase_: List[str] = current.get_next()
return " ".join(str(snake_case_ ) for node in nodes )
def __contains__( self : int , snake_case_ : int ):
UpperCamelCase_: Optional[Any] = self.head
while current:
if current.get_data() == value:
return True
UpperCamelCase_: Any = current.get_next()
return False
def __iter__( self : Any ):
return LinkedListIterator(self.head )
def lowerCAmelCase__ ( self : Tuple ):
if self.head:
return self.head.get_data()
return None
def lowerCAmelCase__ ( self : Optional[Any] ):
if self.tail:
return self.tail.get_data()
return None
def lowerCAmelCase__ ( self : Optional[int] , snake_case_ : Node ):
if self.head is None:
UpperCamelCase_: Tuple = node
UpperCamelCase_: Optional[int] = node
else:
self.insert_before_node(self.head , snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : Node ):
if self.head is None:
self.set_head(snake_case_ )
else:
self.insert_after_node(self.tail , snake_case_ )
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : int ):
UpperCamelCase_: Any = Node(snake_case_ )
if self.head is None:
self.set_head(snake_case_ )
else:
self.set_tail(snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : Node , snake_case_ : Node ):
UpperCamelCase_: str = node
UpperCamelCase_: int = node.previous
if node.get_previous() is None:
UpperCamelCase_: int = node_to_insert
else:
UpperCamelCase_: Dict = node_to_insert
UpperCamelCase_: int = node_to_insert
def lowerCAmelCase__ ( self : Dict , snake_case_ : Node , snake_case_ : Node ):
UpperCamelCase_: Tuple = node
UpperCamelCase_: Dict = node.next
if node.get_next() is None:
UpperCamelCase_: Union[str, Any] = node_to_insert
else:
UpperCamelCase_: str = node_to_insert
UpperCamelCase_: int = node_to_insert
def lowerCAmelCase__ ( self : Tuple , snake_case_ : int , snake_case_ : int ):
UpperCamelCase_: Union[str, Any] = 1
UpperCamelCase_: List[str] = Node(snake_case_ )
UpperCamelCase_: Optional[Any] = self.head
while node:
if current_position == position:
self.insert_before_node(snake_case_ , snake_case_ )
return
current_position += 1
UpperCamelCase_: Dict = node.next
self.insert_after_node(self.tail , snake_case_ )
def lowerCAmelCase__ ( self : int , snake_case_ : int ):
UpperCamelCase_: Union[str, Any] = self.head
while node:
if node.get_data() == item:
return node
UpperCamelCase_: List[Any] = node.get_next()
raise Exception("""Node not found""" )
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : List[str] ):
if (node := self.get_node(snake_case_ )) is not None:
if node == self.head:
UpperCamelCase_: Optional[int] = self.head.get_next()
if node == self.tail:
UpperCamelCase_: Union[str, Any] = self.tail.get_previous()
self.remove_node_pointers(snake_case_ )
@staticmethod
def lowerCAmelCase__ ( snake_case_ : Node ):
if node.get_next():
UpperCamelCase_: str = node.previous
if node.get_previous():
UpperCamelCase_: int = node.next
UpperCamelCase_: List[str] = None
UpperCamelCase_: int = None
def lowerCAmelCase__ ( self : str ):
return self.head is None
def A__ ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
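# Added illustrative sketch: the pointer rewiring behind the `insert_before_node` logic
# above, written with explicit attribute names. This is a minimal standalone example,
# not the (name-mangled) classes defined in this file.
class _DNode:
    def __init__(self, data):
        self.data, self.previous, self.next = data, None, None
def _insert_before(node, node_to_insert):
    # Wire the new node in between `node.previous` and `node`.
    node_to_insert.next = node
    node_to_insert.previous = node.previous
    if node.previous is not None:
        node.previous.next = node_to_insert
    node.previous = node_to_insert
_a, _b = _DNode(1), _DNode(3)
_a.next, _b.previous = _b, _a
_insert_before(_b, _DNode(2))  # the list is now 1 <-> 2 <-> 3
assert _a.next.data == 2 and _b.previous.data == 2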
| 670 | 0 |
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
lowerCamelCase_ : Tuple = logging.getLogger(__name__)
class _UpperCamelCase ( _A ):
__UpperCamelCase : Any = """token-classification"""
def __init__( self : List[Any] , snake_case_ : int ):
if type(snake_case_ ) == dict:
UpperCamelCase_: List[str] = Namespace(**snake_case_ )
UpperCamelCase_: Any = import_module("""tasks""" )
try:
UpperCamelCase_: List[Any] = getattr(snake_case_ , hparams.task_type )
UpperCamelCase_: TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
UpperCamelCase_: Tuple = self.token_classification_task.get_labels(hparams.labels )
UpperCamelCase_: Tuple = CrossEntropyLoss().ignore_index
super().__init__(snake_case_ , len(self.labels ) , self.mode )
def lowerCAmelCase__ ( self : List[Any] , **snake_case_ : str ):
return self.model(**snake_case_ )
def lowerCAmelCase__ ( self : Dict , snake_case_ : Dict , snake_case_ : int ):
UpperCamelCase_: List[Any] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type != "distilbert":
UpperCamelCase_: Tuple = (
batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
        ) # XLM and RoBERTa don't use token_type_ids
UpperCamelCase_: Union[str, Any] = self(**snake_case_ )
UpperCamelCase_: List[Any] = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: List[str] = self.hparams
for mode in ["train", "dev", "test"]:
UpperCamelCase_: str = self._feature_file(snake_case_ )
if os.path.exists(snake_case_ ) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" , snake_case_ )
UpperCamelCase_: Optional[int] = torch.load(snake_case_ )
else:
logger.info("""Creating features from dataset file at %s""" , args.data_dir )
UpperCamelCase_: Optional[Any] = self.token_classification_task.read_examples_from_file(args.data_dir , snake_case_ )
UpperCamelCase_: Tuple = self.token_classification_task.convert_examples_to_features(
snake_case_ , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["""xlnet"""] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["""xlnet"""] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=snake_case_ , pad_on_left=bool(self.config.model_type in ["""xlnet"""] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info("""Saving features into cached file %s""" , snake_case_ )
torch.save(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : str , snake_case_ : int , snake_case_ : int , snake_case_ : bool = False ):
UpperCamelCase_: int = self._feature_file(snake_case_ )
logger.info("""Loading features from cached file %s""" , snake_case_ )
UpperCamelCase_: Optional[int] = torch.load(snake_case_ )
UpperCamelCase_: Tuple = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
UpperCamelCase_: Tuple = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
UpperCamelCase_: str = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
UpperCamelCase_: Union[str, Any] = torch.tensor([0 for f in features] , dtype=torch.long )
        # HACK (we will stop using this soon)
UpperCamelCase_: List[Any] = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) , batch_size=snake_case_ )
def lowerCAmelCase__ ( self : Union[str, Any] , snake_case_ : str , snake_case_ : List[Any] ):
"""Compute validation""" ""
UpperCamelCase_: str = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type != "distilbert":
UpperCamelCase_: Optional[Any] = (
batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
        ) # XLM and RoBERTa don't use token_type_ids
UpperCamelCase_: int = self(**snake_case_ )
UpperCamelCase_: Optional[int] = outputs[:2]
UpperCamelCase_: Tuple = logits.detach().cpu().numpy()
UpperCamelCase_: List[Any] = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def lowerCAmelCase__ ( self : Dict , snake_case_ : str ):
UpperCamelCase_: int = torch.stack([x["""val_loss"""] for x in outputs] ).mean()
UpperCamelCase_: List[Any] = np.concatenate([x["""pred"""] for x in outputs] , axis=0 )
UpperCamelCase_: List[Any] = np.argmax(snake_case_ , axis=2 )
UpperCamelCase_: List[Any] = np.concatenate([x["""target"""] for x in outputs] , axis=0 )
UpperCamelCase_: List[Any] = dict(enumerate(self.labels ) )
UpperCamelCase_: Union[str, Any] = [[] for _ in range(out_label_ids.shape[0] )]
UpperCamelCase_: int = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
UpperCamelCase_: Dict = {
"""val_loss""": val_loss_mean,
"""accuracy_score""": accuracy_score(snake_case_ , snake_case_ ),
"""precision""": precision_score(snake_case_ , snake_case_ ),
"""recall""": recall_score(snake_case_ , snake_case_ ),
"""f1""": fa_score(snake_case_ , snake_case_ ),
}
UpperCamelCase_: List[str] = dict(results.items() )
UpperCamelCase_: List[str] = results
return ret, preds_list, out_label_list
def lowerCAmelCase__ ( self : Tuple , snake_case_ : Optional[Any] ):
# when stable
UpperCamelCase_: Dict = self._eval_end(snake_case_ )
UpperCamelCase_: Tuple = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def lowerCAmelCase__ ( self : Dict , snake_case_ : Union[str, Any] ):
# updating to test_epoch_end instead of deprecated test_end
UpperCamelCase_: Union[str, Any] = self._eval_end(snake_case_ )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
UpperCamelCase_: Dict = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def lowerCAmelCase__ ( snake_case_ : List[Any] , snake_case_ : Optional[int] ):
# Add NER specific options
BaseTransformer.add_model_specific_args(snake_case_ , snake_case_ )
parser.add_argument(
"""--task_type""" , default="""NER""" , type=snake_case_ , help="""Task type to fine tune in training (e.g. NER, POS, etc)""" )
parser.add_argument(
"""--max_seq_length""" , default=128 , type=snake_case_ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--labels""" , default="""""" , type=snake_case_ , help="""Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.""" , )
parser.add_argument(
"""--gpus""" , default=0 , type=snake_case_ , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
return parser
if __name__ == "__main__":
lowerCamelCase_ : List[str] = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
lowerCamelCase_ : str = NERTransformer.add_model_specific_args(parser, os.getcwd())
lowerCamelCase_ : Optional[int] = parser.parse_args()
lowerCamelCase_ : List[Any] = NERTransformer(args)
lowerCamelCase_ : Dict = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
lowerCamelCase_ : Optional[int] = sorted(glob.glob(os.path.join(args.output_dir, """checkpoint-epoch=*.ckpt"""), recursive=True))
lowerCamelCase_ : List[Any] = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
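# Added illustrative sketch: the label-alignment loop from `_eval_end` above with
# explicit names. Positions carrying the pad label id (CrossEntropyLoss().ignore_index,
# i.e. -100) are skipped; everything else is mapped through the id-to-label table.
# The arrays below are made-up example data.
import numpy as _np
_pad_token_label_id = -100
_label_map = {0: "O", 1: "B-PER"}
_preds = _np.array([[0, 1, 0]])  # argmax over the logits
_out_label_ids = _np.array([[0, 1, _pad_token_label_id]])  # -100 marks sub-words/padding
_preds_list = [[] for _ in range(_out_label_ids.shape[0])]
_out_label_list = [[] for _ in range(_out_label_ids.shape[0])]
for _i in range(_out_label_ids.shape[0]):
    for _j in range(_out_label_ids.shape[1]):
        if _out_label_ids[_i, _j] != _pad_token_label_id:
            _out_label_list[_i].append(_label_map[int(_out_label_ids[_i, _j])])
            _preds_list[_i].append(_label_map[int(_preds[_i, _j])])
assert _preds_list == [["O", "B-PER"]] and _out_label_list == [["O", "B-PER"]]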
| 713 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase_ : List[str] = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[Any] = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
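# Added illustrative sketch: the deferred-import idea behind `_LazyModule`, expressed
# with the module-level `__getattr__` hook from PEP 562. `_LazyModule` itself is
# HuggingFace-internal machinery; this shows only the general pattern, not its actual
# implementation.
import importlib
_SUBMODULE_BY_NAME = {"MgpstrConfig": "configuration_mgp_str"}
def __getattr__(name):
    # Import the submodule only when one of its exported names is first accessed.
    if name in _SUBMODULE_BY_NAME:
        module = importlib.import_module("." + _SUBMODULE_BY_NAME[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")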
| 670 | 0 |
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
lowerCamelCase_ : List[str] = {
"""gwf-440k""": {
"""url""": """https://model-server.zqevans2.workers.dev/gwf-440k.ckpt""",
"""sample_rate""": 4_80_00,
"""sample_size""": 6_55_36,
},
"""jmann-small-190k""": {
"""url""": """https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt""",
"""sample_rate""": 4_80_00,
"""sample_size""": 6_55_36,
},
"""jmann-large-580k""": {
"""url""": """https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt""",
"""sample_rate""": 4_80_00,
"""sample_size""": 13_10_72,
},
"""maestro-uncond-150k""": {
"""url""": """https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt""",
"""sample_rate""": 1_60_00,
"""sample_size""": 6_55_36,
},
"""unlocked-uncond-250k""": {
"""url""": """https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt""",
"""sample_rate""": 1_60_00,
"""sample_size""": 6_55_36,
},
"""honk-140k""": {
"""url""": """https://model-server.zqevans2.workers.dev/honk-140k.ckpt""",
"""sample_rate""": 1_60_00,
"""sample_size""": 6_55_36,
},
}
def A__ ( lowerCamelCase , lowerCamelCase ) -> Any:
return torch.atana(lowerCamelCase , lowerCamelCase ) / math.pi * 2
def A__ ( lowerCamelCase ) -> List[str]:
UpperCamelCase_: int = torch.sin(t * math.pi / 2 ) ** 2
UpperCamelCase_: Optional[Any] = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(lowerCamelCase , lowerCamelCase )
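# Added illustrative sketch: what the two helpers above compute, in plain math. With
# sigma = sin(t*pi/2)**2 and alpha = sqrt(1 - sigma**2), `alpha_sigma_to_t` maps back
# via atan2(sigma, alpha) * 2 / pi, warping a uniform t into the "crash" schedule.
import math as _math
def _crash(t):
    sigma = _math.sin(t * _math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return _math.atan2(sigma, alpha) / _math.pi * 2
assert abs(_crash(0.0) - 0.0) < 1e-12
assert abs(_crash(1.0) - 1.0) < 1e-12
assert abs(_crash(0.5) - 1 / 3) < 1e-12  # the midpoint is pulled down to 1/3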
class _UpperCamelCase ( _A ):
'''simple docstring'''
pass
class _UpperCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Any , snake_case_ : Any ):
super().__init__()
UpperCamelCase_: Dict = DiffusionAttnUnetaD(snake_case_ , n_attn_layers=4 )
UpperCamelCase_: List[str] = deepcopy(self.diffusion )
UpperCamelCase_: int = torch.quasirandom.SobolEngine(1 , scramble=snake_case_ )
def A__ ( lowerCamelCase ) -> int:
UpperCamelCase_: Union[str, Any] = MODELS_MAP[model_name]["""url"""]
os.system(F'''wget {url} ./''' )
return F'''./{model_name}.ckpt'''
lowerCamelCase_ : str = {
"""1""": """resnets.0""",
"""2""": """attentions.0""",
"""3""": """resnets.1""",
"""4""": """attentions.1""",
"""5""": """resnets.2""",
"""6""": """attentions.2""",
}
lowerCamelCase_ : List[Any] = {
"""8""": """resnets.0""",
"""9""": """attentions.0""",
"""10""": """resnets.1""",
"""11""": """attentions.1""",
"""12""": """resnets.2""",
"""13""": """attentions.2""",
}
lowerCamelCase_ : Tuple = {
"""1""": """resnets.0""",
"""2""": """attentions.0""",
"""3""": """resnets.1""",
"""4""": """attentions.1""",
"""5""": """resnets.2""",
"""6""": """attentions.2""",
"""8""": """resnets.3""",
"""9""": """attentions.3""",
"""10""": """resnets.4""",
"""11""": """attentions.4""",
"""12""": """resnets.5""",
"""13""": """attentions.5""",
}
lowerCamelCase_ : List[Any] = {
"""0""": """resnets.0""",
"""1""": """resnets.1""",
"""2""": """resnets.2""",
"""4""": """resnets.0""",
"""5""": """resnets.1""",
"""6""": """resnets.2""",
}
lowerCamelCase_ : int = {
"""skip""": """conv_skip""",
"""main.0""": """conv_1""",
"""main.1""": """group_norm_1""",
"""main.3""": """conv_2""",
"""main.4""": """group_norm_2""",
}
lowerCamelCase_ : Tuple = {
"""norm""": """group_norm""",
"""qkv_proj""": ["""query""", """key""", """value"""],
"""out_proj""": ["""proj_attn"""],
}
def A__ ( lowerCamelCase ) -> str:
if name.startswith("""skip""" ):
return name.replace("""skip""" , RES_CONV_MAP["""skip"""] )
# name has to be of format main.{digit}
if not name.startswith("""main.""" ):
raise ValueError(F'''ResConvBlock error with {name}''' )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def A__ ( lowerCamelCase ) -> Dict:
for key, value in ATTN_MAP.items():
if name.startswith(lowerCamelCase ) and not isinstance(lowerCamelCase , lowerCamelCase ):
return name.replace(lowerCamelCase , lowerCamelCase )
elif name.startswith(lowerCamelCase ):
return [name.replace(lowerCamelCase , lowerCamelCase ) for v in value]
raise ValueError(F'''Attn error with {name}''' )
def A__ ( lowerCamelCase , lowerCamelCase=13 ) -> Tuple:
UpperCamelCase_: Optional[Any] = input_string
if string.split(""".""" )[0] == "timestep_embed":
return string.replace("""timestep_embed""" , """time_proj""" )
UpperCamelCase_: str = 0
if string.startswith("""net.3.""" ):
depth += 1
UpperCamelCase_: List[str] = string[6:]
elif string.startswith("""net.""" ):
UpperCamelCase_: int = string[4:]
while string.startswith("""main.7.""" ):
depth += 1
UpperCamelCase_: Dict = string[7:]
if string.startswith("""main.""" ):
UpperCamelCase_: str = string[5:]
# mid block
if string[:2].isdigit():
UpperCamelCase_: List[Any] = string[:2]
UpperCamelCase_: Optional[int] = string[2:]
else:
UpperCamelCase_: Union[str, Any] = string[0]
UpperCamelCase_: List[Any] = string[1:]
if depth == max_depth:
UpperCamelCase_: Tuple = MID_NUM_TO_LAYER[layer_num]
UpperCamelCase_: Optional[int] = """mid_block"""
elif depth > 0 and int(lowerCamelCase ) < 7:
UpperCamelCase_: str = DOWN_NUM_TO_LAYER[layer_num]
UpperCamelCase_: Tuple = F'''down_blocks.{depth}'''
elif depth > 0 and int(lowerCamelCase ) > 7:
UpperCamelCase_: Dict = UP_NUM_TO_LAYER[layer_num]
UpperCamelCase_: List[Any] = F'''up_blocks.{max_depth - depth - 1}'''
elif depth == 0:
UpperCamelCase_: List[Any] = DEPTH_0_TO_LAYER[layer_num]
UpperCamelCase_: int = F'''up_blocks.{max_depth - 1}''' if int(lowerCamelCase ) > 3 else """down_blocks.0"""
if not string_left.startswith(""".""" ):
raise ValueError(F'''Naming error with {input_string} and string_left: {string_left}.''' )
UpperCamelCase_: int = string_left[1:]
if "resnets" in new_layer:
UpperCamelCase_: int = convert_resconv_naming(lowerCamelCase )
elif "attentions" in new_layer:
UpperCamelCase_: str = convert_attn_naming(lowerCamelCase )
UpperCamelCase_: List[Any] = new_string_left
if not isinstance(lowerCamelCase , lowerCamelCase ):
UpperCamelCase_: int = prefix + """.""" + new_layer + """.""" + string_left
else:
UpperCamelCase_: Tuple = [prefix + """.""" + new_layer + """.""" + s for s in string_left]
return new_string
def A__ ( lowerCamelCase ) -> Optional[int]:
UpperCamelCase_: Tuple = {}
for k, v in state_dict.items():
if k.endswith("""kernel""" ):
            # up- and downsample layers don't have trainable weights
continue
UpperCamelCase_: int = rename(lowerCamelCase )
# check if we need to transform from Conv => Linear for attention
if isinstance(lowerCamelCase , lowerCamelCase ):
UpperCamelCase_: int = transform_conv_attns(lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
UpperCamelCase_: Dict = v
return new_state_dict
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> int:
if len(lowerCamelCase ) == 1:
if len(v.shape ) == 3:
# weight
UpperCamelCase_: Any = v[:, :, 0]
else:
# bias
UpperCamelCase_: Any = v
else:
# qkv matrices
UpperCamelCase_: int = v.shape[0]
UpperCamelCase_: Optional[Any] = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
UpperCamelCase_: List[Any] = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
UpperCamelCase_: List[str] = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
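# Added illustrative sketch: the Conv1d -> Linear weight transform performed above. A
# qkv projection stored as one conv weight of shape (3*d, d, 1) is split into three
# (d, d) linear weights by chunking the first axis and dropping the kernel dimension.
import torch as _torch
_d = 4
_qkv_conv_weight = _torch.randn(3 * _d, _d, 1)
_q, _k, _v = (_qkv_conv_weight[_i * _d : (_i + 1) * _d, :, 0] for _i in range(3))
assert _q.shape == _k.shape == _v.shape == (_d, _d)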
def A__ ( lowerCamelCase ) -> Tuple:
UpperCamelCase_: Union[str, Any] = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
UpperCamelCase_: str = args.model_path.split("""/""" )[-1].split(""".""" )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), F'''Make sure to provide one of the official model names {MODELS_MAP.keys()}'''
UpperCamelCase_: Any = download(lowerCamelCase )
UpperCamelCase_: List[Any] = MODELS_MAP[model_name]["""sample_rate"""]
UpperCamelCase_: Union[str, Any] = MODELS_MAP[model_name]["""sample_size"""]
UpperCamelCase_: Union[str, Any] = Object()
UpperCamelCase_: str = sample_size
UpperCamelCase_: Optional[int] = sample_rate
UpperCamelCase_: int = 0
UpperCamelCase_: int = UNetaDModel(sample_size=lowerCamelCase , sample_rate=lowerCamelCase )
UpperCamelCase_: Tuple = diffusers_model.state_dict()
UpperCamelCase_: Any = DiffusionUncond(lowerCamelCase )
orig_model.load_state_dict(torch.load(args.model_path , map_location=lowerCamelCase )["""state_dict"""] )
UpperCamelCase_: Union[str, Any] = orig_model.diffusion_ema.eval()
UpperCamelCase_: Dict = orig_model.state_dict()
UpperCamelCase_: Union[str, Any] = rename_orig_weights(lowerCamelCase )
UpperCamelCase_: int = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
UpperCamelCase_: str = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(lowerCamelCase ) == 0, F'''Problem with {renamed_minus_diffusers}'''
assert all(k.endswith("""kernel""" ) for k in list(lowerCamelCase ) ), F'''Problem with {diffusers_minus_renamed}'''
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), F'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'''
if key == "time_proj.weight":
UpperCamelCase_: Tuple = value.squeeze()
UpperCamelCase_: Optional[int] = value
diffusers_model.load_state_dict(lowerCamelCase )
UpperCamelCase_: str = 1_00
UpperCamelCase_: Optional[Any] = 33
UpperCamelCase_: List[str] = IPNDMScheduler(num_train_timesteps=lowerCamelCase )
UpperCamelCase_: Union[str, Any] = torch.manual_seed(lowerCamelCase )
UpperCamelCase_: List[str] = torch.randn([1, 2, config.sample_size] , generator=lowerCamelCase ).to(lowerCamelCase )
UpperCamelCase_: int = torch.linspace(1 , 0 , steps + 1 , device=lowerCamelCase )[:-1]
UpperCamelCase_: Dict = get_crash_schedule(lowerCamelCase )
UpperCamelCase_: List[str] = DanceDiffusionPipeline(unet=lowerCamelCase , scheduler=lowerCamelCase )
UpperCamelCase_: str = torch.manual_seed(33 )
UpperCamelCase_: Any = pipe(num_inference_steps=lowerCamelCase , generator=lowerCamelCase ).audios
UpperCamelCase_: str = sampling.iplms_sample(lowerCamelCase , lowerCamelCase , lowerCamelCase , {} )
UpperCamelCase_: int = generated.clamp(-1 , 1 )
UpperCamelCase_: str = (generated - audio).abs().sum()
UpperCamelCase_: Dict = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print("""Diff sum""" , lowerCamelCase )
print("""Diff max""" , lowerCamelCase )
assert diff_max < 1E-3, F'''Diff max: {diff_max} is too much :-/'''
print(F'''Conversion for {model_name} successful!''' )
if __name__ == "__main__":
lowerCamelCase_ : int = argparse.ArgumentParser()
parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""")
lowerCamelCase_ : Dict = parser.parse_args()
main(args)
| 714 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self : int ):
torch.manual_seed(0 )
UpperCamelCase_: Any = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
@property
def lowerCAmelCase__ ( self : Union[str, Any] ):
torch.manual_seed(0 )
UpperCamelCase_: Union[str, Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , )
return model
@property
def lowerCAmelCase__ ( self : Any ):
torch.manual_seed(0 )
UpperCamelCase_: List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(snake_case_ )
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: Union[str, Any] = self.dummy_uncond_unet
UpperCamelCase_: Optional[Any] = DDIMScheduler()
UpperCamelCase_: List[str] = self.dummy_vq_model
UpperCamelCase_: List[Any] = LDMPipeline(unet=snake_case_ , vqvae=snake_case_ , scheduler=snake_case_ )
ldm.to(snake_case_ )
ldm.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: str = torch.manual_seed(0 )
UpperCamelCase_: int = ldm(generator=snake_case_ , num_inference_steps=2 , output_type="""numpy""" ).images
UpperCamelCase_: Dict = torch.manual_seed(0 )
UpperCamelCase_: str = ldm(generator=snake_case_ , num_inference_steps=2 , output_type="""numpy""" , return_dict=snake_case_ )[0]
UpperCamelCase_: List[Any] = image[0, -3:, -3:, -1]
UpperCamelCase_: Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase_: str = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
UpperCamelCase_: Optional[Any] = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: Dict = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
ldm.to(snake_case_ )
ldm.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: List[str] = torch.manual_seed(0 )
UpperCamelCase_: Optional[int] = ldm(generator=snake_case_ , num_inference_steps=5 , output_type="""numpy""" ).images
UpperCamelCase_: List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCamelCase_: List[str] = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] )
UpperCamelCase_: Dict = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
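# Added illustrative sketch: why both tests above call `torch.manual_seed` immediately
# before each pipeline invocation. Re-seeding makes the sampled latents identical, so
# two runs (or the tuple vs. dict return paths) can be compared element-wise.
import torch as _torch
_gen_a = _torch.manual_seed(0)
_x_a = _torch.randn(2, 3, generator=_gen_a)
_gen_b = _torch.manual_seed(0)
_x_b = _torch.randn(2, 3, generator=_gen_b)
assert _torch.equal(_x_a, _x_b)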
| 670 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase_ : List[str] = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : str = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 715 |
def A__ ( lowerCamelCase = 50 ) -> int:
    """Count the rows of `length` unit squares in which some squares are replaced by
    tiles of length 2, 3 or 4, where each arrangement uses a single tile length and
    at least one tile, summed over the three tile lengths (the setup of Project
    Euler problem 116)."""
    # ways[n][t - 2] = number of arrangements of a row of length n using tiles of length t
    UpperCamelCase_: List[Any] = [[0] * 3 for _ in range(length + 1 )]
    for row_length in range(length + 1 ):
        for tile_length in range(2 , 5 ):
            for tile_start in range(row_length - tile_length + 1 ):
                # Place the first tile at offset `tile_start`; the rest of the row is a
                # shorter, already-counted arrangement, plus 1 for "no further tile".
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 670 | 0 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class _UpperCamelCase ( _A , _A ):
'''simple docstring'''
@register_to_config
def __init__( self : List[str] , snake_case_ : int = 128 , snake_case_ : int = 256 , snake_case_ : float = 2000.0 , snake_case_ : int = 768 , snake_case_ : int = 12 , snake_case_ : int = 12 , snake_case_ : int = 64 , snake_case_ : int = 2048 , snake_case_ : float = 0.1 , ):
super().__init__()
UpperCamelCase_: Tuple = nn.Sequential(
nn.Linear(snake_case_ , d_model * 4 , bias=snake_case_ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=snake_case_ ) , nn.SiLU() , )
UpperCamelCase_: Any = nn.Embedding(snake_case_ , snake_case_ )
UpperCamelCase_: Tuple = False
UpperCamelCase_: Optional[int] = nn.Linear(snake_case_ , snake_case_ , bias=snake_case_ )
UpperCamelCase_: Tuple = nn.Dropout(p=snake_case_ )
UpperCamelCase_: int = nn.ModuleList()
for lyr_num in range(snake_case_ ):
# FiLM conditional T5 decoder
UpperCamelCase_: str = DecoderLayer(d_model=snake_case_ , d_kv=snake_case_ , num_heads=snake_case_ , d_ff=snake_case_ , dropout_rate=snake_case_ )
self.decoders.append(snake_case_ )
UpperCamelCase_: Union[str, Any] = TaLayerNorm(snake_case_ )
UpperCamelCase_: Tuple = nn.Dropout(p=snake_case_ )
UpperCamelCase_: Any = nn.Linear(snake_case_ , snake_case_ , bias=snake_case_ )
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : Optional[Any] , snake_case_ : List[Any] ):
UpperCamelCase_: List[str] = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def lowerCAmelCase__ ( self : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : Optional[Any] , snake_case_ : int ):
UpperCamelCase_: str = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
UpperCamelCase_: Any = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
UpperCamelCase_: str = self.conditioning_emb(snake_case_ ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
UpperCamelCase_: Optional[Any] = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
UpperCamelCase_: Union[str, Any] = torch.broadcast_to(
torch.arange(snake_case_ , device=decoder_input_tokens.device ) , (batch, seq_length) , )
UpperCamelCase_: List[str] = self.position_encoding(snake_case_ )
UpperCamelCase_: List[Any] = self.continuous_inputs_projection(snake_case_ )
inputs += position_encodings
UpperCamelCase_: Union[str, Any] = self.dropout(snake_case_ )
# decoder: No padding present.
UpperCamelCase_: List[Any] = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
UpperCamelCase_: Tuple = [(x, self.encoder_decoder_mask(snake_case_ , snake_case_ )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
UpperCamelCase_: List[Any] = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
UpperCamelCase_: int = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
UpperCamelCase_: Union[str, Any] = lyr(
snake_case_ , conditioning_emb=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , )[0]
UpperCamelCase_: Any = self.decoder_norm(snake_case_ )
UpperCamelCase_: Union[str, Any] = self.post_dropout(snake_case_ )
UpperCamelCase_: List[Any] = self.spec_out(snake_case_ )
return spec_out
class _UpperCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , snake_case_ : Optional[int] , snake_case_ : Any , snake_case_ : Optional[int] , snake_case_ : Optional[int] , snake_case_ : Dict , snake_case_ : List[str]=1e-6 ):
super().__init__()
UpperCamelCase_: Union[str, Any] = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=snake_case_ , d_kv=snake_case_ , num_heads=snake_case_ , dropout_rate=snake_case_ ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=snake_case_ , d_kv=snake_case_ , num_heads=snake_case_ , dropout_rate=snake_case_ , layer_norm_epsilon=snake_case_ , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=snake_case_ , d_ff=snake_case_ , dropout_rate=snake_case_ , layer_norm_epsilon=snake_case_ ) )
def lowerCAmelCase__ ( self : Any , snake_case_ : Any , snake_case_ : int=None , snake_case_ : Dict=None , snake_case_ : Tuple=None , snake_case_ : str=None , snake_case_ : Union[str, Any]=None , ):
UpperCamelCase_: str = self.layer[0](
snake_case_ , conditioning_emb=snake_case_ , attention_mask=snake_case_ , )
if encoder_hidden_states is not None:
UpperCamelCase_: Optional[Any] = torch.where(encoder_attention_mask > 0 , 0 , -1e10 ).to(
encoder_hidden_states.dtype )
UpperCamelCase_: Any = self.layer[1](
snake_case_ , key_value_states=snake_case_ , attention_mask=snake_case_ , )
# Apply Film Conditional Feed Forward layer
UpperCamelCase_: Tuple = self.layer[-1](snake_case_ , snake_case_ )
return (hidden_states,)
class _UpperCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] , snake_case_ : int , snake_case_ : Optional[int] , snake_case_ : Tuple , snake_case_ : Any ):
super().__init__()
UpperCamelCase_: Optional[Any] = TaLayerNorm(snake_case_ )
UpperCamelCase_: Dict = TaFiLMLayer(in_features=d_model * 4 , out_features=snake_case_ )
UpperCamelCase_: Dict = Attention(query_dim=snake_case_ , heads=snake_case_ , dim_head=snake_case_ , out_bias=snake_case_ , scale_qk=snake_case_ )
UpperCamelCase_: int = nn.Dropout(snake_case_ )
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : Tuple , snake_case_ : Any=None , snake_case_ : List[Any]=None , ):
# pre_self_attention_layer_norm
UpperCamelCase_: Union[str, Any] = self.layer_norm(snake_case_ )
if conditioning_emb is not None:
UpperCamelCase_: List[str] = self.FiLMLayer(snake_case_ , snake_case_ )
# Self-attention block
UpperCamelCase_: Optional[int] = self.attention(snake_case_ )
UpperCamelCase_: List[str] = hidden_states + self.dropout(snake_case_ )
return hidden_states
class _UpperCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , snake_case_ : List[Any] , snake_case_ : str , snake_case_ : Union[str, Any] , snake_case_ : List[Any] , snake_case_ : Tuple ):
super().__init__()
UpperCamelCase_: Any = Attention(query_dim=snake_case_ , heads=snake_case_ , dim_head=snake_case_ , out_bias=snake_case_ , scale_qk=snake_case_ )
UpperCamelCase_: int = TaLayerNorm(snake_case_ , eps=snake_case_ )
UpperCamelCase_: List[Any] = nn.Dropout(snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : List[str] , snake_case_ : Union[str, Any]=None , snake_case_ : Any=None , ):
UpperCamelCase_: Optional[Any] = self.layer_norm(snake_case_ )
UpperCamelCase_: str = self.attention(
snake_case_ , encoder_hidden_states=snake_case_ , attention_mask=attention_mask.squeeze(1 ) , )
UpperCamelCase_: List[str] = hidden_states + self.dropout(snake_case_ )
return layer_output
class _UpperCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , snake_case_ : Any , snake_case_ : List[Any] , snake_case_ : Optional[int] , snake_case_ : List[Any] ):
super().__init__()
UpperCamelCase_: Optional[int] = TaDenseGatedActDense(d_model=snake_case_ , d_ff=snake_case_ , dropout_rate=snake_case_ )
UpperCamelCase_: Union[str, Any] = TaFiLMLayer(in_features=d_model * 4 , out_features=snake_case_ )
UpperCamelCase_: Optional[Any] = TaLayerNorm(snake_case_ , eps=snake_case_ )
UpperCamelCase_: int = nn.Dropout(snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : int , snake_case_ : Any=None ):
UpperCamelCase_: Dict = self.layer_norm(snake_case_ )
if conditioning_emb is not None:
UpperCamelCase_: List[Any] = self.film(snake_case_ , snake_case_ )
UpperCamelCase_: Union[str, Any] = self.DenseReluDense(snake_case_ )
UpperCamelCase_: Optional[int] = hidden_states + self.dropout(snake_case_ )
return hidden_states
class _UpperCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , snake_case_ : Optional[Any] , snake_case_ : Tuple , snake_case_ : Optional[int] ):
super().__init__()
UpperCamelCase_: List[Any] = nn.Linear(snake_case_ , snake_case_ , bias=snake_case_ )
UpperCamelCase_: Tuple = nn.Linear(snake_case_ , snake_case_ , bias=snake_case_ )
UpperCamelCase_: Tuple = nn.Linear(snake_case_ , snake_case_ , bias=snake_case_ )
UpperCamelCase_: Optional[Any] = nn.Dropout(snake_case_ )
UpperCamelCase_: int = NewGELUActivation()
def lowerCAmelCase__ ( self : Optional[int] , snake_case_ : List[Any] ):
UpperCamelCase_: int = self.act(self.wi_a(snake_case_ ) )
UpperCamelCase_: Union[str, Any] = self.wi_a(snake_case_ )
UpperCamelCase_: Dict = hidden_gelu * hidden_linear
UpperCamelCase_: Any = self.dropout(snake_case_ )
UpperCamelCase_: Union[str, Any] = self.wo(snake_case_ )
return hidden_states
class _UpperCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , snake_case_ : Optional[int] , snake_case_ : Optional[Any]=1e-6 ):
super().__init__()
UpperCamelCase_: List[Any] = nn.Parameter(torch.ones(snake_case_ ) )
UpperCamelCase_: List[Any] = eps
def lowerCAmelCase__ ( self : Tuple , snake_case_ : int ):
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
UpperCamelCase_: Tuple = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=snake_case_ )
UpperCamelCase_: Optional[int] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
UpperCamelCase_: Dict = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
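# Added illustrative sketch: the T5-style RMS norm computed by the class above — scale
# each position by the reciprocal root-mean-square of its last axis, with no mean
# subtraction and no bias, then multiply by the learned weight.
import torch as _torch
def _rms_norm(x, weight, eps=1e-6):
    variance = x.to(_torch.float32).pow(2).mean(-1, keepdim=True)
    return weight * (x * _torch.rsqrt(variance + eps))
_x = _torch.randn(2, 8)
_out = _rms_norm(_x, _torch.ones(8))
# Each row now has (approximately) unit root-mean-square.
assert _torch.allclose(_out.pow(2).mean(-1), _torch.ones(2), atol=1e-3)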
class _UpperCamelCase ( nn.Module ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Union[str, Any] , snake_case_ : torch.Tensor ):
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_4715 * torch.pow(snake_case_ , 3.0 )) ))
class _UpperCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , snake_case_ : Dict , snake_case_ : int ):
super().__init__()
UpperCamelCase_: int = nn.Linear(snake_case_ , out_features * 2 , bias=snake_case_ )
def lowerCAmelCase__ ( self : Union[str, Any] , snake_case_ : str , snake_case_ : Dict ):
UpperCamelCase_: Optional[Any] = self.scale_bias(snake_case_ )
UpperCamelCase_: List[str] = torch.chunk(snake_case_ , 2 , -1 )
UpperCamelCase_: Tuple = x * (1 + scale) + shift
return x
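# Added illustrative sketch: the FiLM conditioning applied by the layer above, with
# simplified shapes. One linear projects the conditioning embedding to 2*features,
# which is chunked into a scale and a shift modulating the hidden states as
# x * (1 + scale) + shift.
import torch as _torch
_features, _cond_dim = 8, 16
_proj = _torch.nn.Linear(_cond_dim, _features * 2, bias=False)
_x, _cond = _torch.randn(2, _features), _torch.randn(2, _cond_dim)
_scale, _shift = _torch.chunk(_proj(_cond), 2, dim=-1)
_filmed = _x * (1 + _scale) + _shift
assert _filmed.shape == _x.shape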
| 716 |
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Union[str, Any]:
# Initialise PyTorch model
UpperCamelCase_: List[Any] = TaConfig.from_json_file(lowerCamelCase )
print(F'''Building PyTorch model from configuration: {config}''' )
UpperCamelCase_: Any = TaForConditionalGeneration(lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
lowerCamelCase_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCamelCase_ : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 670 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase_ : str = logging.get_logger(__name__)
lowerCamelCase_ : Optional[int] = {
"""facebook/deit-base-distilled-patch16-224""": (
"""https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"""
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : List[str] = """deit"""
def __init__( self : str , snake_case_ : Tuple=768 , snake_case_ : str=12 , snake_case_ : Optional[Any]=12 , snake_case_ : Optional[int]=3072 , snake_case_ : Optional[int]="gelu" , snake_case_ : Dict=0.0 , snake_case_ : Union[str, Any]=0.0 , snake_case_ : str=0.02 , snake_case_ : Optional[int]=1e-12 , snake_case_ : Optional[Any]=224 , snake_case_ : Any=16 , snake_case_ : str=3 , snake_case_ : Dict=True , snake_case_ : str=16 , **snake_case_ : List[Any] , ):
super().__init__(**snake_case_ )
UpperCamelCase_: List[Any] = hidden_size
UpperCamelCase_: List[str] = num_hidden_layers
UpperCamelCase_: List[Any] = num_attention_heads
UpperCamelCase_: Any = intermediate_size
UpperCamelCase_: List[str] = hidden_act
UpperCamelCase_: Any = hidden_dropout_prob
UpperCamelCase_: Dict = attention_probs_dropout_prob
UpperCamelCase_: str = initializer_range
UpperCamelCase_: List[str] = layer_norm_eps
UpperCamelCase_: Any = image_size
UpperCamelCase_: Dict = patch_size
UpperCamelCase_: int = num_channels
UpperCamelCase_: List[str] = qkv_bias
UpperCamelCase_: Optional[int] = encoder_stride
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : Optional[Any] = version.parse("""1.11""" )
@property
def lowerCAmelCase__ ( self : List[str] ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase__ ( self : str ):
return 1e-4
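# Added illustrative sketch: the sequence length implied by the defaults above. With
# image_size=224 and patch_size=16 the encoder sees (224 // 16) ** 2 = 196 patch
# tokens, plus the class token and the DeiT-specific distillation token.
_image_size, _patch_size = 224, 16
_num_patches = (_image_size // _patch_size) ** 2
_seq_length = _num_patches + 2  # [CLS] + distillation token
assert (_num_patches, _seq_length) == (196, 198)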
| 717 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ : str = {
"""configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
"""tokenization_roformer""": ["""RoFormerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Union[str, Any] = ["""RoFormerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Any = [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[Any] = [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 670 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ : List[str] = logging.get_logger(__name__)
lowerCamelCase_ : str = {
"""google/switch-base-8""": """https://huggingface.co/google/switch-base-8/blob/main/config.json""",
}
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : int = """switch_transformers"""
__UpperCamelCase : Dict = ["""past_key_values"""]
__UpperCamelCase : List[Any] = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
def __init__( self : Optional[Any] , snake_case_ : Optional[Any]=3_2128 , snake_case_ : List[str]=768 , snake_case_ : int=64 , snake_case_ : Optional[int]=2048 , snake_case_ : Dict=64 , snake_case_ : Dict=12 , snake_case_ : Optional[int]=3 , snake_case_ : str=12 , snake_case_ : Tuple=3 , snake_case_ : Optional[int]=12 , snake_case_ : Union[str, Any]=8 , snake_case_ : List[Any]=False , snake_case_ : List[str]=0.01 , snake_case_ : Tuple="float32" , snake_case_ : Union[str, Any]=False , snake_case_ : Union[str, Any]=32 , snake_case_ : Union[str, Any]=128 , snake_case_ : Any=0.1 , snake_case_ : int=1e-6 , snake_case_ : List[str]=0.001 , snake_case_ : List[str]=0.001 , snake_case_ : Union[str, Any]=1.0 , snake_case_ : str="relu" , snake_case_ : int=True , snake_case_ : Dict=False , snake_case_ : Optional[Any]=True , snake_case_ : Dict=0 , snake_case_ : Tuple=1 , **snake_case_ : Union[str, Any] , ):
UpperCamelCase_: Tuple = vocab_size
UpperCamelCase_: Any = d_model
UpperCamelCase_: Any = d_kv
UpperCamelCase_: Union[str, Any] = d_ff
UpperCamelCase_: Optional[Any] = num_sparse_encoder_layers
UpperCamelCase_: Tuple = num_layers
UpperCamelCase_: List[Any] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
UpperCamelCase_: str = num_sparse_decoder_layers
        # Work out how often a sparse layer occurs among the encoder layers.
if self.num_sparse_encoder_layers > 0:
UpperCamelCase_: Any = self.num_layers // self.num_sparse_encoder_layers
else:
UpperCamelCase_: Dict = self.num_layers # HACK: this will create 0 sparse layers
        # Work out how often a sparse layer occurs among the decoder layers.
if self.num_sparse_decoder_layers > 0:
UpperCamelCase_: Tuple = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
UpperCamelCase_: Optional[int] = self.num_decoder_layers # HACK: this will create 0 sparse layers
UpperCamelCase_: List[str] = num_heads
UpperCamelCase_: Optional[int] = num_experts
UpperCamelCase_: Union[str, Any] = expert_capacity
UpperCamelCase_: Tuple = router_bias
UpperCamelCase_: str = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
UpperCamelCase_: Tuple = router_dtype
UpperCamelCase_: Optional[int] = router_ignore_padding_tokens
UpperCamelCase_: Optional[Any] = relative_attention_num_buckets
UpperCamelCase_: List[Any] = relative_attention_max_distance
UpperCamelCase_: List[Any] = dropout_rate
UpperCamelCase_: Optional[int] = layer_norm_epsilon
UpperCamelCase_: Any = initializer_factor
UpperCamelCase_: Optional[int] = feed_forward_proj
UpperCamelCase_: Tuple = use_cache
UpperCamelCase_: Tuple = add_router_probs
UpperCamelCase_: Union[str, Any] = router_z_loss_coef
UpperCamelCase_: Tuple = router_aux_loss_coef
UpperCamelCase_: Tuple = self.feed_forward_proj.split("""-""" )
UpperCamelCase_: str = act_info[-1]
UpperCamelCase_: Dict = act_info[0] == """gated"""
if len(snake_case_ ) > 1 and act_info[0] != "gated" or len(snake_case_ ) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
"""Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
"""'gated-gelu' or 'relu'""" )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
UpperCamelCase_: Any = """gelu_new"""
super().__init__(
pad_token_id=snake_case_ , eos_token_id=snake_case_ , is_encoder_decoder=snake_case_ , **snake_case_ , )
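# Standalone sketch of the sparse-step arithmetic used in __init__ above; the
# helper name is illustrative and not part of the class. Which exact block
# indices end up sparse is decided by the modeling code, not by the config.
def _sparse_step(num_layers: int, num_sparse_layers: int) -> int:
    # 0 sparse layers degrades to step == num_layers, i.e. no block is sparse.
    return num_layers // num_sparse_layers if num_sparse_layers > 0 else num_layers

assert _sparse_step(12, 3) == 4   # one in every four blocks is a mixture-of-experts block
assert _sparse_step(12, 0) == 12  # effectively disables sparsity (see the HACK comments)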
| 718 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase = "x" , lowerCamelCase = 10**-10 , lowerCamelCase = 1 , ) -> complex:
UpperCamelCase_: Optional[Any] = symbols(lowerCamelCase )
UpperCamelCase_: int = lambdify(lowerCamelCase , lowerCamelCase )
UpperCamelCase_: Optional[Any] = lambdify(lowerCamelCase , diff(lowerCamelCase , lowerCamelCase ) )
UpperCamelCase_: Tuple = starting_point
while True:
if diff_function(lowerCamelCase ) != 0:
UpperCamelCase_: List[Any] = prev_guess - multiplicity * func(lowerCamelCase ) / diff_function(
lowerCamelCase )
else:
raise ZeroDivisionError("""Could not find root""" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
UpperCamelCase_: Any = next_guess
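# A minimal standalone sketch of the same Newton-Raphson iteration without sympy,
# using a hand-coded derivative for f(x) = x**2 - 2 (so the root is sqrt(2));
# purely illustrative, independent of the symbolic implementation above.
def _newton_sqrt_two(guess: float = 1.0, precision: float = 10**-10) -> float:
    while True:
        next_guess = guess - (guess * guess - 2) / (2 * guess)
        if abs(next_guess - guess) < precision:
            return next_guess
        guess = next_guess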
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(F"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}""")
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
F"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
F"""{newton_raphson('exp(x) - 1', 10, precision=0.005)}""",
)
# Find root of cos(x)
print(F"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 670 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ : Union[str, Any] = logging.get_logger(__name__)
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : List[Any] = """encoder-decoder"""
__UpperCamelCase : Dict = True
def __init__( self : Tuple , **snake_case_ : Tuple ):
super().__init__(**snake_case_ )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
UpperCamelCase_: List[Any] = kwargs.pop("""encoder""" )
UpperCamelCase_: Union[str, Any] = encoder_config.pop("""model_type""" )
UpperCamelCase_: Optional[int] = kwargs.pop("""decoder""" )
UpperCamelCase_: str = decoder_config.pop("""model_type""" )
from ..auto.configuration_auto import AutoConfig
UpperCamelCase_: List[Any] = AutoConfig.for_model(snake_case_ , **snake_case_ )
UpperCamelCase_: List[Any] = AutoConfig.for_model(snake_case_ , **snake_case_ )
UpperCamelCase_: Dict = True
@classmethod
def lowerCAmelCase__ ( cls : Any , snake_case_ : PretrainedConfig , snake_case_ : PretrainedConfig , **snake_case_ : Optional[Any] ):
logger.info("""Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
UpperCamelCase_: List[str] = True
UpperCamelCase_: str = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **snake_case_ )
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: int = copy.deepcopy(self.__dict__ )
UpperCamelCase_: Optional[Any] = self.encoder.to_dict()
UpperCamelCase_: Optional[int] = self.decoder.to_dict()
UpperCamelCase_: List[str] = self.__class__.model_type
return output
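# Usage sketch, hedged: in the upstream library this class is exported as
# EncoderDecoderConfig and the classmethod above as from_encoder_decoder_configs.
#   enc = AutoConfig.from_pretrained("bert-base-uncased")
#   dec = AutoConfig.from_pretrained("bert-base-uncased")
#   cfg = EncoderDecoderConfig.from_encoder_decoder_configs(enc, dec)
# The classmethod forces cfg.decoder.is_decoder and cfg.decoder.add_cross_attention
# to True, and to_dict() re-nests both sub-configs plus the combined model_type.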
| 719 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ : Optional[Any] = {
"""configuration_distilbert""": [
"""DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""DistilBertConfig""",
"""DistilBertOnnxConfig""",
],
"""tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[Any] = ["""DistilBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[int] = [
"""DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DistilBertForMaskedLM""",
"""DistilBertForMultipleChoice""",
"""DistilBertForQuestionAnswering""",
"""DistilBertForSequenceClassification""",
"""DistilBertForTokenClassification""",
"""DistilBertModel""",
"""DistilBertPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[str] = [
"""TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDistilBertForMaskedLM""",
"""TFDistilBertForMultipleChoice""",
"""TFDistilBertForQuestionAnswering""",
"""TFDistilBertForSequenceClassification""",
"""TFDistilBertForTokenClassification""",
"""TFDistilBertMainLayer""",
"""TFDistilBertModel""",
"""TFDistilBertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = [
"""FlaxDistilBertForMaskedLM""",
"""FlaxDistilBertForMultipleChoice""",
"""FlaxDistilBertForQuestionAnswering""",
"""FlaxDistilBertForSequenceClassification""",
"""FlaxDistilBertForTokenClassification""",
"""FlaxDistilBertModel""",
"""FlaxDistilBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 670 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase_ : Optional[Any] = {"""configuration_timm_backbone""": ["""TimmBackboneConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : int = ["""TimmBackbone"""]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
lowerCamelCase_ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
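# Rough sketch of the lazy-import mechanism behind _LazyModule, as used here and in
# the other package __init__ files above (illustrative only, not the real class):
import importlib
import types

class _TinyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # invert {submodule: [attr, ...]} into {attr: submodule}
        self._attr_to_module = {a: m for m, attrs in import_structure.items() for a in attrs}

    def __getattr__(self, attr: str):
        # the heavy submodule (and torch) is imported only on first attribute access
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)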
| 720 |
from manim import *
class _UpperCamelCase ( _A ):
'''simple docstring'''
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: Dict = Rectangle(height=0.5 , width=0.5 )
UpperCamelCase_: Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCamelCase_: Tuple = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Tuple = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Union[str, Any] = VGroup(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Optional[Any] = Text("""CPU""" , font_size=24 )
UpperCamelCase_: int = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(snake_case_ )
UpperCamelCase_: Optional[int] = [mem.copy() for i in range(1 )]
UpperCamelCase_: Dict = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Optional[int] = Text("""GPU""" , font_size=24 )
UpperCamelCase_: Optional[int] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
gpu.align_to(snake_case_ , snake_case_ )
gpu.set_x(gpu.get_x() - 1 )
self.add(snake_case_ )
UpperCamelCase_: Dict = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Any = Text("""Model""" , font_size=24 )
UpperCamelCase_: Optional[Any] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
model.move_to([3, -1.0, 0] )
self.play(
Create(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) , )
UpperCamelCase_: List[Any] = MarkupText(
f'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' , font_size=24 , )
UpperCamelCase_: Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCamelCase_: Union[str, Any] = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case_ , run_time=2.5 ) , Write(snake_case_ ) , Write(snake_case_ ) )
self.add(snake_case_ )
UpperCamelCase_: Union[str, Any] = []
UpperCamelCase_: Union[str, Any] = []
UpperCamelCase_: Tuple = []
for i, rect in enumerate(snake_case_ ):
UpperCamelCase_: Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(snake_case_ , opacity=0.7 )
cpu_target.move_to(snake_case_ )
cpu_target.generate_target()
UpperCamelCase_: int = 0.46 / 4
UpperCamelCase_: Optional[int] = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=snake_case_ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=snake_case_ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=snake_case_ , buff=0.0 )
cpu_targs.append(snake_case_ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(snake_case_ ) )
second_animations.append(MoveToTarget(snake_case_ , run_time=1.5 ) )
self.play(*snake_case_ )
self.play(*snake_case_ )
self.wait()
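# Rendering sketch (assumptions: the Scene subclass would need a concrete name such
# as ModelLoadingScene and the file saved as scene.py; `manim` below is the Manim
# Community CLI, where -pql previews at low quality while iterating on layout):
#   manim -pql scene.py ModelLoadingScene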
| 670 | 0 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : List[Any] = ["""image_processor""", """tokenizer"""]
__UpperCamelCase : Any = """Pix2StructImageProcessor"""
__UpperCamelCase : Union[str, Any] = ("""T5Tokenizer""", """T5TokenizerFast""")
def __init__( self : Tuple , snake_case_ : List[Any] , snake_case_ : Optional[Any] ):
UpperCamelCase_: Optional[Any] = False
super().__init__(snake_case_ , snake_case_ )
def __call__( self : Optional[Any] , snake_case_ : List[Any]=None , snake_case_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , snake_case_ : bool = True , snake_case_ : Union[bool, str, PaddingStrategy] = False , snake_case_ : Union[bool, str, TruncationStrategy] = None , snake_case_ : Optional[int] = None , snake_case_ : Optional[int] = 2048 , snake_case_ : int = 0 , snake_case_ : Optional[int] = None , snake_case_ : Optional[bool] = None , snake_case_ : bool = False , snake_case_ : bool = False , snake_case_ : bool = False , snake_case_ : bool = False , snake_case_ : bool = False , snake_case_ : bool = True , snake_case_ : Optional[Union[str, TensorType]] = None , **snake_case_ : Any , ):
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None and not self.image_processor.is_vqa:
UpperCamelCase_: str = self.tokenizer
UpperCamelCase_: Tuple = self.tokenizer(
text=snake_case_ , add_special_tokens=snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , stride=snake_case_ , pad_to_multiple_of=snake_case_ , return_attention_mask=snake_case_ , return_overflowing_tokens=snake_case_ , return_special_tokens_mask=snake_case_ , return_offsets_mapping=snake_case_ , return_token_type_ids=snake_case_ , return_length=snake_case_ , verbose=snake_case_ , return_tensors=snake_case_ , **snake_case_ , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
UpperCamelCase_: Tuple = self.image_processor(
snake_case_ , return_tensors=snake_case_ , max_patches=snake_case_ , **snake_case_ )
else:
# add pixel_values and bbox
UpperCamelCase_: Tuple = self.image_processor(
snake_case_ , return_tensors=snake_case_ , max_patches=snake_case_ , header_text=snake_case_ , **snake_case_ )
if text is not None and not self.image_processor.is_vqa:
UpperCamelCase_: str = self.tokenizer(
text=snake_case_ , add_special_tokens=snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , stride=snake_case_ , pad_to_multiple_of=snake_case_ , return_attention_mask=snake_case_ , return_overflowing_tokens=snake_case_ , return_special_tokens_mask=snake_case_ , return_offsets_mapping=snake_case_ , return_token_type_ids=snake_case_ , return_length=snake_case_ , verbose=snake_case_ , return_tensors=snake_case_ , **snake_case_ , )
if "attention_mask" in text_encoding:
UpperCamelCase_: Any = text_encoding.pop("""attention_mask""" )
if "input_ids" in text_encoding:
UpperCamelCase_: str = text_encoding.pop("""input_ids""" )
else:
UpperCamelCase_: int = None
if text_encoding is not None:
encoding_image_processor.update(snake_case_ )
return encoding_image_processor
def lowerCAmelCase__ ( self : List[Any] , *snake_case_ : Tuple , **snake_case_ : List[str] ):
return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )
def lowerCAmelCase__ ( self : Dict , *snake_case_ : Optional[int] , **snake_case_ : List[Any] ):
return self.tokenizer.decode(*snake_case_ , **snake_case_ )
@property
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: Union[str, Any] = self.tokenizer.model_input_names
UpperCamelCase_: List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
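# Usage sketch, hedged: upstream this class is exported as Pix2StructProcessor and
# the checkpoint name below is illustrative.
#   processor = Pix2StructProcessor.from_pretrained("google/pix2struct-base")
#   inputs = processor(images=image, text="a caption", return_tensors="pt")
# On the non-VQA path the image processor supplies the patch inputs and, as the two
# pops above show, the tokenizer's attention_mask / input_ids are lifted out of the
# text encoding (upstream they are merged back in under decoder_* keys).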
| 721 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Union[str, Any] = """laion/clap-htsat-unfused"""
UpperCamelCase_: List[str] = tempfile.mkdtemp()
def lowerCAmelCase__ ( self : Tuple , **snake_case_ : Optional[Any] ):
return RobertaTokenizer.from_pretrained(self.checkpoint , **snake_case_ )
def lowerCAmelCase__ ( self : str , **snake_case_ : Any ):
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **snake_case_ )
def lowerCAmelCase__ ( self : Tuple ):
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: Union[str, Any] = self.get_tokenizer()
UpperCamelCase_: int = self.get_feature_extractor()
UpperCamelCase_: List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_: Any = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: Any = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_: Optional[int] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
UpperCamelCase_: Dict = self.get_feature_extractor(do_normalize=snake_case_ , padding_value=1.0 )
UpperCamelCase_: List[str] = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=snake_case_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: int = self.get_feature_extractor()
UpperCamelCase_: Optional[Any] = self.get_tokenizer()
UpperCamelCase_: Dict = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
UpperCamelCase_: Optional[Any] = floats_list((3, 1000) )
UpperCamelCase_: List[str] = feature_extractor(snake_case_ , return_tensors="""np""" )
UpperCamelCase_: int = processor(audios=snake_case_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: List[Any] = self.get_feature_extractor()
UpperCamelCase_: List[str] = self.get_tokenizer()
UpperCamelCase_: List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
UpperCamelCase_: Dict = """This is a test string"""
UpperCamelCase_: Tuple = processor(text=snake_case_ )
UpperCamelCase_: Optional[int] = tokenizer(snake_case_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: List[str] = self.get_feature_extractor()
UpperCamelCase_: Any = self.get_tokenizer()
UpperCamelCase_: Optional[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
UpperCamelCase_: str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase_: Tuple = processor.batch_decode(snake_case_ )
UpperCamelCase_: str = tokenizer.batch_decode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Any = self.get_feature_extractor()
UpperCamelCase_: str = self.get_tokenizer()
UpperCamelCase_: Optional[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
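# floats_list (imported from the CLAP feature-extraction tests above) is assumed to
# build a nested list of random floats of the given shape, roughly:
#   def floats_list(shape, scale=1.0):
#       return [[scale * random.random() for _ in range(shape[1])] for _ in range(shape[0])]
# so (3, 1000) stands in for three 1000-sample mono waveforms.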
| 670 | 0 |
from math import ceil
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = list(range(0 , lowerCamelCase_ ) )
lowercase__ = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
lowercase__ = []
for i in device_map_blocks:
if device_map_blocks.count(lowerCamelCase_ ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(lowerCamelCase_ )
    # Missing and extra blocks
lowercase__ = [i for i in blocks if i not in device_map_blocks]
lowercase__ = [i for i in device_map_blocks if i not in blocks]
if len(lowerCamelCase_ ) != 0:
raise ValueError(
'''Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.'''
''' These attention blocks were specified more than once: ''' + str(lowerCamelCase_ ) )
if len(lowerCamelCase_ ) != 0:
raise ValueError(
'''There are attention blocks for this model that are not specified in the device_map. Add these attention '''
'''blocks to a device on the device_map: ''' + str(lowerCamelCase_ ) )
if len(lowerCamelCase_ ) != 0:
raise ValueError(
'''The device_map contains more attention blocks than this model has. Remove these from the device_map:'''
+ str(lowerCamelCase_ ) )
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = list(range(lowerCamelCase_ ) )
lowercase__ = int(ceil(n_layers / len(lowerCamelCase_ ) ) )
lowercase__ = [layers[i : i + n_blocks] for i in range(0 , lowerCamelCase_ , lowerCamelCase_ )]
return dict(zip(lowerCamelCase_ , lowerCamelCase_ ) )
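# Standalone sketch of the layer-splitting helper above (upstream this pair of
# functions is assert_device_map / get_device_map; the name here is illustrative):
from math import ceil as _ceil

def _get_device_map(n_layers: int, devices: list) -> dict:
    layers = list(range(n_layers))
    n_blocks = int(_ceil(n_layers / len(devices)))
    blocks = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, blocks))

assert _get_device_map(12, [0, 1, 2, 3]) == {0: [0, 1, 2], 1: [3, 4, 5], 2: [6, 7, 8], 3: [9, 10, 11]}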
| 671 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
A__ : Dict = logging.get_logger(__name__)
A__ : Dict = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
A__ : Optional[int] = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
A__ : List[str] = {
'bert-base-uncased': 5_12,
'bert-large-uncased': 5_12,
'bert-base-cased': 5_12,
'bert-large-cased': 5_12,
'bert-base-multilingual-uncased': 5_12,
'bert-base-multilingual-cased': 5_12,
'bert-base-chinese': 5_12,
'bert-base-german-cased': 5_12,
'bert-large-uncased-whole-word-masking': 5_12,
'bert-large-cased-whole-word-masking': 5_12,
'bert-large-uncased-whole-word-masking-finetuned-squad': 5_12,
'bert-large-cased-whole-word-masking-finetuned-squad': 5_12,
'bert-base-cased-finetuned-mrpc': 5_12,
'bert-base-german-dbmdz-cased': 5_12,
'bert-base-german-dbmdz-uncased': 5_12,
'TurkuNLP/bert-base-finnish-cased-v1': 5_12,
'TurkuNLP/bert-base-finnish-uncased-v1': 5_12,
'wietsedv/bert-base-dutch-cased': 5_12,
}
A__ : Optional[int] = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_INIT_CONFIGURATION
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = BertTokenizer
def __init__( self : Any, lowerCamelCase : Optional[Any]=None, lowerCamelCase : Any=None, lowerCamelCase : Tuple=True, lowerCamelCase : Dict="[UNK]", lowerCamelCase : Any="[SEP]", lowerCamelCase : List[Any]="[PAD]", lowerCamelCase : Optional[Any]="[CLS]", lowerCamelCase : Dict="[MASK]", lowerCamelCase : List[Any]=True, lowerCamelCase : Tuple=None, **lowerCamelCase : Dict, ):
'''simple docstring'''
super().__init__(
lowerCamelCase, tokenizer_file=lowerCamelCase, do_lower_case=lowerCamelCase, unk_token=lowerCamelCase, sep_token=lowerCamelCase, pad_token=lowerCamelCase, cls_token=lowerCamelCase, mask_token=lowerCamelCase, tokenize_chinese_chars=lowerCamelCase, strip_accents=lowerCamelCase, **lowerCamelCase, )
lowercase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''', lowerCamelCase ) != do_lower_case
or normalizer_state.get('''strip_accents''', lowerCamelCase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''', lowerCamelCase ) != tokenize_chinese_chars
):
lowercase__ = getattr(lowerCamelCase, normalizer_state.pop('''type''' ) )
lowercase__ = do_lower_case
lowercase__ = strip_accents
lowercase__ = tokenize_chinese_chars
lowercase__ = normalizer_class(**lowerCamelCase )
lowercase__ = do_lower_case
def lowercase__ ( self : Any, lowerCamelCase : List[Any], lowerCamelCase : Dict=None ):
'''simple docstring'''
lowercase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowercase__ ( self : List[Any], lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase__ ( self : Any, lowerCamelCase : str, lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
lowercase__ = self._tokenizer.model.save(lowerCamelCase, name=lowerCamelCase )
return tuple(lowerCamelCase )
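# Layout that the two helpers above implement (standard BERT conventions):
#   single sequence: [CLS] A [SEP]         -> token_type_ids all 0
#   sequence pair:   [CLS] A [SEP] B [SEP] -> 0 over "[CLS] A [SEP]", 1 over "B [SEP]"
# save_vocabulary delegates to the backing `tokenizers` WordPiece model's save().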
| 671 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = tempfile.mkdtemp()
lowercase__ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
lowercase__ = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48145466, 0.4578275, 0.40821073],
'''image_std''': [0.26862954, 0.26130258, 0.27577711],
}
lowercase__ = os.path.join(self.tmpdirname, lowerCamelCase )
with open(self.image_processor_file, '''w''', encoding='''utf-8''' ) as fp:
json.dump(lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : List[str], **lowerCamelCase : List[str] ):
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname, **lowerCamelCase )
def lowercase__ ( self : Any, **lowerCamelCase : int ):
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname, **lowerCamelCase )
def lowercase__ ( self : Dict, **lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase )
def lowercase__ ( self : Dict ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )]
lowercase__ = [Image.fromarray(np.moveaxis(lowerCamelCase, 0, -1 ) ) for x in image_inputs]
return image_inputs
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer()
lowercase__ = self.get_image_processor()
lowercase__ = AlignProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
lowercase__ = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=lowerCamelCase )
lowercase__ = AlignProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
lowercase__ = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer, lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer, lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor, lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor, lowerCamelCase )
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase__ = self.get_tokenizer(bos_token='''(BOS)''', eos_token='''(EOS)''' )
lowercase__ = self.get_image_processor(do_normalize=lowerCamelCase, padding_value=1.0 )
lowercase__ = AlignProcessor.from_pretrained(
self.tmpdirname, bos_token='''(BOS)''', eos_token='''(EOS)''', do_normalize=lowerCamelCase, padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer, lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCamelCase )
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = self.prepare_image_inputs()
lowercase__ = image_processor(lowerCamelCase, return_tensors='''np''' )
lowercase__ = processor(images=lowerCamelCase, return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1E-2 )
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = '''lower newer'''
lowercase__ = processor(text=lowerCamelCase )
lowercase__ = tokenizer(lowerCamelCase, padding='''max_length''', max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = '''lower newer'''
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=lowerCamelCase, images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ), ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ = processor.batch_decode(lowerCamelCase )
lowercase__ = tokenizer.batch_decode(lowerCamelCase )
self.assertListEqual(lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = '''lower newer'''
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=lowerCamelCase, images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ), processor.model_input_names )
| 671 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A__ : Any = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Union[str, Any] = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
A__ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 671 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ : List[str] = logging.get_logger(__name__)
A__ : Optional[Any] = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = """yolos"""
def __init__( self : Any, lowerCamelCase : Tuple=768, lowerCamelCase : Optional[Any]=12, lowerCamelCase : Any=12, lowerCamelCase : Any=3_072, lowerCamelCase : Optional[Any]="gelu", lowerCamelCase : List[Any]=0.0, lowerCamelCase : List[str]=0.0, lowerCamelCase : List[Any]=0.02, lowerCamelCase : str=1E-12, lowerCamelCase : List[Any]=[512, 864], lowerCamelCase : Any=16, lowerCamelCase : List[str]=3, lowerCamelCase : int=True, lowerCamelCase : Optional[int]=100, lowerCamelCase : Optional[int]=True, lowerCamelCase : Optional[int]=False, lowerCamelCase : Optional[Any]=1, lowerCamelCase : List[str]=5, lowerCamelCase : List[Any]=2, lowerCamelCase : List[Any]=5, lowerCamelCase : Any=2, lowerCamelCase : Dict=0.1, **lowerCamelCase : int, ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = qkv_bias
lowercase__ = num_detection_tokens
lowercase__ = use_mid_position_embeddings
lowercase__ = auxiliary_loss
# Hungarian matcher
lowercase__ = class_cost
lowercase__ = bbox_cost
lowercase__ = giou_cost
# Loss coefficients
lowercase__ = bbox_loss_coefficient
lowercase__ = giou_loss_coefficient
lowercase__ = eos_coefficient
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = version.parse("""1.11""" )
@property
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowercase__ ( self : Any ):
'''simple docstring'''
return 1E-4
@property
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
return 12
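# What the OnnxConfig above pins down: one "pixel_values" input with dynamic
# (batch, num_channels, height, width) axes, an absolute tolerance of 1e-4 for
# PyTorch-vs-ONNX validation, a default opset of 12, and torch >= 1.11 for export.
# Hedged export sketch using the (legacy) transformers.onnx entry point:
#   onnx_inputs, onnx_outputs = transformers.onnx.export(
#       preprocessor=image_processor, model=model, config=onnx_config,
#       opset=onnx_config.default_onnx_opset, output=Path("yolos.onnx"))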
| 671 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
A__ : Dict = 50_00_00
A__ , A__ : str = os.path.split(__file__)
A__ : Optional[Any] = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def a ( lowerCamelCase_ , **lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = dataset.map(**lowerCamelCase_ )
@get_duration
def a ( lowerCamelCase_ , **lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = dataset.filter(**lowerCamelCase_ )
def a ( ):
'''simple docstring'''
lowercase__ = {'''num examples''': SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} )
lowercase__ = generate_example_dataset(
os.path.join(lowerCamelCase_ , '''dataset.arrow''' ) , lowerCamelCase_ , num_examples=lowerCamelCase_ )
lowercase__ = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=lowerCamelCase_ )
def tokenize(lowerCamelCase_ ):
return tokenizer(examples['''text'''] )
lowercase__ = map(lowerCamelCase_ )
lowercase__ = map(lowerCamelCase_ , batched=lowerCamelCase_ )
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
with dataset.formatted_as(type='''numpy''' ):
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
with dataset.formatted_as(type='''pandas''' ):
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
with dataset.formatted_as(type='''torch''' , columns='''numbers''' ):
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
with dataset.formatted_as(type='''tensorflow''' , columns='''numbers''' ):
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
lowercase__ = map(lowerCamelCase_ , function=lowerCamelCase_ , batched=lowerCamelCase_ )
lowercase__ = filter(lowerCamelCase_ )
    # Activate later when tokenizer supports batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(lowerCamelCase_ , '''wb''' ) as f:
f.write(json.dumps(lowerCamelCase_ ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
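# get_duration (imported from utils above) is assumed to be a timing decorator,
# roughly this sketch:
#   def get_duration(func):
#       @functools.wraps(func)
#       def wrapper(*args, **kwargs):
#           start = timeit.default_timer()
#           func(*args, **kwargs)
#           return timeit.default_timer() - start
#       return wrapper
# so each decorated map()/filter() call returns elapsed seconds, which are what
# get serialized to the JSON results file at the end of the benchmark.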
| 671 | 1 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
lowercase__ = 42
lowercase__ = None
lowercase__ = None
def a ( ):
'''simple docstring'''
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    return tree
def a ( lowerCamelCase_ ):
'''simple docstring'''
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def a ( lowerCamelCase_ ):
'''simple docstring'''
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def a ( lowerCamelCase_ ):
'''simple docstring'''
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def a ( lowerCamelCase_ ):
'''simple docstring'''
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
if root is None:
return output
lowercase__ = deque([root] )
while process_queue:
lowercase__ = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
def populate_output(lowerCamelCase_ , lowerCamelCase_ ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(lowerCamelCase_ , lowerCamelCase_ )
return output
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
def populate_output(lowerCamelCase_ , lowerCamelCase_ ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(lowerCamelCase_ , lowerCamelCase_ )
return output
def a ( lowerCamelCase_ ):
'''simple docstring'''
if root is None:
return []
lowercase__ = []
lowercase__ = 0
lowercase__ = height(lowerCamelCase_ )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(lowerCamelCase_ , lowerCamelCase_ ) )
lowercase__ = 1
else:
output.append(get_nodes_from_right_to_left(lowerCamelCase_ , lowerCamelCase_ ) )
lowercase__ = 0
return output
def a ( ): # Main function for testing.
'''simple docstring'''
lowercase__ = make_tree()
print(F"""In-order Traversal: {inorder(lowerCamelCase_ )}""" )
print(F"""Pre-order Traversal: {preorder(lowerCamelCase_ )}""" )
print(F"""Post-order Traversal: {postorder(lowerCamelCase_ )}""" , '''\n''' )
print(F"""Height of Tree: {height(lowerCamelCase_ )}""" , '''\n''' )
print('''Complete Level Order Traversal: ''' )
print(level_order(lowerCamelCase_ ) , '''\n''' )
print('''Level-wise order Traversal: ''' )
for level in range(1 , height(lowerCamelCase_ ) + 1 ):
print(F"""Level {level}:""" , get_nodes_from_left_to_right(lowerCamelCase_ , level=lowerCamelCase_ ) )
print('''\nZigZag order Traversal: ''' )
print(zigzag(lowerCamelCase_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
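# Worked outputs for the 5-node tree built by make_tree() (root 1, children 2 and 3,
# grandchildren 4 and 5 under node 2):
#   preorder  -> [1, 2, 4, 5, 3]      postorder   -> [4, 5, 2, 3, 1]
#   inorder   -> [4, 2, 5, 1, 3]      level_order -> [1, 2, 3, 4, 5]
#   zigzag    -> [[1], [3, 2], [4, 5]]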
| 671 |
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Optional[int], lowerCamelCase : str = "", lowerCamelCase : bool = False ):
'''simple docstring'''
        # Maps the first character of each child's prefix to that child node
lowercase__ = {}
# A node will be a leaf if the tree contains its word
lowercase__ = is_leaf
lowercase__ = prefix
def lowercase__ ( self : Any, lowerCamelCase : str ):
'''simple docstring'''
lowercase__ = 0
for q, w in zip(self.prefix, lowerCamelCase ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def lowercase__ ( self : Optional[int], lowerCamelCase : list[str] ):
'''simple docstring'''
for word in words:
self.insert(lowerCamelCase )
def lowercase__ ( self : int, lowerCamelCase : str ):
'''simple docstring'''
# Case 1: If the word is the prefix of the node
# Solution: We set the current node as leaf
if self.prefix == word:
lowercase__ = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
lowercase__ = RadixNode(prefix=lowerCamelCase, is_leaf=lowerCamelCase )
else:
lowercase__ = self.nodes[word[0]]
lowercase__ , lowercase__ , lowercase__ = incoming_node.match(
lowerCamelCase )
# Case 3: The node prefix is equal to the matching
            # Solution: We insert the remaining word into the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(lowerCamelCase )
            # Case 4: The match covers only part of the node's prefix
            # Solution: Create an intermediate node between the two, adjust
            # the prefixes and add a new node for any remaining word
else:
lowercase__ = remaining_prefix
lowercase__ = self.nodes[matching_string[0]]
lowercase__ = RadixNode(lowerCamelCase, lowerCamelCase )
lowercase__ = aux_node
if remaining_word == "":
lowercase__ = True
else:
self.nodes[matching_string[0]].insert(lowerCamelCase )
def lowercase__ ( self : int, lowerCamelCase : str ):
'''simple docstring'''
lowercase__ = self.nodes.get(word[0], lowerCamelCase )
if not incoming_node:
return False
else:
lowercase__ , lowercase__ , lowercase__ = incoming_node.match(
lowerCamelCase )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
            # Part of the word still remains, so we check the next node
else:
return incoming_node.find(lowerCamelCase )
def lowercase__ ( self : Any, lowerCamelCase : str ):
'''simple docstring'''
lowercase__ = self.nodes.get(word[0], lowerCamelCase )
if not incoming_node:
return False
else:
lowercase__ , lowercase__ , lowercase__ = incoming_node.match(
lowerCamelCase )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
            # Part of the word still remains, so we check the next node
elif remaining_word != "":
return incoming_node.delete(lowerCamelCase )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
                # We delete the node if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
lowercase__ = list(self.nodes.values() )[0]
lowercase__ = merging_node.is_leaf
self.prefix += merging_node.prefix
lowercase__ = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
lowercase__ = False
# If there is 1 edge, we merge it with its child
else:
lowercase__ = list(incoming_node.nodes.values() )[0]
lowercase__ = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
lowercase__ = merging_node.nodes
return True
def lowercase__ ( self : Union[str, Any], lowerCamelCase : int = 0 ):
'''simple docstring'''
if self.prefix != "":
print('''-''' * height, self.prefix, ''' (leaf)''' if self.is_leaf else '''''' )
for value in self.nodes.values():
value.print_tree(height + 1 )
def a ( ):
'''simple docstring'''
lowercase__ = '''banana bananas bandana band apple all beast'''.split()
lowercase__ = RadixNode()
root.insert_many(lowerCamelCase_ )
assert all(root.find(lowerCamelCase_ ) for word in words )
assert not root.find('''bandanas''' )
assert not root.find('''apps''' )
root.delete('''all''' )
assert not root.find('''all''' )
root.delete('''banana''' )
assert not root.find('''banana''' )
assert root.find('''bananas''' )
return True
def a ( ):
'''simple docstring'''
assert test_trie()
def a ( ):
'''simple docstring'''
lowercase__ = RadixNode()
lowercase__ = '''banana bananas bandanas bandana band apple all beast'''.split()
root.insert_many(lowerCamelCase_ )
print('''Words:''' , lowerCamelCase_ )
print('''Tree:''' )
root.print_tree()
if __name__ == "__main__":
main()
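# Worked example of match() above: for a node whose prefix is "banana" and an
# incoming word "bandana", the longest common prefix has length 3, so it returns
#   ("ban", "ana", "dana")   # (matching_string, remaining_prefix, remaining_word)
# and the non-empty remaining_prefix triggers the split-node insertion in Case 4.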
| 671 | 1 |
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : Dict = logging.get_logger(__name__)
A__ : Tuple = {
'microsoft/xprophetnet-large-wiki100-cased': (
'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'
),
}
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = """xlm-prophetnet"""
lowercase__ = ["""past_key_values"""]
lowercase__ = {
"""num_attention_heads""": """num_encoder_attention_heads""",
}
def __init__( self : List[Any], lowerCamelCase : Optional[float] = 0.1, lowerCamelCase : Optional[Union[str, Callable]] = "gelu", lowerCamelCase : Optional[int] = 30_522, lowerCamelCase : Optional[int] = 1_024, lowerCamelCase : Optional[int] = 4_096, lowerCamelCase : Optional[int] = 12, lowerCamelCase : Optional[int] = 16, lowerCamelCase : Optional[int] = 4_096, lowerCamelCase : Optional[int] = 12, lowerCamelCase : Optional[int] = 16, lowerCamelCase : Optional[float] = 0.1, lowerCamelCase : Optional[float] = 0.1, lowerCamelCase : Optional[int] = 512, lowerCamelCase : Optional[float] = 0.02, lowerCamelCase : Optional[bool] = True, lowerCamelCase : Optional[bool] = True, lowerCamelCase : Optional[int] = 0, lowerCamelCase : Optional[int] = 2, lowerCamelCase : Optional[int] = 32, lowerCamelCase : Optional[int] = 128, lowerCamelCase : Optional[bool] = False, lowerCamelCase : Optional[float] = 0.0, lowerCamelCase : Optional[bool] = True, lowerCamelCase : Optional[int] = 0, lowerCamelCase : Optional[int] = 1, lowerCamelCase : Optional[int] = 2, **lowerCamelCase : Union[str, Any], ):
'''simple docstring'''
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = encoder_ffn_dim
lowercase__ = num_encoder_layers
lowercase__ = num_encoder_attention_heads
lowercase__ = decoder_ffn_dim
lowercase__ = num_decoder_layers
lowercase__ = num_decoder_attention_heads
lowercase__ = max_position_embeddings
lowercase__ = init_std # Normal(0, this parameter)
lowercase__ = activation_function
# parameters for xlmprophetnet
lowercase__ = ngram
lowercase__ = num_buckets
lowercase__ = relative_max_distance
lowercase__ = disable_ngram_loss
lowercase__ = eps
# 3 Types of Dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = dropout
lowercase__ = use_cache
super().__init__(
pad_token_id=lowerCamelCase, bos_token_id=lowerCamelCase, eos_token_id=lowerCamelCase, is_encoder_decoder=lowerCamelCase, add_cross_attention=lowerCamelCase, decoder_start_token_id=lowerCamelCase, **lowerCamelCase, )
@property
def lowercase__ ( self : str ):
'''simple docstring'''
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def lowercase__ ( self : Tuple, lowerCamelCase : Optional[Any] ):
'''simple docstring'''
raise NotImplementedError(
'''This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'''
''' `num_decoder_layers`.''' )
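# Note on the property pair above (class name in the sketch is the assumed upstream
# export, XLMProphetNetConfig):
#   cfg = XLMProphetNetConfig()
#   cfg.num_hidden_layers       # -> 24, i.e. num_encoder_layers + num_decoder_layers
#   cfg.num_hidden_layers = 6   # -> NotImplementedError, by design: size the two
#                               #    stacks independently via num_encoder_layers /
#                               #    num_decoder_layers instead.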
| 671 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
lowercase__ = ViTImageProcessor if is_vision_available() else None
@property
def lowercase__ ( self : List[str] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
    def setUp( self : Any ):
        '''simple docstring'''
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab ) ) ) )
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        image_processor_map = {
            '''do_normalize''': False,
            '''do_resize''': True,
            '''image_processor_type''': '''ViTImageProcessor''',
            '''resample''': 3,
            '''size''': {'''height''': 32, '''width''': 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file, '''w''', encoding='''utf-8''' ) as fp:
            json.dump(image_processor_map, fp )
    def get_tokenizer( self : int, **lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname, **lowerCamelCase )
    def get_image_processor( self : str, **lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase )
    def tearDown( self : int ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self : List[Any] ):
        '''simple docstring'''
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8 )
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1 ) )
return image_input
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_image_processor()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
lowercase__ = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=lowerCamelCase )
self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer, lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCamelCase )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_image_processor()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
lowercase__ = self.get_tokenizer(bos_token='''(BOS)''', eos_token='''(EOS)''' )
lowercase__ = self.get_image_processor(do_normalize=lowerCamelCase, padding_value=1.0 )
lowercase__ = MgpstrProcessor.from_pretrained(
self.tmpdirname, bos_token='''(BOS)''', eos_token='''(EOS)''', do_normalize=lowerCamelCase, padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer, lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCamelCase )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = self.prepare_image_inputs()
lowercase__ = image_processor(lowerCamelCase, return_tensors='''np''' )
lowercase__ = processor(images=lowerCamelCase, return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1E-2 )
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = '''test'''
lowercase__ = processor(text=lowerCamelCase )
lowercase__ = tokenizer(lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = '''test'''
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=lowerCamelCase, images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ), ['''pixel_values''', '''labels'''] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
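        # dummy character-id sequences; char_decode output should match the tokenizer's batch_decode once spaces are removed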
lowercase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ = processor.char_decode(lowerCamelCase )
lowercase__ = tokenizer.batch_decode(lowerCamelCase )
lowercase__ = [seq.replace(''' ''', '''''' ) for seq in decoded_tok]
self.assertListEqual(lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = None
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=lowerCamelCase, images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ), processor.model_input_names )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = torch.randn(1, 27, 38 )
lowercase__ = torch.randn(1, 27, 50_257 )
lowercase__ = torch.randn(1, 27, 30_522 )
lowercase__ = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ), ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
| 671 | 1 |
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class _UpperCAmelCase ( Protocol ):
    """simple docstring"""
    def process( self : str, lowerCamelCase : float ):
        '''simple docstring'''
return 0.0
def get_bounds( fft_results , samplerate ):
    '''Get the (lowest, highest) dB bounds of the magnitude response.'''
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
    return lowest, highest
def show_frequency_response( filter_type , samplerate ):
    '''Plot the gain (frequency) response of a filter by probing it with an impulse.'''
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs ) )
    fft_db = 20 * np.log10(fft_out )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel('''Frequency (Hz)''' )
plt.xscale('''log''' )
# Display within reasonable bounds
    bounds = get_bounds(fft_db , samplerate )
    plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
    plt.ylabel('''Gain (dB)''' )
    plt.plot(fft_db )
plt.show()
def show_phase_response( filter_type , samplerate ):
    '''Plot the phase response of a filter by probing it with an impulse.'''
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs ) )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel('''Frequency (Hz)''' )
plt.xscale('''log''' )
plt.ylim(-2 * pi , 2 * pi )
plt.ylabel('''Phase shift (Radians)''' )
    plt.plot(np.unwrap(fft_out , -2 * pi ) )
plt.show()
| 671 |
def _modexpt( base , exponent , modulo_value ):
    '''Recursive modular exponentiation: base**exponent % modulo_value by squaring.'''
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base , exponent // 2 , modulo_value ) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base , exponent - 1 , modulo_value )) % modulo_value
def solution( base = 1777 , height = 1855 , digits = 8 ):
    '''Project Euler 188: last `digits` digits of the hyperexponentiation (tetration) of base by height.'''
    result = base
    for _ in range(1 , height ):
        result = _modexpt(base , result , 10**digits )
    return result
if __name__ == "__main__":
print(F"{solution() = }")
| 671 | 1 |
import argparse
import datetime
def zeller( date_input ):
    '''Zeller's congruence: return which day of the week a mm-dd-yyyy or mm/dd/yyyy date falls on.'''
    days = {
'''0''': '''Sunday''',
'''1''': '''Monday''',
'''2''': '''Tuesday''',
'''3''': '''Wednesday''',
'''4''': '''Thursday''',
'''5''': '''Friday''',
'''6''': '''Saturday''',
}
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input ) < 11:
        raise ValueError('''Must be 10 characters long''' )
    # Get month
    m = int(date_input[0] + date_input[1] )
# Validate
if not 0 < m < 13:
raise ValueError('''Month must be between 1 - 12''' )
    sep_a = date_input[2]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError('''Date separator must be \'-\' or \'/\'''' )
    # Get day
    d = int(date_input[3] + date_input[4] )
    # Validate
    if not 0 < d < 32:
        raise ValueError('''Date must be between 1 - 31''' )
    # Get second separator
    sep_a = date_input[5]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError('''Date separator must be \'-\' or \'/\'''' )
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
# Arbitrary year range
if not 45 < y < 8500:
raise ValueError(
'''Year out of range. There has to be some sort of limit...right?''' )
# Get datetime obj for validation
    dt_ck = datetime.date(int(y ) , int(m ) , int(d ) )
# Start math
if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y )[:2] )
    k = int(str(y )[2:] )
    t = int(2.6 * m - 5.39 )
    u = int(c / 4 )
    v = int(k / 4 )
    x = int(d + k )
    z = int(t + u + v + x )
    w = int(z - (2 * c) )
    f = round(w % 7 )
# End math
# Validate math
if f != convert_datetime_days[dt_ck.weekday()]:
raise AssertionError('''The date was evaluated incorrectly. Contact developer.''' )
# Response
lowercase__ = F"""Your date {date_input}, is a {days[str(lowerCamelCase_ )]}!"""
return response
if __name__ == "__main__":
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser(
description=(
'Find out what day of the week nearly any date is or was. Enter '
'date as a string in the mm-dd-yyyy or mm/dd/yyyy format'
)
)
parser.add_argument(
'date_input', type=str, help='Date as a string (mm-dd-yyyy or mm/dd/yyyy)'
)
    args = parser.parse_args()
zeller(args.date_input)
| 671 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class _UpperCAmelCase ( DiffusionPipeline ):
"""simple docstring"""
    def __init__( self : Optional[int], speech_model : WhisperForConditionalGeneration, speech_processor : WhisperProcessor, vae : AutoencoderKL, text_encoder : CLIPTextModel, tokenizer : CLIPTokenizer, unet : UNetaDConditionModel, scheduler : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker : StableDiffusionSafetyChecker, feature_extractor : CLIPImageProcessor, ):
'''simple docstring'''
super().__init__()
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
        self.register_modules(
            speech_model=speech_model, speech_processor=speech_processor, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor, )
    def enable_attention_slicing( self : Optional[Any], slice_size : Optional[Union[str, int]] = "auto" ):
        '''simple docstring'''
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
    def disable_attention_slicing( self : Optional[Any] ):
        '''simple docstring'''
        self.enable_attention_slicing(None )
@torch.no_grad()
    def __call__( self : Any, audio : Optional[Any], sampling_rate : Optional[Any]=16_000, height : int = 512, width : int = 512, num_inference_steps : int = 50, guidance_scale : float = 7.5, negative_prompt : Optional[Union[str, List[str]]] = None, num_images_per_prompt : Optional[int] = 1, eta : float = 0.0, generator : Optional[torch.Generator] = None, latents : Optional[torch.FloatTensor] = None, output_type : Optional[str] = "pil", return_dict : bool = True, callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps : int = 1, **kwargs : Optional[Any], ):
'''simple docstring'''
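        # transcribe the input audio with Whisper; the decoded text then serves as the diffusion prompt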
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors='''pt''', sampling_rate=sampling_rate ).input_features.to(self.device )
        prompt = self.speech_model.generate(inputs, max_length=480_000 )
        prompt = self.speech_processor.tokenizer.batch_decode(prompt, skip_special_tokens=True, normalize=True )[
            0
        ]
        if isinstance(prompt, str ):
            batch_size = 1
        elif isinstance(prompt, list ):
            batch_size = len(prompt )
        else:
            raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(prompt )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowerCamelCase, lowerCamelCase ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(lowerCamelCase )}.""" )
# get prompt text embeddings
lowercase__ = self.tokenizer(
lowerCamelCase, padding='''max_length''', max_length=self.tokenizer.model_max_length, return_tensors='''pt''', )
lowercase__ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowercase__ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
lowercase__ = text_input_ids[:, : self.tokenizer.model_max_length]
lowercase__ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowercase__ , lowercase__ , lowercase__ = text_embeddings.shape
lowercase__ = text_embeddings.repeat(1, lowerCamelCase, 1 )
lowercase__ = text_embeddings.view(bs_embed * num_images_per_prompt, lowerCamelCase, -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowercase__ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowercase__ = 42
if negative_prompt is None:
lowercase__ = [''''''] * batch_size
elif type(lowerCamelCase ) is not type(lowerCamelCase ):
raise TypeError(
F"""`negative_prompt` should be the same type to `prompt`, but got {type(lowerCamelCase )} !="""
F""" {type(lowerCamelCase )}.""" )
elif isinstance(lowerCamelCase, lowerCamelCase ):
lowercase__ = [negative_prompt]
elif batch_size != len(lowerCamelCase ):
raise ValueError(
F"""`negative_prompt`: {negative_prompt} has batch size {len(lowerCamelCase )}, but `prompt`:"""
F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
''' the batch size of `prompt`.''' )
else:
lowercase__ = negative_prompt
lowercase__ = text_input_ids.shape[-1]
lowercase__ = self.tokenizer(
lowerCamelCase, padding='''max_length''', max_length=lowerCamelCase, truncation=lowerCamelCase, return_tensors='''pt''', )
lowercase__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase__ = uncond_embeddings.shape[1]
lowercase__ = uncond_embeddings.repeat(1, lowerCamelCase, 1 )
lowercase__ = uncond_embeddings.view(batch_size * num_images_per_prompt, lowerCamelCase, -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase__ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowercase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowercase__ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowercase__ = torch.randn(lowerCamelCase, generator=lowerCamelCase, device='''cpu''', dtype=lowerCamelCase ).to(
self.device )
else:
lowercase__ = torch.randn(lowerCamelCase, generator=lowerCamelCase, device=self.device, dtype=lowerCamelCase )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
lowercase__ = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(lowerCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowercase__ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowercase__ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase__ = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowercase__ = {}
if accepts_eta:
lowercase__ = eta
for i, t in enumerate(self.progress_bar(lowerCamelCase ) ):
# expand the latents if we are doing classifier free guidance
lowercase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__ = self.scheduler.scale_model_input(lowerCamelCase, lowerCamelCase )
# predict the noise residual
lowercase__ = self.unet(lowerCamelCase, lowerCamelCase, encoder_hidden_states=lowerCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
lowercase__ , lowercase__ = noise_pred.chunk(2 )
lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowercase__ = self.scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowerCamelCase, lowerCamelCase, lowerCamelCase )
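        # rescale the latents by the inverse of the SD VAE scaling factor (0.18215) before decoding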
lowercase__ = 1 / 0.18215 * latents
lowercase__ = self.vae.decode(lowerCamelCase ).sample
lowercase__ = (image / 2 + 0.5).clamp(0, 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowercase__ = image.cpu().permute(0, 2, 3, 1 ).float().numpy()
if output_type == "pil":
lowercase__ = self.numpy_to_pil(lowerCamelCase )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=lowerCamelCase, nsfw_content_detected=lowerCamelCase )
| 671 | 1 |
from math import sqrt
def solution( limit = 100_0000 ):
    '''Project Euler 86: smallest cuboid size M such that the count of integer-shortest-path cuboids up to M exceeds the limit.'''
    num_cuboids = 0
    max_cuboid_size = 0
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
                num_cuboids += (
                    min(max_cuboid_size , sum_shortest_sides // 2 )
                    - max(1 , sum_shortest_sides - max_cuboid_size )
                    + 1
                )
return max_cuboid_size
if __name__ == "__main__":
print(F"{solution() = }")
| 671 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """simple docstring"""
    destination_vertex: int
    weight: int
class _UpperCAmelCase :
"""simple docstring"""
    def __init__( self : str, size : int ):
        '''simple docstring'''
        self._graph = [[] for _ in range(size )]
        self._size = size
    def __getitem__( self : Optional[Any], vertex : int ):
        '''simple docstring'''
        return iter(self._graph[vertex] )
    @property
    def size( self : str ):
        '''simple docstring'''
        return self._size
    def add_edge( self : Union[str, Any], from_vertex : int, to_vertex : int, weight : int ):
        '''simple docstring'''
        if weight not in (0, 1):
            raise ValueError('''Edge weight must be either 0 or 1.''' )
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('''Vertex indexes must be in [0; size).''' )
        self._graph[from_vertex].append(Edge(to_vertex, weight ) )
    def get_shortest_path( self : Optional[int], start_vertex : int, finish_vertex : int ):
        '''simple docstring'''
        queue = deque([start_vertex] )
        distances = [None] * self.size
        distances[start_vertex] = 0
while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int )
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('''No path from start_vertex to finish_vertex.''' )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 671 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( PipelineLatentTesterMixin ,PipelineKarrasSchedulerTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
    """simple docstring"""
    pipeline_class = StableDiffusionInstructPixaPixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width""", """cross_attention_kwargs"""}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowercase__ ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=8, out_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''), cross_attention_dim=32, )
lowercase__ = PNDMScheduler(skip_prk_steps=lowerCamelCase )
torch.manual_seed(0 )
lowercase__ = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, )
torch.manual_seed(0 )
lowercase__ = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, )
lowercase__ = CLIPTextModel(lowerCamelCase )
lowercase__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase__ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowercase__ ( self : Optional[Any], lowerCamelCase : List[Any], lowerCamelCase : str=0 ):
'''simple docstring'''
lowercase__ = floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
lowercase__ = image.cpu().permute(0, 2, 3, 1 )[0]
lowercase__ = Image.fromarray(np.uinta(lowerCamelCase ) ).convert('''RGB''' )
if str(lowerCamelCase ).startswith('''mps''' ):
lowercase__ = torch.manual_seed(lowerCamelCase )
else:
lowercase__ = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
lowercase__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''image_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase )
lowercase__ = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = self.get_dummy_inputs(lowerCamelCase )
lowercase__ = sd_pipe(**lowerCamelCase ).images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase__ = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase )
lowercase__ = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = self.get_dummy_inputs(lowerCamelCase )
lowercase__ = '''french fries'''
lowercase__ = sd_pipe(**lowerCamelCase, negative_prompt=lowerCamelCase )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase__ = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase )
lowercase__ = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = self.get_dummy_inputs(lowerCamelCase )
lowercase__ = [inputs['''prompt''']] * 2
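        # build a batched pixel-space image matching the duplicated prompts (float tensor, channels-first, repeated twice)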
lowercase__ = np.array(inputs['''image'''] ).astype(np.floataa ) / 255.0
lowercase__ = torch.from_numpy(lowerCamelCase ).unsqueeze(0 ).to(lowerCamelCase )
lowercase__ = image / 2 + 0.5
lowercase__ = image.permute(0, 3, 1, 2 )
lowercase__ = image.repeat(2, 1, 1, 1 )
lowercase__ = sd_pipe(**lowerCamelCase ).images
lowercase__ = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
lowercase__ = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = EulerAncestralDiscreteScheduler(
beta_start=0.00085, beta_end=0.012, beta_schedule='''scaled_linear''' )
lowercase__ = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase )
lowercase__ = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = self.get_dummy_inputs(lowerCamelCase )
lowercase__ = sd_pipe(**lowerCamelCase ).images
lowercase__ = image[0, -3:, -3:, -1]
        slice = [round(x, 4 ) for x in image_slice.flatten().tolist()]
        print(''','''.join([str(x ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
lowercase__ = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowercase__ ( self : List[str] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = self.get_dummy_components()
lowercase__ = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase )
lowercase__ = VaeImageProcessor(do_resize=lowerCamelCase, do_normalize=lowerCamelCase )
lowercase__ = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = pipe(**self.get_dummy_inputs_by_type(lowerCamelCase, input_image_type='''pt''' ) )[0]
lowercase__ = components['''vae''']
lowercase__ = self.get_dummy_inputs_by_type(lowerCamelCase, input_image_type='''pt''' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
lowercase__ = vae.encode(inputs[image_param] ).latent_dist.mode()
lowercase__ = pipe(**lowerCamelCase )[0]
lowercase__ = np.abs(out - out_latents_inputs ).max()
self.assertLess(lowerCamelCase, 1E-4, '''passing latents as image input generate different result from passing image''' )
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self : str ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : List[str], lowerCamelCase : Optional[Any]=0 ):
'''simple docstring'''
lowercase__ = torch.manual_seed(lowerCamelCase )
lowercase__ = load_image(
'''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg''' )
lowercase__ = {
'''prompt''': '''turn him into a cyborg''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''image_guidance_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''', safety_checker=lowerCamelCase )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
pipe.enable_attention_slicing()
lowercase__ = self.get_inputs()
lowercase__ = pipe(**lowerCamelCase ).images
lowercase__ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''', safety_checker=lowerCamelCase )
lowercase__ = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
pipe.enable_attention_slicing()
lowercase__ = self.get_inputs()
lowercase__ = pipe(**lowerCamelCase ).images
lowercase__ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''', safety_checker=lowerCamelCase )
lowercase__ = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
pipe.enable_attention_slicing()
lowercase__ = self.get_inputs()
lowercase__ = pipe(**lowerCamelCase ).images
lowercase__ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = 0
def callback_fn(lowerCamelCase : int, lowerCamelCase : int, lowerCamelCase : torch.FloatTensor ) -> None:
lowercase__ = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowercase__ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
lowercase__ = latents[0, -3:, -3:, -1]
lowercase__ = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
lowercase__ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
lowercase__ = latents[0, -3:, -3:, -1]
lowercase__ = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
lowercase__ = False
lowercase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''', safety_checker=lowerCamelCase, torch_dtype=torch.floataa )
lowercase__ = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
pipe.enable_attention_slicing()
lowercase__ = self.get_inputs()
pipe(**lowerCamelCase, callback=lowerCamelCase, callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''', safety_checker=lowerCamelCase, torch_dtype=torch.floataa )
lowercase__ = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase__ = self.get_inputs()
lowercase__ = pipe(**lowerCamelCase )
lowercase__ = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
lowercase__ = inputs['''image'''].resize((504, 504) )
lowercase__ = '''timbrooks/instruct-pix2pix'''
lowercase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
lowerCamelCase, safety_checker=lowerCamelCase, )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
pipe.enable_attention_slicing()
lowercase__ = pipe(**lowerCamelCase )
lowercase__ = output.images[0]
lowercase__ = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
lowercase__ = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
| 671 |
class SubArray:
"""simple docstring"""
    def __init__( self : Optional[int], arr : Union[str, Any] ):
        '''simple docstring'''
        # split the comma-separated input string into a list of number strings
        self.array = arr.split(''',''' )
    def solve_sub_array( self : Optional[int] ):
        '''Kadane-style maximum subarray sum over the parsed numbers.'''
        rear = [int(self.array[0] )] * len(self.array )
        sum_value = [int(self.array[0] )] * len(self.array )
        for i in range(1, len(self.array ) ):
            sum_value[i] = max(
                int(self.array[i] ) + sum_value[i - 1], int(self.array[i] ) )
            rear[i] = max(sum_value[i], rear[i - 1] )
return rear[len(self.array ) - 1]
if __name__ == "__main__":
    whole_array = input('please input some numbers:')
    array = SubArray(whole_array)
    re = array.solve_sub_array()
print(('the results is:', re))
| 671 | 1 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def test_accelerated_optimizer_pickling( self : str ):
        '''simple docstring'''
        model = torch.nn.Linear(10, 10 )
        optimizer = torch.optim.SGD(model.parameters(), 0.1 )
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer )
        try:
            pickle.loads(pickle.dumps(optimizer ) )
except Exception as e:
self.fail(F"""Accelerated optimizer pickling failed with {e}""" )
AcceleratorState._reset_state()
| 671 |
from itertools import count
def solution( min_block_length = 50 ):
    '''Project Euler 115: smallest n at which the fill-count function first exceeds one million.'''
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length ):
        fill_count_functions.append(1 )
        for block_length in range(min_block_length , n + 1 ):
            for block_start in range(n - block_length ):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
if fill_count_functions[n] > 100_0000:
break
return n
if __name__ == "__main__":
print(F"{solution() = }")
| 671 | 1 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput( BaseOutput ):
    """simple docstring"""
    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor
class _UpperCAmelCase ( SchedulerMixin ,ConfigMixin ):
    """simple docstring"""
    order = 1
@register_to_config
    def __init__( self : Union[str, Any], num_train_timesteps : int = 2_000, snr : float = 0.15, sigma_min : float = 0.01, sigma_max : float = 1348.0, sampling_eps : float = 1E-5, correct_steps : int = 1, ):
        '''simple docstring'''
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.timesteps = None
        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps )
    def scale_model_input( self : List[str], sample : torch.FloatTensor, timestep : Optional[int] = None ):
'''simple docstring'''
return sample
    def set_timesteps( self : Dict, num_inference_steps : int, sampling_eps : float = None, device : Union[str, torch.device] = None ):
        '''simple docstring'''
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device )
    def set_sigmas( self : str, num_inference_steps : int, sigma_min : float = None, sigma_max : float = None, sampling_eps : float = None ):
        '''simple docstring'''
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps )
        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min ), math.log(sigma_max ), num_inference_steps ) )
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
    def get_adjacent_sigma( self : Optional[int], timesteps, t ):
'''simple docstring'''
return torch.where(
timesteps == 0, torch.zeros_like(t.to(timesteps.device ) ), self.discrete_sigmas[timesteps - 1].to(timesteps.device ), )
    def step_pred( self : Tuple, model_output : torch.FloatTensor, timestep : int, sample : torch.FloatTensor, generator : Optional[torch.Generator] = None, return_dict : bool = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
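        # broadcast the scalar timestep across the batch and map it to a discrete index into the sigma schedule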
        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps ) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device )
        sigma = self.discrete_sigmas[timesteps].to(sample.device )
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep ).to(sample.device )
        drift = torch.zeros_like(sample )
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape ) < len(sample.shape ):
            diffusion = diffusion.unsqueeze(-1 )
        drift = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean )
    def step_correct( self : int, model_output : torch.FloatTensor, sample : torch.FloatTensor, generator : Optional[torch.Generator] = None, return_dict : bool = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator ).to(sample.device )
        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1 ), dim=-1 ).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1 ), dim=-1 ).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0] ).to(sample.device )
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape ) < len(sample.shape ):
            step_size = step_size.unsqueeze(-1 )
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )
    def add_noise( self : List[str], original_samples : torch.FloatTensor, noise : torch.FloatTensor, timesteps : torch.FloatTensor, ):
'''simple docstring'''
# Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device )
        sigmas = self.discrete_sigmas.to(original_samples.device )[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples ) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
return noisy_samples
def __len__( self : Union[str, Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
| 671 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class _UpperCAmelCase ( SequenceFeatureExtractor ):
"""simple docstring"""
lowercase__ = ["""input_features""", """is_longer"""]
def __init__( self : Optional[int], lowerCamelCase : int=64, lowerCamelCase : Union[str, Any]=48_000, lowerCamelCase : str=480, lowerCamelCase : Tuple=10, lowerCamelCase : List[Any]=1_024, lowerCamelCase : Optional[int]=0.0, lowerCamelCase : Optional[Any]=False, lowerCamelCase : float = 0, lowerCamelCase : float = 14_000, lowerCamelCase : int = None, lowerCamelCase : str = "fusion", lowerCamelCase : str = "repeatpad", **lowerCamelCase : Dict, ):
'''simple docstring'''
super().__init__(
feature_size=lowerCamelCase, sampling_rate=lowerCamelCase, padding_value=lowerCamelCase, return_attention_mask=lowerCamelCase, **lowerCamelCase, )
lowercase__ = top_db
lowercase__ = truncation
lowercase__ = padding
lowercase__ = fft_window_size
lowercase__ = (fft_window_size >> 1) + 1
lowercase__ = hop_length
lowercase__ = max_length_s
lowercase__ = max_length_s * sampling_rate
lowercase__ = sampling_rate
lowercase__ = frequency_min
lowercase__ = frequency_max
lowercase__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins, num_mel_filters=lowerCamelCase, min_frequency=lowerCamelCase, max_frequency=lowerCamelCase, sampling_rate=lowerCamelCase, norm=lowerCamelCase, mel_scale='''htk''', )
lowercase__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins, num_mel_filters=lowerCamelCase, min_frequency=lowerCamelCase, max_frequency=lowerCamelCase, sampling_rate=lowerCamelCase, norm='''slaney''', mel_scale='''slaney''', )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = copy.deepcopy(self.__dict__ )
lowercase__ = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
    def _np_extract_fbank_features( self : Optional[int], waveform : np.array, mel_filters : Optional[np.array] = None ):
        '''simple docstring'''
        log_mel_spectrogram = spectrogram(
            waveform, window_function(self.fft_window_size, '''hann''' ), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=mel_filters, log_mel='''dB''', )
return log_mel_spectrogram.T
    def _random_mel_fusion( self : int, mel, total_frames, chunk_frames ):
'''simple docstring'''
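        # split the usable chunk positions into front/middle/back thirds and sample one chunk from each for fusion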
lowercase__ = np.array_split(list(range(0, total_frames - chunk_frames + 1 ) ), 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
lowercase__ = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
lowercase__ = [0]
# randomly choose index for each part
lowercase__ = np.random.choice(ranges[0] )
lowercase__ = np.random.choice(ranges[1] )
lowercase__ = np.random.choice(ranges[2] )
lowercase__ = mel[idx_front : idx_front + chunk_frames, :]
lowercase__ = mel[idx_middle : idx_middle + chunk_frames, :]
lowercase__ = mel[idx_back : idx_back + chunk_frames, :]
lowercase__ = torch.tensor(mel[None, None, :] )
lowercase__ = torch.nn.functional.interpolate(
lowerCamelCase, size=[chunk_frames, 64], mode='''bilinear''', align_corners=lowerCamelCase )
lowercase__ = mel_shrink[0][0].numpy()
lowercase__ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0 )
return mel_fusion
    def _get_input_mel( self : List[str], waveform : np.array, max_length : int, truncation : Dict, padding : Union[str, Any] ):
'''simple docstring'''
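        # long waveforms are truncated (random crop or 4-way fusion); short ones are repeated and/or zero-padded to max_length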
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
lowercase__ = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
lowercase__ = len(lowerCamelCase ) - max_length
lowercase__ = np.random.randint(0, overflow + 1 )
lowercase__ = waveform[idx : idx + max_length]
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters )
lowercase__ = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
lowercase__ = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
lowercase__ = np.stack([mel, mel, mel, mel], axis=0 )
lowercase__ = False
else:
lowercase__ = self._random_mel_fusion(lowerCamelCase, lowerCamelCase, lowerCamelCase )
lowercase__ = True
else:
raise NotImplementedError(F"""data_truncating {truncation} not implemented""" )
else:
lowercase__ = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
lowercase__ = int(max_length / len(lowerCamelCase ) )
lowercase__ = np.stack(np.tile(lowerCamelCase, n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
lowercase__ = int(max_length / len(lowerCamelCase ) )
lowercase__ = np.stack(np.tile(lowerCamelCase, lowerCamelCase ) )
lowercase__ = np.pad(lowerCamelCase, (0, max_length - waveform.shape[0]), mode='''constant''', constant_values=0 )
if truncation == "fusion":
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters )
lowercase__ = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0 )
else:
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters_slaney )[None, :]
return input_mel, longer
    def __call__( self : Union[str, Any], raw_speech : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], truncation : str = None, padding : Optional[str] = None, max_length : Optional[int] = None, sampling_rate : Optional[int] = None, return_tensors : Optional[Union[str, TensorType]] = None, **kwargs : List[str], ):
'''simple docstring'''
lowercase__ = truncation if truncation is not None else self.truncation
lowercase__ = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
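        # a 2-D numpy array is treated as a batch of mono waveforms; higher dimensionality is rejected below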
lowercase__ = isinstance(lowerCamelCase, np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
lowercase__ = is_batched_numpy or (
isinstance(lowerCamelCase, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase__ = [np.asarray(lowerCamelCase, dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCamelCase, np.ndarray ):
lowercase__ = np.asarray(lowerCamelCase, dtype=np.floataa )
elif isinstance(lowerCamelCase, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase__ = [np.asarray(lowerCamelCase )]
# convert to mel spectrogram, truncate and pad if needed.
lowercase__ = [
self._get_input_mel(lowerCamelCase, max_length if max_length else self.nb_max_samples, lowerCamelCase, lowerCamelCase )
for waveform in raw_speech
]
lowercase__ = []
lowercase__ = []
for mel, longer in padded_inputs:
input_mel.append(lowerCamelCase )
is_longer.append(lowerCamelCase )
if truncation == "fusion" and sum(lowerCamelCase ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
lowercase__ = np.random.randint(0, len(lowerCamelCase ) )
lowercase__ = True
if isinstance(input_mel[0], lowerCamelCase ):
lowercase__ = [np.asarray(lowerCamelCase, dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
lowercase__ = [[longer] for longer in is_longer]
lowercase__ = {'''input_features''': input_mel, '''is_longer''': is_longer}
lowercase__ = BatchFeature(lowerCamelCase )
if return_tensors is not None:
lowercase__ = input_features.convert_to_tensors(lowerCamelCase )
return input_features
| 671 | 1 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint( checkpoint_path , metadata_path , entity_vocab_path , pytorch_dump_folder_path , model_size ):
    '''simple docstring'''
    # Load configuration defined in the metadata file
    with open(metadata_path ) as metadata_file:
        metadata = json.load(metadata_file )
    config = LukeConfig(use_entity_aware_attention=True , **metadata['''model_config'''] )
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''module''']
    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path )
# add an entry for [MASK2]
lowercase__ = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
lowercase__ = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
lowercase__ = AddedToken('''<ent>''' , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ )
lowercase__ = AddedToken('''<ent2>''' , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_ , '''tokenizer_config.json''' ) , '''r''' ) as f:
lowercase__ = json.load(lowerCamelCase_ )
lowercase__ = '''MLukeTokenizer'''
with open(os.path.join(lowerCamelCase_ , '''tokenizer_config.json''' ) , '''w''' ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_ , MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
lowercase__ = MLukeTokenizer.from_pretrained(lowerCamelCase_ )
# Initialize the embeddings of the special tokens
lowercase__ = tokenizer.convert_tokens_to_ids(['''@'''] )[0]
lowercase__ = tokenizer.convert_tokens_to_ids(['''#'''] )[0]
lowercase__ = state_dict['''embeddings.word_embeddings.weight''']
lowercase__ = word_emb[ent_init_index].unsqueeze(0 )
lowercase__ = word_emb[enta_init_index].unsqueeze(0 )
lowercase__ = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
lowercase__ = state_dict[bias_name]
lowercase__ = decoder_bias[ent_init_index].unsqueeze(0 )
lowercase__ = decoder_bias[enta_init_index].unsqueeze(0 )
lowercase__ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
lowercase__ = F"""encoder.layer.{layer_index}.attention.self."""
lowercase__ = state_dict[prefix + matrix_name]
lowercase__ = state_dict[prefix + matrix_name]
lowercase__ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
lowercase__ = state_dict['''entity_embeddings.entity_embeddings.weight''']
lowercase__ = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
lowercase__ = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
lowercase__ = state_dict['''entity_predictions.bias''']
lowercase__ = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
lowercase__ = torch.cat([entity_prediction_bias, entity_mask_bias] )
lowercase__ = LukeForMaskedLM(config=lowerCamelCase_ ).eval()
state_dict.pop('''entity_predictions.decoder.weight''' )
state_dict.pop('''lm_head.decoder.weight''' )
state_dict.pop('''lm_head.decoder.bias''' )
lowercase__ = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
lowercase__ = state_dict[key]
else:
lowercase__ = state_dict[key]
lowercase__ , lowercase__ = model.load_state_dict(lowerCamelCase_ , strict=lowerCamelCase_ )
if set(lowerCamelCase_ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(lowerCamelCase_ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
lowercase__ = MLukeTokenizer.from_pretrained(lowerCamelCase_ , task='''entity_classification''' )
lowercase__ = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
lowercase__ = (0, 9)
lowercase__ = tokenizer(lowerCamelCase_ , entity_spans=[span] , return_tensors='''pt''' )
lowercase__ = model(**lowerCamelCase_ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
lowercase__ = torch.Size((1, 33, 768) )
lowercase__ = torch.tensor([[0.08_92, 0.05_96, -0.28_19], [0.01_34, 0.11_99, 0.05_73], [-0.01_69, 0.09_27, 0.06_44]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase_ , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
lowercase__ = torch.Size((1, 1, 768) )
lowercase__ = torch.tensor([[-0.14_82, 0.06_09, 0.03_22]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowerCamelCase_ , atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
lowercase__ = MLukeTokenizer.from_pretrained(lowerCamelCase_ )
lowercase__ = '''Tokyo is the capital of <mask>.'''
lowercase__ = (24, 30)
lowercase__ = tokenizer(lowerCamelCase_ , entity_spans=[span] , return_tensors='''pt''' )
lowercase__ = model(**lowerCamelCase_ )
lowercase__ = encoding['''input_ids'''][0].tolist()
lowercase__ = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
lowercase__ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowerCamelCase_ )
lowercase__ = outputs.entity_logits[0][0].argmax().item()
lowercase__ = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(lowerCamelCase_ ) )
model.save_pretrained(lowerCamelCase_ )
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = ['''[MASK]''', '''[PAD]''', '''[UNK]''']
lowercase__ = [json.loads(lowerCamelCase_ ) for line in open(lowerCamelCase_ )]
lowercase__ = {}
for entry in data:
lowercase__ = entry['''id''']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
lowercase__ = entity_id
break
lowercase__ = F"""{language}:{entity_name}"""
lowercase__ = entity_id
return new_mapping
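# Example (a sketch): a JSON-lines entry such as
#   {"id": 3, "entities": [["Japan", "en"], ["日本", "ja"]]}
# produces {"en:Japan": 3, "ja:日本": 3} in the returned mapping.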
if __name__ == "__main__":
A__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
A__ : List[Any] = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 671 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
lowercase__ = 42
lowercase__ = None
lowercase__ = None
def a ( ):
'''simple docstring'''
lowercase__ = Node(1 )
lowercase__ = Node(2 )
lowercase__ = Node(3 )
lowercase__ = Node(4 )
lowercase__ = Node(5 )
return tree
def a ( lowerCamelCase_ ):
'''simple docstring'''
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def a ( lowerCamelCase_ ):
'''simple docstring'''
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def a ( lowerCamelCase_ ):
'''simple docstring'''
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def a ( lowerCamelCase_ ):
'''simple docstring'''
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
if root is None:
return output
lowercase__ = deque([root] )
while process_queue:
lowercase__ = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
def populate_output(lowerCamelCase_ , lowerCamelCase_ ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(lowerCamelCase_ , lowerCamelCase_ )
return output
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
def populate_output(lowerCamelCase_ , lowerCamelCase_ ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(lowerCamelCase_ , lowerCamelCase_ )
return output
def a ( lowerCamelCase_ ):
'''simple docstring'''
if root is None:
return []
lowercase__ = []
lowercase__ = 0
lowercase__ = height(lowerCamelCase_ )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(lowerCamelCase_ , lowerCamelCase_ ) )
lowercase__ = 1
else:
output.append(get_nodes_from_right_to_left(lowerCamelCase_ , lowerCamelCase_ ) )
lowercase__ = 0
return output
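# A deque-based iterative zigzag sketch, as an alternative to the level-by-level
# version above; `zigzag_iterative` is a hypothetical name, not in the original.
def zigzag_iterative(root):
    if root is None:
        return []
    output = []
    current_level = deque([root])
    left_to_right = True
    while current_level:
        level_values = [node.data for node in current_level]
        # reverse every other level to produce the zigzag ordering
        output.append(level_values if left_to_right else level_values[::-1])
        next_level = deque()
        for node in current_level:
            if node.left:
                next_level.append(node.left)
            if node.right:
                next_level.append(node.right)
        left_to_right = not left_to_right
        current_level = next_level
    return output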
def a ( ): # Main function for testing.
'''simple docstring'''
lowercase__ = make_tree()
print(F"""In-order Traversal: {inorder(lowerCamelCase_ )}""" )
print(F"""Pre-order Traversal: {preorder(lowerCamelCase_ )}""" )
print(F"""Post-order Traversal: {postorder(lowerCamelCase_ )}""" , '''\n''' )
print(F"""Height of Tree: {height(lowerCamelCase_ )}""" , '''\n''' )
print('''Complete Level Order Traversal: ''' )
print(level_order(lowerCamelCase_ ) , '''\n''' )
print('''Level-wise order Traversal: ''' )
for level in range(1 , height(lowerCamelCase_ ) + 1 ):
print(F"""Level {level}:""" , get_nodes_from_left_to_right(lowerCamelCase_ , level=lowerCamelCase_ ) )
print('''\nZigZag order Traversal: ''' )
print(zigzag(lowerCamelCase_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 671 | 1 |
import os
def a ( ):
'''simple docstring'''
lowercase__ = os.path.dirname(os.path.realpath(lowerCamelCase_ ) )
lowercase__ = os.path.join(lowerCamelCase_ , '''triangle.txt''' )
with open(lowerCamelCase_ ) as f:
lowercase__ = f.readlines()
lowercase__ = []
for line in triangle:
lowercase__ = []
for number in line.strip().split(''' ''' ):
numbers_from_line.append(int(lowerCamelCase_ ) )
a.append(lowerCamelCase_ )
for i in range(1 , len(lowerCamelCase_ ) ):
for j in range(len(a[i] ) ):
lowercase__ = a[i - 1][j] if j != len(a[i - 1] ) else 0
lowercase__ = a[i - 1][j - 1] if j > 0 else 0
a[i][j] += max(lowerCamelCase_ , lowerCamelCase_ )
return max(a[-1] )
if __name__ == "__main__":
print(solution())
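# Hedged sketch: the same top-down DP on an in-memory triangle, assuming a
# hardcoded input; `max_path_sum` is a hypothetical name for illustration.
def max_path_sum(triangle):
    rows = [row[:] for row in triangle]
    for i in range(1, len(rows)):
        for j in range(len(rows[i])):
            # each cell accumulates the better of its two parents
            left = rows[i - 1][j - 1] if j > 0 else 0
            right = rows[i - 1][j] if j != len(rows[i - 1]) else 0
            rows[i][j] += max(left, right)
    return max(rows[-1])

assert max_path_sum([[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]) == 23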
| 671 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = DistilBertTokenizer
lowercase__ = DistilBertTokenizerFast
lowercase__ = True
@slow
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' )
lowercase__ = tokenizer.encode('''sequence builders''', add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.encode('''multi-sequence build''', add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase, lowerCamelCase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 671 | 1 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = args.log_outputs
lowercase__ = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )
# load metric
lowercase__ = load_metric('''wer''' )
lowercase__ = load_metric('''cer''' )
# compute metrics
lowercase__ = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
lowercase__ = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
# print & log results
lowercase__ = F"""WER: {wer_result}\nCER: {cer_result}"""
print(lowerCamelCase_ )
with open(F"""{dataset_id}_eval_results.txt""" , '''w''' ) as f:
f.write(lowerCamelCase_ )
    # log all results in text files; possibly interesting for analysis
if log_outputs is not None:
lowercase__ = F"""log_{dataset_id}_predictions.txt"""
lowercase__ = F"""log_{dataset_id}_targets.txt"""
with open(lowerCamelCase_ , '''w''' ) as p, open(lowerCamelCase_ , '''w''' ) as t:
# mapping function to write output
def write_to_file(lowerCamelCase_ , lowerCamelCase_ ):
p.write(F"""{i}""" + '''\n''' )
p.write(batch['''prediction'''] + '''\n''' )
t.write(F"""{i}""" + '''\n''' )
t.write(batch['''target'''] + '''\n''' )
result.map(lowerCamelCase_ , with_indices=lowerCamelCase_ )
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
lowercase__ = re.sub(lowerCamelCase_ , '''''' , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
lowercase__ = ['''\n\n''', '''\n''', ''' ''', ''' ''']
for t in token_sequences_to_ignore:
lowercase__ = ''' '''.join(text.split(lowerCamelCase_ ) )
return text
def a ( lowerCamelCase_ ):
'''simple docstring'''
# load dataset
lowercase__ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=lowerCamelCase_ )
    # for testing: only process the first few examples as a test
    # dataset = dataset.select(range(10))
# load processor
lowercase__ = AutoFeatureExtractor.from_pretrained(args.model_id )
lowercase__ = feature_extractor.sampling_rate
# resample audio
lowercase__ = dataset.cast_column('''audio''' , Audio(sampling_rate=lowerCamelCase_ ) )
# load eval pipeline
if args.device is None:
lowercase__ = 0 if torch.cuda.is_available() else -1
lowercase__ = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(lowerCamelCase_ ):
lowercase__ = asr(
batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
lowercase__ = prediction['''text''']
lowercase__ = normalize_text(batch['''sentence'''] )
return batch
# run inference on all examples
lowercase__ = dataset.map(lowerCamelCase_ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(lowerCamelCase_ , lowerCamelCase_ )
if __name__ == "__main__":
A__ : int = argparse.ArgumentParser()
parser.add_argument(
'--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
)
parser.add_argument(
'--dataset',
type=str,
required=True,
help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
)
parser.add_argument(
'--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
)
parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
parser.add_argument(
'--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.'
)
parser.add_argument(
'--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.'
)
parser.add_argument(
'--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
)
parser.add_argument(
'--device',
type=int,
default=None,
help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
)
A__ : Union[str, Any] = parser.parse_args()
main(args)
| 671 |
from __future__ import annotations
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = 0.00
lowercase__ = 0
for resistor in resistors:
if resistor <= 0:
lowercase__ = F"""Resistor at index {index} has a negative or zero value!"""
raise ValueError(lowerCamelCase_ )
first_sum += 1 / float(lowerCamelCase_ )
index += 1
return 1 / first_sum
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = 0.00
lowercase__ = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
lowercase__ = F"""Resistor at index {index} has a negative value!"""
raise ValueError(lowerCamelCase_ )
index += 1
return sum_r
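# Hedged usage sketch (hypothetical names `resistor_parallel` / `resistor_series`,
# mirroring the two functions above, without their input validation): two 10 Ω
# resistors give 5 Ω in parallel and 20 Ω in series.
def resistor_parallel(resistors):
    return 1 / sum(1 / float(r) for r in resistors)

def resistor_series(resistors):
    return float(sum(resistors))

assert abs(resistor_parallel([10.0, 10.0]) - 5.0) < 1e-9
assert resistor_series([10.0, 10.0]) == 20.0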
if __name__ == "__main__":
import doctest
doctest.testmod()
| 671 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
A__ : int = None
A__ : Dict = logging.get_logger(__name__)
A__ : int = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
A__ : Optional[int] = {
'vocab_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json',
},
}
A__ : Dict = {
'camembert-base': 5_12,
}
A__ : Any = '▁'
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = ["""input_ids""", """attention_mask"""]
lowercase__ = CamembertTokenizer
def __init__( self : Tuple, lowerCamelCase : str=None, lowerCamelCase : Optional[Any]=None, lowerCamelCase : int="<s>", lowerCamelCase : Union[str, Any]="</s>", lowerCamelCase : Any="</s>", lowerCamelCase : Optional[Any]="<s>", lowerCamelCase : int="<unk>", lowerCamelCase : Any="<pad>", lowerCamelCase : Optional[int]="<mask>", lowerCamelCase : Any=["<s>NOTUSED", "</s>NOTUSED"], **lowerCamelCase : str, ):
'''simple docstring'''
        # Mask token behaves like a normal word, i.e. it includes the space before it
lowercase__ = AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase ) if isinstance(lowerCamelCase, lowerCamelCase ) else mask_token
super().__init__(
lowerCamelCase, tokenizer_file=lowerCamelCase, bos_token=lowerCamelCase, eos_token=lowerCamelCase, sep_token=lowerCamelCase, cls_token=lowerCamelCase, unk_token=lowerCamelCase, pad_token=lowerCamelCase, mask_token=lowerCamelCase, additional_special_tokens=lowerCamelCase, **lowerCamelCase, )
lowercase__ = vocab_file
lowercase__ = False if not self.vocab_file else True
def lowercase__ ( self : Any, lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase__ = [self.cls_token_id]
lowercase__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
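        # Example (a sketch): for a single sequence this builds <s> A </s>;
        # for a pair it builds <s> A </s></s> B </s>, matching the RoBERTa-style
        # pair format that CamemBERT uses.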
def lowercase__ ( self : Optional[Any], lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase__ ( self : Tuple, lowerCamelCase : str, lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(lowerCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ = os.path.join(
lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ):
copyfile(self.vocab_file, lowerCamelCase )
return (out_vocab_file,)
| 671 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'''
lowercase__ = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert('''RGB''' )
lowercase__ = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73) , (0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11) ),
] )
lowercase__ = transform(lowerCamelCase_ ).unsqueeze(0 ).to(lowerCamelCase_ )
return image
def a ( lowerCamelCase_ ):
'''simple docstring'''
if "visual_encoder" in key:
lowercase__ = re.sub('''visual_encoder*''' , '''vision_model.encoder''' , lowerCamelCase_ )
if "blocks" in key:
lowercase__ = re.sub(r'''blocks''' , '''layers''' , lowerCamelCase_ )
if "attn" in key:
lowercase__ = re.sub(r'''attn''' , '''self_attn''' , lowerCamelCase_ )
if "norm1" in key:
lowercase__ = re.sub(r'''norm1''' , '''layer_norm1''' , lowerCamelCase_ )
if "norm2" in key:
lowercase__ = re.sub(r'''norm2''' , '''layer_norm2''' , lowerCamelCase_ )
if "encoder.norm" in key:
lowercase__ = re.sub(r'''encoder.norm''' , '''post_layernorm''' , lowerCamelCase_ )
if "encoder.patch_embed.proj" in key:
lowercase__ = re.sub(r'''encoder.patch_embed.proj''' , '''embeddings.patch_embedding''' , lowerCamelCase_ )
if "encoder.pos_embed" in key:
lowercase__ = re.sub(r'''encoder.pos_embed''' , '''embeddings.position_embedding''' , lowerCamelCase_ )
if "encoder.cls_token" in key:
lowercase__ = re.sub(r'''encoder.cls_token''' , '''embeddings.class_embedding''' , lowerCamelCase_ )
if "self_attn" in key:
lowercase__ = re.sub(r'''self_attn.proj''' , '''self_attn.projection''' , lowerCamelCase_ )
return key
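# Example (a sketch): rename_key("visual_encoder.blocks.0.attn.qkv.weight")
# returns "vision_model.encoder.layers.0.self_attn.qkv.weight".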
@torch.no_grad()
def a ( lowerCamelCase_ , lowerCamelCase_=None ):
'''simple docstring'''
if config_path is not None:
lowercase__ = BlipConfig.from_pretrained(lowerCamelCase_ )
else:
lowercase__ = BlipConfig(projection_dim=512 , text_config={} , vision_config={} )
lowercase__ = BlipForConditionalGeneration(lowerCamelCase_ ).eval()
lowercase__ = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'''
lowercase__ = blip_decoder(pretrained=lowerCamelCase_ , image_size=384 , vit='''base''' )
lowercase__ = pt_model.eval()
lowercase__ = pt_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ = modified_state_dict.pop(lowerCamelCase_ )
lowercase__ = rename_key(lowerCamelCase_ )
lowercase__ = value
hf_model.load_state_dict(lowerCamelCase_ )
lowercase__ = 384
lowercase__ = load_demo_image(image_size=lowerCamelCase_ , device='''cpu''' )
lowercase__ = BertTokenizer.from_pretrained('''bert-base-uncased''' )
lowercase__ = tokenizer(['''a picture of'''] ).input_ids
lowercase__ = hf_model.generate(lowerCamelCase_ , lowerCamelCase_ )
assert out[0].tolist() == [3_0522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
lowercase__ = hf_model.generate(lowerCamelCase_ )
assert out[0].tolist() == [3_0522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(lowerCamelCase_ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
lowercase__ = (
'''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'''
)
lowercase__ = blip_vqa(pretrained=lowerCamelCase_ , image_size=lowerCamelCase_ , vit='''base''' )
vqa_model.eval()
lowercase__ = vqa_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ = modified_state_dict.pop(lowerCamelCase_ )
lowercase__ = rename_key(lowerCamelCase_ )
lowercase__ = value
lowercase__ = BlipForQuestionAnswering(lowerCamelCase_ )
hf_vqa_model.load_state_dict(lowerCamelCase_ )
lowercase__ = ['''How many dogs are in this image?''']
lowercase__ = tokenizer(lowerCamelCase_ , return_tensors='''pt''' ).input_ids
lowercase__ = hf_vqa_model.generate(lowerCamelCase_ , lowerCamelCase_ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '''_vqa''' )
lowercase__ = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'''
lowercase__ = blip_itm(pretrained=lowerCamelCase_ , image_size=lowerCamelCase_ , vit='''base''' )
itm_model.eval()
lowercase__ = itm_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ = modified_state_dict.pop(lowerCamelCase_ )
lowercase__ = rename_key(lowerCamelCase_ )
lowercase__ = value
lowercase__ = BlipForImageTextRetrieval(lowerCamelCase_ )
lowercase__ = ['''A picture of a woman with a dog sitting in a beach''']
lowercase__ = tokenizer(
lowerCamelCase_ , return_tensors='''pt''' , padding='''max_length''' , truncation=lowerCamelCase_ , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(lowerCamelCase_ )
hf_itm_model.eval()
lowercase__ = hf_itm_model(lowerCamelCase_ , lowerCamelCase_ , use_itm_head=lowerCamelCase_ )
lowercase__ = hf_itm_model(lowerCamelCase_ , lowerCamelCase_ , use_itm_head=lowerCamelCase_ )
assert out[0].item() == 0.21_10_68_74_94_27_79_54
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_56_98_84_53_86_50_51_27
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '''_itm''' )
if __name__ == "__main__":
A__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
A__ : List[Any] = parser.parse_args()
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 671 | 1 |
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : List[Any], lowerCamelCase : int, lowerCamelCase : Dict=13, lowerCamelCase : Optional[Any]=7, lowerCamelCase : Optional[int]=6, lowerCamelCase : List[Any]=17, lowerCamelCase : Optional[Any]=23, lowerCamelCase : Any=11, lowerCamelCase : List[Any]=True, ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = act_dim
lowercase__ = state_dim
lowercase__ = hidden_size
lowercase__ = max_length
lowercase__ = is_training
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
lowercase__ = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
lowercase__ = floats_tensor((self.batch_size, self.seq_length, 1) )
lowercase__ = floats_tensor((self.batch_size, self.seq_length, 1) )
lowercase__ = ids_tensor((self.batch_size, self.seq_length), vocab_size=1_000 )
lowercase__ = random_attention_mask((self.batch_size, self.seq_length) )
lowercase__ = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def lowercase__ ( self : str ):
'''simple docstring'''
return DecisionTransformerConfig(
batch_size=self.batch_size, seq_length=self.seq_length, act_dim=self.act_dim, state_dim=self.state_dim, hidden_size=self.hidden_size, max_length=self.max_length, )
def lowercase__ ( self : int, lowerCamelCase : Optional[int], lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : Any, lowerCamelCase : int, lowerCamelCase : List[str], lowerCamelCase : List[str], ):
'''simple docstring'''
lowercase__ = DecisionTransformerModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ = model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
self.parent.assertEqual(result.state_preds.shape, states.shape )
self.parent.assertEqual(result.action_preds.shape, actions.shape )
self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape )
self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length * 3 as there are 3 modalities: states, returns and actions
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
        lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {
'''states''': states,
'''actions''': actions,
'''rewards''': rewards,
'''returns_to_go''': returns_to_go,
'''timesteps''': timesteps,
'''attention_mask''': attention_mask,
}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( A__ ,A__ ,A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = (DecisionTransformerModel,) if is_torch_available() else ()
lowercase__ = ()
lowercase__ = {"""feature-extraction""": DecisionTransformerModel} if is_torch_available() else {}
    # Ignore a failing test from GenerationTesterMixin, as the model does not use input_ids
lowercase__ = False
    # Ignore failing tests from ModelTesterMixin, as the model does not implement these features
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = DecisionTransformerModelTester(self )
lowercase__ = ConfigTester(self, config_class=lowerCamelCase, hidden_size=37 )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
@slow
def lowercase__ ( self : Tuple ):
'''simple docstring'''
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = DecisionTransformerModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(lowerCamelCase )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = [
'''states''',
'''actions''',
'''rewards''',
'''returns_to_go''',
'''timesteps''',
'''attention_mask''',
]
self.assertListEqual(arg_names[: len(lowerCamelCase )], lowerCamelCase )
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = 2 # number of steps of autoregressive prediction we will perform
lowercase__ = 10 # defined by the RL environment, may be normalized
lowercase__ = DecisionTransformerModel.from_pretrained('''edbeeching/decision-transformer-gym-hopper-expert''' )
lowercase__ = model.to(lowerCamelCase )
lowercase__ = model.config
torch.manual_seed(0 )
lowercase__ = torch.randn(1, 1, config.state_dim ).to(device=lowerCamelCase, dtype=torch.floataa ) # env.reset()
lowercase__ = torch.tensor(
[[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=lowerCamelCase )
lowercase__ = torch.tensor(lowerCamelCase, device=lowerCamelCase, dtype=torch.floataa ).reshape(1, 1, 1 )
lowercase__ = state
lowercase__ = torch.zeros(1, 0, config.act_dim, device=lowerCamelCase, dtype=torch.floataa )
lowercase__ = torch.zeros(1, 0, device=lowerCamelCase, dtype=torch.floataa )
lowercase__ = torch.tensor(0, device=lowerCamelCase, dtype=torch.long ).reshape(1, 1 )
for step in range(lowerCamelCase ):
lowercase__ = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=lowerCamelCase )], dim=1 )
lowercase__ = torch.cat([rewards, torch.zeros(1, 1, device=lowerCamelCase )], dim=1 )
lowercase__ = torch.ones(1, states.shape[1] ).to(dtype=torch.long, device=states.device )
with torch.no_grad():
lowercase__ , lowercase__ , lowercase__ = model(
states=lowerCamelCase, actions=lowerCamelCase, rewards=lowerCamelCase, returns_to_go=lowerCamelCase, timesteps=lowerCamelCase, attention_mask=lowerCamelCase, return_dict=lowerCamelCase, )
self.assertEqual(action_pred.shape, actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1E-4 ) )
lowercase__ , lowercase__ , lowercase__ , lowercase__ = ( # env.step(action)
torch.randn(1, 1, config.state_dim ).to(device=lowerCamelCase, dtype=torch.floataa ),
1.0,
False,
{},
)
lowercase__ = action_pred[0, -1]
lowercase__ = torch.cat([states, state], dim=1 )
lowercase__ = returns_to_go[0, -1] - reward
lowercase__ = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1 )], dim=1 )
lowercase__ = torch.cat(
[timesteps, torch.ones((1, 1), device=lowerCamelCase, dtype=torch.long ) * (step + 1)], dim=1 )
| 671 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : str, lowerCamelCase : Any, lowerCamelCase : Tuple=7, lowerCamelCase : str=3, lowerCamelCase : Tuple=18, lowerCamelCase : int=30, lowerCamelCase : Tuple=400, lowerCamelCase : Any=True, lowerCamelCase : Any=None, lowerCamelCase : List[str]=True, lowerCamelCase : Union[str, Any]=None, ):
'''simple docstring'''
lowercase__ = size if size is not None else {'''shortest_edge''': 20}
lowercase__ = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = min_resolution
lowercase__ = max_resolution
lowercase__ = do_resize
lowercase__ = size
lowercase__ = do_center_crop
lowercase__ = crop_size
def lowercase__ ( self : Any ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class _UpperCAmelCase ( A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = MobileNetVaImageProcessor if is_vision_available() else None
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = MobileNetVaImageProcessingTester(self )
@property
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase, '''do_resize''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''size''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''do_center_crop''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''crop_size''' ) )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size, {'''height''': 18, '''width''': 18} )
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84 )
self.assertEqual(image_processor.size, {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size, {'''height''': 84, '''width''': 84} )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
pass
def lowercase__ ( self : Any ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, Image.Image )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
def lowercase__ ( self : str ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase, numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, np.ndarray )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
def lowercase__ ( self : str ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase, torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, torch.Tensor )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
| 671 | 1 |
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Optional[int], lowerCamelCase : str = "", lowerCamelCase : bool = False ):
'''simple docstring'''
        # Maps the first character of each child's prefix to that child node
lowercase__ = {}
# A node will be a leaf if the tree contains its word
lowercase__ = is_leaf
lowercase__ = prefix
def lowercase__ ( self : Any, lowerCamelCase : str ):
'''simple docstring'''
lowercase__ = 0
for q, w in zip(self.prefix, lowerCamelCase ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def lowercase__ ( self : Optional[int], lowerCamelCase : list[str] ):
'''simple docstring'''
for word in words:
self.insert(lowerCamelCase )
def lowercase__ ( self : int, lowerCamelCase : str ):
'''simple docstring'''
# Case 1: If the word is the prefix of the node
# Solution: We set the current node as leaf
if self.prefix == word:
lowercase__ = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
lowercase__ = RadixNode(prefix=lowerCamelCase, is_leaf=lowerCamelCase )
else:
lowercase__ = self.nodes[word[0]]
lowercase__ , lowercase__ , lowercase__ = incoming_node.match(
lowerCamelCase )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(lowerCamelCase )
            # Case 4: The word and the node prefix only partially match
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
lowercase__ = remaining_prefix
lowercase__ = self.nodes[matching_string[0]]
lowercase__ = RadixNode(lowerCamelCase, lowerCamelCase )
lowercase__ = aux_node
if remaining_word == "":
lowercase__ = True
else:
self.nodes[matching_string[0]].insert(lowerCamelCase )
def lowercase__ ( self : int, lowerCamelCase : str ):
'''simple docstring'''
lowercase__ = self.nodes.get(word[0], lowerCamelCase )
if not incoming_node:
return False
else:
lowercase__ , lowercase__ , lowercase__ = incoming_node.match(
lowerCamelCase )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(lowerCamelCase )
def lowercase__ ( self : Any, lowerCamelCase : str ):
'''simple docstring'''
lowercase__ = self.nodes.get(word[0], lowerCamelCase )
if not incoming_node:
return False
else:
lowercase__ , lowercase__ , lowercase__ = incoming_node.match(
lowerCamelCase )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(lowerCamelCase )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
lowercase__ = list(self.nodes.values() )[0]
lowercase__ = merging_node.is_leaf
self.prefix += merging_node.prefix
lowercase__ = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
lowercase__ = False
# If there is 1 edge, we merge it with its child
else:
lowercase__ = list(incoming_node.nodes.values() )[0]
lowercase__ = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
lowercase__ = merging_node.nodes
return True
def lowercase__ ( self : Union[str, Any], lowerCamelCase : int = 0 ):
'''simple docstring'''
if self.prefix != "":
print('''-''' * height, self.prefix, ''' (leaf)''' if self.is_leaf else '''''' )
for value in self.nodes.values():
value.print_tree(height + 1 )
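# Worked example of the three-way split performed by match() above (a sketch):
# a node with prefix "banana" matched against "bandana" shares "ban", leaving
# "ana" on the node and "dana" on the word:
# RadixNode(prefix="banana").match("bandana") == ("ban", "ana", "dana")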
def a ( ):
'''simple docstring'''
lowercase__ = '''banana bananas bandana band apple all beast'''.split()
lowercase__ = RadixNode()
root.insert_many(lowerCamelCase_ )
assert all(root.find(lowerCamelCase_ ) for word in words )
assert not root.find('''bandanas''' )
assert not root.find('''apps''' )
root.delete('''all''' )
assert not root.find('''all''' )
root.delete('''banana''' )
assert not root.find('''banana''' )
assert root.find('''bananas''' )
return True
def a ( ):
'''simple docstring'''
assert test_trie()
def a ( ):
'''simple docstring'''
lowercase__ = RadixNode()
lowercase__ = '''banana bananas bandanas bandana band apple all beast'''.split()
root.insert_many(lowerCamelCase_ )
print('''Words:''' , lowerCamelCase_ )
print('''Tree:''' )
root.print_tree()
if __name__ == "__main__":
main()
| 671 |
import argparse
import os
import re
A__ : Optional[int] = 'src/transformers'
# Pattern that looks at the indentation in a line.
A__ : Union[str, Any] = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
A__ : List[str] = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
A__ : List[Any] = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
A__ : int = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
A__ : Tuple = re.compile(r'\[([^\]]+)\]')
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = _re_indent.search(lowerCamelCase_ )
return "" if search is None else search.groups()[0]
def a ( lowerCamelCase_ , lowerCamelCase_="" , lowerCamelCase_=None , lowerCamelCase_=None ):
'''simple docstring'''
lowercase__ = 0
lowercase__ = code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(lowerCamelCase_ ):
index += 1
lowercase__ = ['''\n'''.join(lines[:index] )]
else:
lowercase__ = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowercase__ = [lines[index]]
index += 1
while index < len(lowerCamelCase_ ) and (end_prompt is None or not lines[index].startswith(lowerCamelCase_ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(lowerCamelCase_ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(lowerCamelCase_ ) )
if index < len(lowerCamelCase_ ) - 1:
lowercase__ = [lines[index + 1]]
index += 1
else:
lowercase__ = []
else:
blocks.append('''\n'''.join(lowerCamelCase_ ) )
lowercase__ = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(lowerCamelCase_ ) > 0:
blocks.append('''\n'''.join(lowerCamelCase_ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowerCamelCase_ ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def a ( lowerCamelCase_ ):
'''simple docstring'''
def _inner(lowerCamelCase_ ):
return key(lowerCamelCase_ ).lower().replace('''_''' , '''''' )
return _inner
def a ( lowerCamelCase_ , lowerCamelCase_=None ):
'''simple docstring'''
# If no key is provided, we use a noop.
def noop(lowerCamelCase_ ):
return x
if key is None:
lowercase__ = noop
# Constants are all uppercase, they go first.
lowercase__ = [obj for obj in objects if key(lowerCamelCase_ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowercase__ = [obj for obj in objects if key(lowerCamelCase_ )[0].isupper() and not key(lowerCamelCase_ ).isupper()]
# Functions begin with a lowercase, they go last.
lowercase__ = [obj for obj in objects if not key(lowerCamelCase_ )[0].isupper()]
lowercase__ = ignore_underscore(lowerCamelCase_ )
return sorted(lowerCamelCase_ , key=lowerCamelCase_ ) + sorted(lowerCamelCase_ , key=lowerCamelCase_ ) + sorted(lowerCamelCase_ , key=lowerCamelCase_ )
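# Example ordering for the function above (a sketch, using the hypothetical
# name `sort_objects`): sort_objects(["my_func", "MyClass", "MY_CONST"])
# returns ["MY_CONST", "MyClass", "my_func"]: constants, then classes, then functions.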
def a ( lowerCamelCase_ ):
'''simple docstring'''
    # This inner function sorts imports between [ ].
def _replace(lowerCamelCase_ ):
lowercase__ = match.groups()[0]
if "," not in imports:
return F"""[{imports}]"""
lowercase__ = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowercase__ = keys[:-1]
return "[" + ", ".join([F"""\"{k}\"""" for k in sort_objects(lowerCamelCase_ )] ) + "]"
lowercase__ = import_statement.split('''\n''' )
if len(lowerCamelCase_ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowercase__ = 2 if lines[1].strip() == '''[''' else 1
lowercase__ = [(i, _re_strip_line.search(lowerCamelCase_ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
lowercase__ = sort_objects(lowerCamelCase_ , key=lambda lowerCamelCase_ : x[1] )
lowercase__ = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(lowerCamelCase_ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowercase__ = _re_bracket_content.sub(_replace , lines[1] )
else:
lowercase__ = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowercase__ = keys[:-1]
lowercase__ = get_indent(lines[1] ) + ''', '''.join([F"""\"{k}\"""" for k in sort_objects(lowerCamelCase_ )] )
return "\n".join(lowerCamelCase_ )
else:
# Finally we have to deal with imports fitting on one line
lowercase__ = _re_bracket_content.sub(_replace , lowerCamelCase_ )
return import_statement
def a ( lowerCamelCase_ , lowerCamelCase_=True ):
'''simple docstring'''
with open(lowerCamelCase_ , encoding='''utf-8''' ) as f:
lowercase__ = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowercase__ = split_code_in_indented_blocks(
lowerCamelCase_ , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(lowerCamelCase_ ) - 1 ):
        # Check if the block contains some `_import_structure` entries to sort.
lowercase__ = main_blocks[block_idx]
lowercase__ = block.split('''\n''' )
# Get to the start of the imports.
lowercase__ = 0
while line_idx < len(lowerCamelCase_ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowercase__ = len(lowerCamelCase_ )
else:
line_idx += 1
if line_idx >= len(lowerCamelCase_ ):
continue
# Ignore beginning and last line: they don't contain anything.
lowercase__ = '''\n'''.join(block_lines[line_idx:-1] )
lowercase__ = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
lowercase__ = split_code_in_indented_blocks(lowerCamelCase_ , indent_level=lowerCamelCase_ )
# We have two categories of import key: list or _import_structure[key].append/extend
lowercase__ = _re_direct_key if '''_import_structure = {''' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
lowercase__ = [(pattern.search(lowerCamelCase_ ).groups()[0] if pattern.search(lowerCamelCase_ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowercase__ = [(i, key) for i, key in enumerate(lowerCamelCase_ ) if key is not None]
lowercase__ = [x[0] for x in sorted(lowerCamelCase_ , key=lambda lowerCamelCase_ : x[1] )]
        # We reorder the blocks, leaving empty lines/comments as they were and sorting the rest.
lowercase__ = 0
lowercase__ = []
for i in range(len(lowerCamelCase_ ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
lowercase__ = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(lowerCamelCase_ )
count += 1
# And we put our main block back together with its first and last line.
lowercase__ = '''\n'''.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(lowerCamelCase_ ):
if check_only:
return True
else:
print(F"""Overwriting {file}.""" )
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(lowerCamelCase_ ) )
def a ( lowerCamelCase_=True ):
'''simple docstring'''
lowercase__ = []
for root, _, files in os.walk(lowerCamelCase_ ):
if "__init__.py" in files:
lowercase__ = sort_imports(os.path.join(lowerCamelCase_ , '''__init__.py''' ) , check_only=lowerCamelCase_ )
if result:
lowercase__ = [os.path.join(lowerCamelCase_ , '''__init__.py''' )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(F"""Would overwrite {len(lowerCamelCase_ )} files, run `make style`.""" )
if __name__ == "__main__":
A__ : str = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
A__ : int = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 671 | 1 |
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = set()
# edges = list of graph's edges
lowercase__ = get_edges(lowerCamelCase_ )
    # While there are still elements in the edges list, take an arbitrary edge
    # (from_node, to_node), add both endpoints to chosen_vertices, and then
    # remove all edges adjacent to from_node and to_node
while edges:
lowercase__ , lowercase__ = edges.pop()
chosen_vertices.add(lowerCamelCase_ )
chosen_vertices.add(lowerCamelCase_ )
for edge in edges.copy():
if from_node in edge or to_node in edge:
edges.discard(lowerCamelCase_ )
return chosen_vertices
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node) )
return edges
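# Deterministic mini-example (a sketch): for {0: [1], 1: [0]} the edge set is
# {(0, 1), (1, 0)}; popping either one puts both endpoints in the cover, so
# matching_min_vertex_cover({0: [1], 1: [0]}) == {0, 1}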
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 671 |
from math import sqrt
def a ( lowerCamelCase_ ):
'''simple docstring'''
    assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
        number >= 0
    ), "'number' must be an int and positive"
lowercase__ = True
# 0 and 1 are none primes.
if number <= 1:
lowercase__ = False
for divisor in range(2 , int(round(sqrt(lowerCamelCase_ ) ) ) + 1 ):
        # if 'number' is divisible by 'divisor', set 'status'
        # to False and break out of the loop.
if number % divisor == 0:
lowercase__ = False
break
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'status' must been from type bool"
return status
def a ( lowerCamelCase_ ):
'''simple docstring'''
    assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n > 2), "'N' must be an int and > 2"
    # begin_list: contains all natural numbers from 2 up to N
    lowercase__ = list(range(2 , n + 1 ) )
    lowercase__ = [] # this list will be returned.
    # actual sieve of eratosthenes
for i in range(len(lowerCamelCase_ ) ):
for j in range(i + 1 , len(lowerCamelCase_ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
lowercase__ = 0
# filters actual prime numbers.
lowercase__ = [x for x in begin_list if x != 0]
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type list"
return ans
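# A boolean-array Sieve of Eratosthenes sketch for comparison (hypothetical
# name `sieve`); roughly O(n log log n) versus the quadratic elimination above.
def sieve(n):
    flags = [True] * (n + 1)
    flags[0] = flags[1] = False
    for p in range(2, int(n**0.5) + 1):
        if flags[p]:
            # every multiple of p starting at p*p is composite
            for multiple in range(p * p, n + 1, p):
                flags[multiple] = False
    return [x for x in range(2, n + 1) if flags[x]]

assert sieve(20) == [2, 3, 5, 7, 11, 13, 17, 19]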
def a ( lowerCamelCase_ ):
'''simple docstring'''
    assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n > 2), "'N' must be an int and > 2"
lowercase__ = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(lowerCamelCase_ ):
ans.append(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type list"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
    assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and number >= 0, "'number' must be an int and >= 0"
    lowercase__ = [] # this list will be returned by the function.
# potential prime number factors.
lowercase__ = 2
lowercase__ = number
if number == 0 or number == 1:
ans.append(lowerCamelCase_ )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(lowerCamelCase_ ):
while quotient != 1:
if is_prime(lowerCamelCase_ ) and (quotient % factor == 0):
ans.append(lowerCamelCase_ )
quotient /= factor
else:
factor += 1
else:
ans.append(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type list"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowercase__ = 0
# prime factorization of 'number'
lowercase__ = prime_factorization(lowerCamelCase_ )
lowercase__ = max(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type int"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowercase__ = 0
# prime factorization of 'number'
lowercase__ = prime_factorization(lowerCamelCase_ )
lowercase__ = min(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type int"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'number' must been an int"
    assert isinstance(number % 2 == 0 , lowerCamelCase_ ), "compare must been from type bool"
return number % 2 == 0
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'number' must been an int"
    assert isinstance(number % 2 != 0 , lowerCamelCase_ ), "compare must been from type bool"
return number % 2 != 0
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (number > 2) and is_even(lowerCamelCase_ )
), "'number' must been an int, even and > 2"
    lowercase__ = [] # this list will be returned
# creates a list of prime numbers between 2 up to 'number'
lowercase__ = get_prime_numbers(lowerCamelCase_ )
lowercase__ = len(lowerCamelCase_ )
# run variable for while-loops.
lowercase__ = 0
lowercase__ = None
# exit variable. for break up the loops
lowercase__ = True
while i < len_pn and loop:
lowercase__ = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
lowercase__ = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (len(lowerCamelCase_ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
lowercase__ = 0
while numbera != 0:
lowercase__ = numbera % numbera
lowercase__ = numbera
lowercase__ = rest
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
lowercase__ = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
lowercase__ = prime_factorization(lowerCamelCase_ )
lowercase__ = prime_factorization(lowerCamelCase_ )
elif numbera == 1 or numbera == 1:
lowercase__ = []
lowercase__ = []
lowercase__ = max(lowerCamelCase_ , lowerCamelCase_ )
lowercase__ = 0
lowercase__ = 0
    lowercase__ = [] # captured numbers in both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
for _ in range(max(lowerCamelCase_ , lowerCamelCase_ ) ):
ans *= n
else:
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
for _ in range(lowerCamelCase_ ):
ans *= n
done.append(lowerCamelCase_ )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
for _ in range(lowerCamelCase_ ):
ans *= n
done.append(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'number' must been a positive int"
lowercase__ = 0
lowercase__ = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(lowerCamelCase_ ):
ans += 1
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and is_prime(
lowerCamelCase_ ), "'ans' must been a prime number and from type int"
return ans
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
is_prime(lowerCamelCase_ ) and is_prime(lowerCamelCase_ ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
lowercase__ = p_number_a + 1 # jump to the next number
    lowercase__ = [] # this list will be returned.
# if number is not prime then
# fetch the next prime number.
while not is_prime(lowerCamelCase_ ):
number += 1
while number < p_number_a:
ans.append(lowerCamelCase_ )
number += 1
# fetch the next prime number.
while not is_prime(lowerCamelCase_ ):
number += 1
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and ans[0] != p_number_a
and ans[len(lowerCamelCase_ ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 1), "'n' must been int and >= 1"
lowercase__ = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(lowerCamelCase_ )
# precondition
    assert ans[0] == 1 and ans[len(lowerCamelCase_ ) - 1] == n, "Error in function get_divisors(...)"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number > 1
), "'number' must been an int and >= 1"
lowercase__ = get_divisors(lowerCamelCase_ )
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (divisors[0] == 1)
and (divisors[len(lowerCamelCase_ ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
lowercase__ = gcd(abs(lowerCamelCase_ ) , abs(lowerCamelCase_ ) )
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'n' must been a int and >= 0"
    lowercase__ = 1 # this will be returned.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'n' must been an int and >= 0"
lowercase__ = 0
lowercase__ = 1
    lowercase__ = 1 # this will be returned
for _ in range(n - 1 ):
lowercase__ = ans
ans += fiba
lowercase__ = tmp
return ans
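# Hedged usage examples, assuming the helpers keep the original names they
# reference internally (is_prime, prime_factorization, goldbach, kg_v, fib, ...):
# is_prime(97)              -> True
# prime_factorization(287)  -> [7, 41]
# goldbach(28)              -> [5, 23]   # first prime pair found by the scan
# kg_v(8, 10)               -> 40        # least common multiple
# fib(5)                    -> 8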
| 671 | 1 |
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
A__ : Optional[Any] = transforms.Compose(
[
transforms.Resize((2_56, 2_56)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def a ( lowerCamelCase_ ):
'''simple docstring'''
if isinstance(lowerCamelCase_ , torch.Tensor ):
return image
elif isinstance(lowerCamelCase_ , PIL.Image.Image ):
lowercase__ = [image]
lowercase__ = [trans(img.convert('''RGB''' ) ) for img in image]
lowercase__ = torch.stack(lowerCamelCase_ )
return image
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
def __init__( self : Optional[int], lowerCamelCase : Optional[int], lowerCamelCase : Tuple ):
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
lowercase__ = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=lowerCamelCase, scheduler=lowerCamelCase )
def lowercase__ ( self : Optional[Any], lowerCamelCase : str ):
'''simple docstring'''
if strength < 0 or strength > 1:
raise ValueError(F"""The value of strength should in [0.0, 1.0] but is {strength}""" )
def lowercase__ ( self : Tuple, lowerCamelCase : Tuple, lowerCamelCase : Dict, lowerCamelCase : Any ):
'''simple docstring'''
# get the original timestep using init_timestep
lowercase__ = min(int(num_inference_steps * strength ), lowerCamelCase )
lowercase__ = max(num_inference_steps - init_timestep, 0 )
lowercase__ = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowercase__ ( self : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : str, lowerCamelCase : Optional[int], lowerCamelCase : Dict, lowerCamelCase : List[Any], lowerCamelCase : List[Any]=None ):
'''simple docstring'''
if not isinstance(lowerCamelCase, (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowerCamelCase )}""" )
lowercase__ = image.to(device=lowerCamelCase, dtype=lowerCamelCase )
if isinstance(lowerCamelCase, lowerCamelCase ) and len(lowerCamelCase ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(lowerCamelCase )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
lowercase__ = init_latents.shape
lowercase__ = randn_tensor(lowerCamelCase, generator=lowerCamelCase, device=lowerCamelCase, dtype=lowerCamelCase )
# get latents
print('''add noise to latents at timestep''', lowerCamelCase )
lowercase__ = self.scheduler.add_noise(lowerCamelCase, lowerCamelCase, lowerCamelCase )
lowercase__ = init_latents
return latents
@torch.no_grad()
def __call__( self : List[str], lowerCamelCase : Union[torch.FloatTensor, PIL.Image.Image] = None, lowerCamelCase : float = 0.8, lowerCamelCase : int = 1, lowerCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None, lowerCamelCase : float = 0.0, lowerCamelCase : int = 50, lowerCamelCase : Optional[bool] = None, lowerCamelCase : Optional[str] = "pil", lowerCamelCase : bool = True, ):
'''simple docstring'''
self.check_inputs(lowerCamelCase )
# 2. Preprocess image
lowercase__ = preprocess(lowerCamelCase )
# 3. set timesteps
self.scheduler.set_timesteps(lowerCamelCase, device=self.device )
lowercase__ , lowercase__ = self.get_timesteps(lowerCamelCase, lowerCamelCase, self.device )
lowercase__ = timesteps[:1].repeat(lowerCamelCase )
# 4. Prepare latent variables
lowercase__ = self.prepare_latents(lowerCamelCase, lowerCamelCase, lowerCamelCase, self.unet.dtype, self.device, lowerCamelCase )
lowercase__ = latents
# 5. Denoising loop
for t in self.progress_bar(lowerCamelCase ):
# 1. predict noise model_output
lowercase__ = self.unet(lowerCamelCase, lowerCamelCase ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
lowercase__ = self.scheduler.step(
lowerCamelCase, lowerCamelCase, lowerCamelCase, eta=lowerCamelCase, use_clipped_model_output=lowerCamelCase, generator=lowerCamelCase, ).prev_sample
lowercase__ = (image / 2 + 0.5).clamp(0, 1 )
lowercase__ = image.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
lowercase__ = self.numpy_to_pil(lowerCamelCase )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=lowerCamelCase )
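# Hedged usage sketch (the pipeline's original class name is obfuscated above;
# the name below is an assumption):
# pipe = DDIMNoiseComparativeAnalysisPipeline(unet=unet, scheduler=scheduler)
# image, latent_timestep = pipe(image=init_image, strength=0.8, return_dict=False)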
| 671 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = args.log_outputs
lowercase__ = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )
# load metric
lowercase__ = load_metric('''wer''' )
lowercase__ = load_metric('''cer''' )
# compute metrics
lowercase__ = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
lowercase__ = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
# print & log results
lowercase__ = F"""WER: {wer_result}\nCER: {cer_result}"""
print(lowerCamelCase_ )
with open(F"""{dataset_id}_eval_results.txt""" , '''w''' ) as f:
f.write(lowerCamelCase_ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
lowercase__ = F"""log_{dataset_id}_predictions.txt"""
lowercase__ = F"""log_{dataset_id}_targets.txt"""
with open(lowerCamelCase_ , '''w''' ) as p, open(lowerCamelCase_ , '''w''' ) as t:
# mapping function to write output
def write_to_file(lowerCamelCase_ , lowerCamelCase_ ):
p.write(F"""{i}""" + '''\n''' )
p.write(batch['''prediction'''] + '''\n''' )
t.write(F"""{i}""" + '''\n''' )
t.write(batch['''target'''] + '''\n''' )
result.map(lowerCamelCase_ , with_indices=lowerCamelCase_ )
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
lowercase__ = re.sub(lowerCamelCase_ , '''''' , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
lowercase__ = ['''\n\n''', '''\n''', ''' ''', ''' ''']
for t in token_sequences_to_ignore:
lowercase__ = ''' '''.join(text.split(lowerCamelCase_ ) )
return text
def a ( lowerCamelCase_ ):
'''simple docstring'''
# load dataset
lowercase__ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=lowerCamelCase_ )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
lowercase__ = AutoFeatureExtractor.from_pretrained(args.model_id )
lowercase__ = feature_extractor.sampling_rate
# resample audio
lowercase__ = dataset.cast_column('''audio''' , Audio(sampling_rate=lowerCamelCase_ ) )
# load eval pipeline
if args.device is None:
lowercase__ = 0 if torch.cuda.is_available() else -1
lowercase__ = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(lowerCamelCase_ ):
lowercase__ = asr(
batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
lowercase__ = prediction['''text''']
lowercase__ = normalize_text(batch['''sentence'''] )
return batch
# run inference on all examples
lowercase__ = dataset.map(lowerCamelCase_ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(lowerCamelCase_ , lowerCamelCase_ )
if __name__ == "__main__":
A__ : int = argparse.ArgumentParser()
parser.add_argument(
'--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
)
parser.add_argument(
'--dataset',
type=str,
required=True,
help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
)
parser.add_argument(
'--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
)
parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
parser.add_argument(
'--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.'
)
parser.add_argument(
'--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.'
)
parser.add_argument(
'--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
)
parser.add_argument(
'--device',
type=int,
default=None,
help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
)
A__ : Union[str, Any] = parser.parse_args()
main(args)
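    # Illustrative invocation (model/dataset ids below are placeholders):
    # python eval.py --model_id facebook/wav2vec2-base-960h \
    #     --dataset mozilla-foundation/common_voice_8_0 --config en --split test --log_outputs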
| 671 | 1 |
import math
def a ( lowerCamelCase_ ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCamelCase_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def a ( lowerCamelCase_ = 1_0001 ):
'''simple docstring'''
try:
lowercase__ = int(lowerCamelCase_ )
except (TypeError, ValueError):
raise TypeError('''Parameter nth must be int or castable to int.''' ) from None
if nth <= 0:
raise ValueError('''Parameter nth must be greater than or equal to one.''' )
lowercase__ = []
lowercase__ = 2
while len(lowerCamelCase_ ) < nth:
if is_prime(lowerCamelCase_ ):
primes.append(lowerCamelCase_ )
num += 1
else:
num += 1
return primes[len(lowerCamelCase_ ) - 1]
if __name__ == "__main__":
print(F"{solution() = }")
| 671 |
from functools import reduce
A__ : Union[str, Any] = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def a ( lowerCamelCase_ = N ):
'''simple docstring'''
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda lowerCamelCase_ , lowerCamelCase_ : str(int(lowerCamelCase_ ) * int(lowerCamelCase_ ) ) , n[i : i + 13] ) )
for i in range(len(lowerCamelCase_ ) - 12 ) )
if __name__ == "__main__":
print(F"{solution() = }")
| 671 | 1 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
A__ : Dict = logging.get_logger(__name__)
# General docstring
A__ : List[Any] = 'RegNetConfig'
# Base docstring
A__ : Any = 'facebook/regnet-y-040'
A__ : Union[str, Any] = [1, 10_88, 7, 7]
# Image classification docstring
A__ : Tuple = 'facebook/regnet-y-040'
A__ : Optional[int] = 'tabby, tabby cat'
A__ : int = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class _UpperCAmelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : int, lowerCamelCase : int, lowerCamelCase : int = 3, lowerCamelCase : int = 1, lowerCamelCase : int = 1, lowerCamelCase : Optional[str] = "relu", **lowerCamelCase : Optional[Any], ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
lowercase__ = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
lowercase__ = tf.keras.layers.ConvaD(
filters=lowerCamelCase, kernel_size=lowerCamelCase, strides=lowerCamelCase, padding='''VALID''', groups=lowerCamelCase, use_bias=lowerCamelCase, name='''convolution''', )
lowercase__ = tf.keras.layers.BatchNormalization(epsilon=1E-5, momentum=0.9, name='''normalization''' )
lowercase__ = ACTaFN[activation] if activation is not None else tf.identity
def lowercase__ ( self : Tuple, lowerCamelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.convolution(self.padding(lowerCamelCase ) )
lowercase__ = self.normalization(lowerCamelCase )
lowercase__ = self.activation(lowerCamelCase )
return hidden_state
class _UpperCAmelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : int, lowerCamelCase : RegNetConfig, **lowerCamelCase : Dict ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
lowercase__ = config.num_channels
lowercase__ = TFRegNetConvLayer(
out_channels=config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act, name='''embedder''', )
def lowercase__ ( self : Dict, lowerCamelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ = shape_list(lowerCamelCase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
lowercase__ = tf.transpose(lowerCamelCase, perm=(0, 2, 3, 1) )
lowercase__ = self.embedder(lowerCamelCase )
return hidden_state
class _UpperCAmelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : int, lowerCamelCase : int, lowerCamelCase : int = 2, **lowerCamelCase : str ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
lowercase__ = tf.keras.layers.ConvaD(
filters=lowerCamelCase, kernel_size=1, strides=lowerCamelCase, use_bias=lowerCamelCase, name='''convolution''' )
lowercase__ = tf.keras.layers.BatchNormalization(epsilon=1E-5, momentum=0.9, name='''normalization''' )
def lowercase__ ( self : int, lowerCamelCase : tf.Tensor, lowerCamelCase : bool = False ):
'''simple docstring'''
return self.normalization(self.convolution(lowerCamelCase ), training=lowerCamelCase )
class _UpperCAmelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : str, lowerCamelCase : int, lowerCamelCase : int, **lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
lowercase__ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowerCamelCase, name='''pooler''' )
lowercase__ = [
tf.keras.layers.ConvaD(filters=lowerCamelCase, kernel_size=1, activation='''relu''', name='''attention.0''' ),
tf.keras.layers.ConvaD(filters=lowerCamelCase, kernel_size=1, activation='''sigmoid''', name='''attention.2''' ),
]
def lowercase__ ( self : Any, lowerCamelCase : List[Any] ):
'''simple docstring'''
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
lowercase__ = self.pooler(lowerCamelCase )
for layer_module in self.attention:
lowercase__ = layer_module(lowerCamelCase )
lowercase__ = hidden_state * pooled
return hidden_state
class _UpperCAmelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Tuple, lowerCamelCase : RegNetConfig, lowerCamelCase : int, lowerCamelCase : int, lowerCamelCase : int = 1, **lowerCamelCase : Optional[int] ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
lowercase__ = in_channels != out_channels or stride != 1
lowercase__ = max(1, out_channels // config.groups_width )
lowercase__ = (
TFRegNetShortCut(lowerCamelCase, stride=lowerCamelCase, name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''', name='''shortcut''' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
lowercase__ = [
TFRegNetConvLayer(lowerCamelCase, kernel_size=1, activation=config.hidden_act, name='''layer.0''' ),
TFRegNetConvLayer(
lowerCamelCase, stride=lowerCamelCase, groups=lowerCamelCase, activation=config.hidden_act, name='''layer.1''' ),
TFRegNetConvLayer(lowerCamelCase, kernel_size=1, activation=lowerCamelCase, name='''layer.2''' ),
]
lowercase__ = ACTaFN[config.hidden_act]
def lowercase__ ( self : Any, lowerCamelCase : Any ):
'''simple docstring'''
lowercase__ = hidden_state
for layer_module in self.layers:
lowercase__ = layer_module(lowerCamelCase )
lowercase__ = self.shortcut(lowerCamelCase )
hidden_state += residual
lowercase__ = self.activation(lowerCamelCase )
return hidden_state
class _UpperCAmelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Tuple, lowerCamelCase : RegNetConfig, lowerCamelCase : int, lowerCamelCase : int, lowerCamelCase : int = 1, **lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
lowercase__ = in_channels != out_channels or stride != 1
lowercase__ = max(1, out_channels // config.groups_width )
lowercase__ = (
TFRegNetShortCut(lowerCamelCase, stride=lowerCamelCase, name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''', name='''shortcut''' )
)
lowercase__ = [
TFRegNetConvLayer(lowerCamelCase, kernel_size=1, activation=config.hidden_act, name='''layer.0''' ),
TFRegNetConvLayer(
lowerCamelCase, stride=lowerCamelCase, groups=lowerCamelCase, activation=config.hidden_act, name='''layer.1''' ),
TFRegNetSELayer(lowerCamelCase, reduced_channels=int(round(in_channels / 4 ) ), name='''layer.2''' ),
TFRegNetConvLayer(lowerCamelCase, kernel_size=1, activation=lowerCamelCase, name='''layer.3''' ),
]
lowercase__ = ACTaFN[config.hidden_act]
def lowercase__ ( self : Optional[int], lowerCamelCase : str ):
'''simple docstring'''
lowercase__ = hidden_state
for layer_module in self.layers:
lowercase__ = layer_module(lowerCamelCase )
lowercase__ = self.shortcut(lowerCamelCase )
hidden_state += residual
lowercase__ = self.activation(lowerCamelCase )
return hidden_state
class _UpperCAmelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Optional[int], lowerCamelCase : RegNetConfig, lowerCamelCase : int, lowerCamelCase : int, lowerCamelCase : int = 2, lowerCamelCase : int = 2, **lowerCamelCase : Tuple ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
lowercase__ = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer
lowercase__ = [
# downsampling is done in the first layer with stride of 2
layer(lowerCamelCase, lowerCamelCase, lowerCamelCase, stride=lowerCamelCase, name='''layers.0''' ),
*[layer(lowerCamelCase, lowerCamelCase, lowerCamelCase, name=F"""layers.{i+1}""" ) for i in range(depth - 1 )],
]
def lowercase__ ( self : Optional[int], lowerCamelCase : List[Any] ):
'''simple docstring'''
for layer_module in self.layers:
lowercase__ = layer_module(lowerCamelCase )
return hidden_state
class _UpperCAmelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : List[str], lowerCamelCase : RegNetConfig, **lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
lowercase__ = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
lowerCamelCase, config.embedding_size, config.hidden_sizes[0], stride=2 if config.downsample_in_first_stage else 1, depth=config.depths[0], name='''stages.0''', ) )
lowercase__ = zip(config.hidden_sizes, config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(lowerCamelCase, config.depths[1:] ) ):
self.stages.append(TFRegNetStage(lowerCamelCase, lowerCamelCase, lowerCamelCase, depth=lowerCamelCase, name=F"""stages.{i+1}""" ) )
def lowercase__ ( self : Union[str, Any], lowerCamelCase : tf.Tensor, lowerCamelCase : bool = False, lowerCamelCase : bool = True ):
'''simple docstring'''
lowercase__ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase__ = hidden_states + (hidden_state,)
lowercase__ = stage_module(lowerCamelCase )
if output_hidden_states:
lowercase__ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=lowerCamelCase, hidden_states=lowerCamelCase )
@keras_serializable
class _UpperCAmelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
lowercase__ = RegNetConfig
def __init__( self : str, lowerCamelCase : int, **lowerCamelCase : Any ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
lowercase__ = config
lowercase__ = TFRegNetEmbeddings(lowerCamelCase, name='''embedder''' )
lowercase__ = TFRegNetEncoder(lowerCamelCase, name='''encoder''' )
lowercase__ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowerCamelCase, name='''pooler''' )
@unpack_inputs
def lowercase__ ( self : Any, lowerCamelCase : tf.Tensor, lowerCamelCase : Optional[bool] = None, lowerCamelCase : Optional[bool] = None, lowerCamelCase : bool = False, ):
'''simple docstring'''
lowercase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ = self.embedder(lowerCamelCase, training=lowerCamelCase )
lowercase__ = self.encoder(
lowerCamelCase, output_hidden_states=lowerCamelCase, return_dict=lowerCamelCase, training=lowerCamelCase )
lowercase__ = encoder_outputs[0]
lowercase__ = self.pooler(lowerCamelCase )
# Change to NCHW output format have uniformity in the modules
lowercase__ = tf.transpose(lowerCamelCase, perm=(0, 3, 1, 2) )
lowercase__ = tf.transpose(lowerCamelCase, perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
lowercase__ = tuple([tf.transpose(lowerCamelCase, perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCamelCase, pooler_output=lowerCamelCase, hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states, )
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = RegNetConfig
lowercase__ = """regnet"""
lowercase__ = """pixel_values"""
@property
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.floataa )}
A__ : List[str] = r'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
A__ : Dict = r'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"""The bare RegNet model outputting raw features without any specific head on top.""" ,A__ ,)
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
def __init__( self : Optional[Any], lowerCamelCase : RegNetConfig, *lowerCamelCase : List[str], **lowerCamelCase : Tuple ):
'''simple docstring'''
super().__init__(lowerCamelCase, *lowerCamelCase, **lowerCamelCase )
lowercase__ = TFRegNetMainLayer(lowerCamelCase, name='''regnet''' )
@unpack_inputs
@add_start_docstrings_to_model_forward(lowerCamelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC, output_type=lowerCamelCase, config_class=_CONFIG_FOR_DOC, modality='''vision''', expected_output=_EXPECTED_OUTPUT_SHAPE, )
def lowercase__ ( self : str, lowerCamelCase : tf.Tensor, lowerCamelCase : Optional[bool] = None, lowerCamelCase : Optional[bool] = None, lowerCamelCase : Tuple=False, ):
'''simple docstring'''
lowercase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ = self.regnet(
pixel_values=lowerCamelCase, output_hidden_states=lowerCamelCase, return_dict=lowerCamelCase, training=lowerCamelCase, )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state, pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, )
@add_start_docstrings(
"""
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" ,A__ ,)
class _UpperCAmelCase ( A__ ,A__ ):
"""simple docstring"""
def __init__( self : Optional[int], lowerCamelCase : RegNetConfig, *lowerCamelCase : Dict, **lowerCamelCase : List[Any] ):
'''simple docstring'''
super().__init__(lowerCamelCase, *lowerCamelCase, **lowerCamelCase )
lowercase__ = config.num_labels
lowercase__ = TFRegNetMainLayer(lowerCamelCase, name='''regnet''' )
# classification head
lowercase__ = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels, name='''classifier.1''' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(lowerCamelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=lowerCamelCase, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, )
def lowercase__ ( self : List[Any], lowerCamelCase : tf.Tensor = None, lowerCamelCase : tf.Tensor = None, lowerCamelCase : bool = None, lowerCamelCase : bool = None, lowerCamelCase : Tuple=False, ):
'''simple docstring'''
lowercase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ = self.regnet(
lowerCamelCase, output_hidden_states=lowerCamelCase, return_dict=lowerCamelCase, training=lowerCamelCase )
lowercase__ = outputs.pooler_output if return_dict else outputs[1]
lowercase__ = self.classifier[0](lowerCamelCase )
lowercase__ = self.classifier[1](lowerCamelCase )
lowercase__ = None if labels is None else self.hf_compute_loss(labels=lowerCamelCase, logits=lowerCamelCase )
if not return_dict:
lowercase__ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=lowerCamelCase, logits=lowerCamelCase, hidden_states=outputs.hidden_states )
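# Hedged usage sketch (assumes the original class name TFRegNetForImageClassification
# and the released checkpoint referenced in the docstrings above):
# from transformers import AutoImageProcessor, TFRegNetForImageClassification
# processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
# model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
# logits = model(**processor(images=image, return_tensors="tf")).logits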
| 671 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = 42
lowercase__ = 42
class _UpperCAmelCase ( A__ ,A__ ):
"""simple docstring"""
lowercase__ = 1
@register_to_config
def __init__( self : Union[str, Any], lowerCamelCase : int = 2_000, lowerCamelCase : float = 0.15, lowerCamelCase : float = 0.01, lowerCamelCase : float = 1348.0, lowerCamelCase : float = 1E-5, lowerCamelCase : int = 1, ):
'''simple docstring'''
# standard deviation of the initial noise distribution
lowercase__ = sigma_max
# setable values
lowercase__ = None
self.set_sigmas(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : List[str], lowerCamelCase : torch.FloatTensor, lowerCamelCase : Optional[int] = None ):
'''simple docstring'''
return sample
def lowercase__ ( self : Dict, lowerCamelCase : int, lowerCamelCase : float = None, lowerCamelCase : Union[str, torch.device] = None ):
'''simple docstring'''
lowercase__ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
lowercase__ = torch.linspace(1, lowerCamelCase, lowerCamelCase, device=lowerCamelCase )
def lowercase__ ( self : str, lowerCamelCase : int, lowerCamelCase : float = None, lowerCamelCase : float = None, lowerCamelCase : float = None ):
'''simple docstring'''
lowercase__ = sigma_min if sigma_min is not None else self.config.sigma_min
lowercase__ = sigma_max if sigma_max is not None else self.config.sigma_max
lowercase__ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(lowerCamelCase, lowerCamelCase )
lowercase__ = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
lowercase__ = torch.exp(torch.linspace(math.log(lowerCamelCase ), math.log(lowerCamelCase ), lowerCamelCase ) )
lowercase__ = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def lowercase__ ( self : Optional[int], lowerCamelCase : str, lowerCamelCase : str ):
'''simple docstring'''
return torch.where(
timesteps == 0, torch.zeros_like(t.to(timesteps.device ) ), self.discrete_sigmas[timesteps - 1].to(timesteps.device ), )
def lowercase__ ( self : Tuple, lowerCamelCase : torch.FloatTensor, lowerCamelCase : int, lowerCamelCase : torch.FloatTensor, lowerCamelCase : Optional[torch.Generator] = None, lowerCamelCase : bool = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
lowercase__ = timestep * torch.ones(
sample.shape[0], device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
lowercase__ = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
lowercase__ = timesteps.to(self.discrete_sigmas.device )
lowercase__ = self.discrete_sigmas[timesteps].to(sample.device )
lowercase__ = self.get_adjacent_sigma(lowerCamelCase, lowerCamelCase ).to(sample.device )
lowercase__ = torch.zeros_like(lowerCamelCase )
lowercase__ = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
lowercase__ = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
lowercase__ = diffusion.unsqueeze(-1 )
lowercase__ = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
lowercase__ = randn_tensor(
sample.shape, layout=sample.layout, generator=lowerCamelCase, device=sample.device, dtype=sample.dtype )
lowercase__ = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
lowercase__ = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=lowerCamelCase, prev_sample_mean=lowerCamelCase )
def lowercase__ ( self : int, lowerCamelCase : torch.FloatTensor, lowerCamelCase : torch.FloatTensor, lowerCamelCase : Optional[torch.Generator] = None, lowerCamelCase : bool = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
lowercase__ = randn_tensor(sample.shape, layout=sample.layout, generator=lowerCamelCase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
lowercase__ = torch.norm(model_output.reshape(model_output.shape[0], -1 ), dim=-1 ).mean()
lowercase__ = torch.norm(noise.reshape(noise.shape[0], -1 ), dim=-1 ).mean()
lowercase__ = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
lowercase__ = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
lowercase__ = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
lowercase__ = step_size.unsqueeze(-1 )
lowercase__ = sample + step_size * model_output
lowercase__ = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCamelCase )
def lowercase__ ( self : List[str], lowerCamelCase : torch.FloatTensor, lowerCamelCase : torch.FloatTensor, lowerCamelCase : torch.FloatTensor, ):
'''simple docstring'''
# Make sure sigmas and timesteps have the same device and dtype as original_samples
lowercase__ = timesteps.to(original_samples.device )
lowercase__ = self.discrete_sigmas.to(original_samples.device )[timesteps]
lowercase__ = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(lowerCamelCase ) * sigmas[:, None, None, None]
)
lowercase__ = noise + original_samples
return noisy_samples
def __len__( self : Union[str, Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
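# Hedged predictor-corrector sampling sketch (assumes the scheduler's original
# name ScoreSdeVeScheduler and its step_pred/step_correct methods):
# scheduler.set_timesteps(num_inference_steps)
# scheduler.set_sigmas(num_inference_steps)
# for t in scheduler.timesteps:
#     for _ in range(correct_steps):
#         sample = scheduler.step_correct(model(sample, t).sample, sample).prev_sample
#     sample = scheduler.step_pred(model(sample, t).sample, t, sample).prev_sample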
| 671 | 1 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
def __init__( self : int, lowerCamelCase : int, lowerCamelCase : Any=13, lowerCamelCase : Dict=7, lowerCamelCase : str=True, lowerCamelCase : Any=True, lowerCamelCase : Any=True, lowerCamelCase : Optional[Any]=True, lowerCamelCase : List[str]=True, lowerCamelCase : Tuple=False, lowerCamelCase : Tuple=False, lowerCamelCase : Optional[int]=False, lowerCamelCase : Tuple=2, lowerCamelCase : Union[str, Any]=99, lowerCamelCase : str=0, lowerCamelCase : Optional[int]=32, lowerCamelCase : Union[str, Any]=5, lowerCamelCase : Union[str, Any]=4, lowerCamelCase : Optional[int]=0.1, lowerCamelCase : Any=0.1, lowerCamelCase : Optional[Any]=512, lowerCamelCase : Tuple=12, lowerCamelCase : List[Any]=2, lowerCamelCase : Optional[int]=0.02, lowerCamelCase : Union[str, Any]=3, lowerCamelCase : Any=4, lowerCamelCase : Tuple="last", lowerCamelCase : Optional[int]=None, lowerCamelCase : Optional[Any]=None, ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_input_lengths
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = gelu_activation
lowercase__ = sinusoidal_embeddings
lowercase__ = causal
lowercase__ = asm
lowercase__ = n_langs
lowercase__ = vocab_size
lowercase__ = n_special
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = summary_type
lowercase__ = use_proj
lowercase__ = scope
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = None
if self.use_input_lengths:
lowercase__ = (
ids_tensor([self.batch_size], vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.n_langs )
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowercase__ = ids_tensor([self.batch_size], 2 ).float()
lowercase__ = ids_tensor([self.batch_size], self.num_choices )
lowercase__ = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
return FlaubertConfig(
vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, )
def lowercase__ ( self : str, lowerCamelCase : str, lowerCamelCase : Tuple, lowerCamelCase : Optional[int], lowerCamelCase : List[Any], lowerCamelCase : Dict, lowerCamelCase : List[str], lowerCamelCase : Optional[int], lowerCamelCase : List[Any], lowerCamelCase : Any, ):
'''simple docstring'''
lowercase__ = FlaubertModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ = model(lowerCamelCase, lengths=lowerCamelCase, langs=lowerCamelCase )
lowercase__ = model(lowerCamelCase, langs=lowerCamelCase )
lowercase__ = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Optional[Any], lowerCamelCase : List[str], lowerCamelCase : List[str], lowerCamelCase : Optional[Any], lowerCamelCase : List[str], lowerCamelCase : Optional[int], lowerCamelCase : str, lowerCamelCase : Optional[Any], lowerCamelCase : Optional[int], lowerCamelCase : Union[str, Any], ):
'''simple docstring'''
lowercase__ = FlaubertWithLMHeadModel(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ = model(lowerCamelCase, token_type_ids=lowerCamelCase, labels=lowerCamelCase )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : List[str], lowerCamelCase : Optional[Any], lowerCamelCase : Union[str, Any], lowerCamelCase : Union[str, Any], lowerCamelCase : Optional[int], lowerCamelCase : List[Any], lowerCamelCase : Any, lowerCamelCase : int, lowerCamelCase : List[str], lowerCamelCase : Union[str, Any], ):
'''simple docstring'''
lowercase__ = FlaubertForQuestionAnsweringSimple(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ = model(lowerCamelCase )
lowercase__ = model(lowerCamelCase, start_positions=lowerCamelCase, end_positions=lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def lowercase__ ( self : List[str], lowerCamelCase : Union[str, Any], lowerCamelCase : Optional[Any], lowerCamelCase : Union[str, Any], lowerCamelCase : Dict, lowerCamelCase : Dict, lowerCamelCase : int, lowerCamelCase : Tuple, lowerCamelCase : Any, lowerCamelCase : str, ):
'''simple docstring'''
lowercase__ = FlaubertForQuestionAnswering(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ = model(lowerCamelCase )
lowercase__ = model(
lowerCamelCase, start_positions=lowerCamelCase, end_positions=lowerCamelCase, cls_index=lowerCamelCase, is_impossible=lowerCamelCase, p_mask=lowerCamelCase, )
lowercase__ = model(
lowerCamelCase, start_positions=lowerCamelCase, end_positions=lowerCamelCase, cls_index=lowerCamelCase, is_impossible=lowerCamelCase, )
((lowercase__) , ) = result_with_labels.to_tuple()
lowercase__ = model(lowerCamelCase, start_positions=lowerCamelCase, end_positions=lowerCamelCase )
((lowercase__) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape, () )
self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,) )
def lowercase__ ( self : Dict, lowerCamelCase : Optional[Any], lowerCamelCase : int, lowerCamelCase : Dict, lowerCamelCase : Optional[Any], lowerCamelCase : Union[str, Any], lowerCamelCase : str, lowerCamelCase : List[Any], lowerCamelCase : Optional[int], lowerCamelCase : Union[str, Any], ):
'''simple docstring'''
lowercase__ = FlaubertForSequenceClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ = model(lowerCamelCase )
lowercase__ = model(lowerCamelCase, labels=lowerCamelCase )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self : Union[str, Any], lowerCamelCase : Any, lowerCamelCase : int, lowerCamelCase : Any, lowerCamelCase : Optional[int], lowerCamelCase : Any, lowerCamelCase : Tuple, lowerCamelCase : Dict, lowerCamelCase : int, lowerCamelCase : List[Any], ):
'''simple docstring'''
lowercase__ = self.num_labels
lowercase__ = FlaubertForTokenClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ = model(lowerCamelCase, attention_mask=lowerCamelCase, labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : List[Any], lowerCamelCase : Dict, lowerCamelCase : Tuple, lowerCamelCase : Tuple, lowerCamelCase : Optional[Any], lowerCamelCase : Tuple, lowerCamelCase : List[str], lowerCamelCase : int, lowerCamelCase : Optional[Any], lowerCamelCase : Union[str, Any], ):
'''simple docstring'''
lowercase__ = self.num_choices
lowercase__ = FlaubertForMultipleChoice(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowercase__ = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowercase__ = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowercase__ = model(
lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase, labels=lowerCamelCase, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
(
(
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) ,
) = config_and_inputs
lowercase__ = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''lengths''': input_lengths,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( A__ ,A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
lowercase__ = (
{
"""feature-extraction""": FlaubertModel,
"""fill-mask""": FlaubertWithLMHeadModel,
"""question-answering""": FlaubertForQuestionAnsweringSimple,
"""text-classification""": FlaubertForSequenceClassification,
"""token-classification""": FlaubertForTokenClassification,
"""zero-shot""": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowercase__ ( self : List[Any], lowerCamelCase : List[Any], lowerCamelCase : Tuple, lowerCamelCase : List[Any], lowerCamelCase : Optional[Any], lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def lowercase__ ( self : Any, lowerCamelCase : Any, lowerCamelCase : int, lowerCamelCase : Union[str, Any]=False ):
'''simple docstring'''
lowercase__ = super()._prepare_for_class(lowerCamelCase, lowerCamelCase, return_labels=lowerCamelCase )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
lowercase__ = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowerCamelCase )
lowercase__ = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowerCamelCase )
return inputs_dict
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = FlaubertModelTester(self )
lowercase__ = ConfigTester(self, config_class=lowerCamelCase, emb_dim=37 )
def lowercase__ ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowerCamelCase )
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowerCamelCase )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*lowerCamelCase )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowerCamelCase )
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCamelCase )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*lowerCamelCase )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*lowerCamelCase )
@slow
def lowercase__ ( self : Tuple ):
'''simple docstring'''
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = FlaubertModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
@slow
@require_torch_gpu
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
lowercase__ = True
lowercase__ = model_class(config=lowerCamelCase )
lowercase__ = self._prepare_for_class(lowerCamelCase, lowerCamelCase )
lowercase__ = torch.jit.trace(
lowerCamelCase, (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowerCamelCase, os.path.join(lowerCamelCase, '''traced_model.pt''' ) )
lowercase__ = torch.jit.load(os.path.join(lowerCamelCase, '''traced_model.pt''' ), map_location=lowerCamelCase )
loaded(inputs_dict['''input_ids'''].to(lowerCamelCase ), inputs_dict['''attention_mask'''].to(lowerCamelCase ) )
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''' )
lowercase__ = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
with torch.no_grad():
lowercase__ = model(lowerCamelCase )[0]
lowercase__ = torch.Size((1, 11, 768) )
self.assertEqual(output.shape, lowerCamelCase )
lowercase__ = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3], lowerCamelCase, atol=1E-4 ) )
| 671 |
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_500_000) -> int:
    """Project Euler 75: count perimeters up to `limit` that can form exactly
    one integer-sided right triangle, generating primitive triples with
    Euclid's formula and stepping through their multiples."""
    frequencies = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
if __name__ == "__main__":
print(F"{solution() = }")
| 671 | 1 |
import functools
def min_distance_up_bottom(word1: str, word2: str) -> int:
    """Top-down (memoized) edit distance between `word1` and `word2`."""
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
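    # Illustrative check (sketch): the classic example - turning "kitten"
    # into "sitting" takes 3 edits (2 substitutions + 1 insertion).
    assert min_distance_up_bottom("kitten", "sitting") == 3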
| 671 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
A__ : Dict = logging.get_logger(__name__)
A__ : Dict = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
A__ : Optional[int] = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
A__ : List[str] = {
'bert-base-uncased': 5_12,
'bert-large-uncased': 5_12,
'bert-base-cased': 5_12,
'bert-large-cased': 5_12,
'bert-base-multilingual-uncased': 5_12,
'bert-base-multilingual-cased': 5_12,
'bert-base-chinese': 5_12,
'bert-base-german-cased': 5_12,
'bert-large-uncased-whole-word-masking': 5_12,
'bert-large-cased-whole-word-masking': 5_12,
'bert-large-uncased-whole-word-masking-finetuned-squad': 5_12,
'bert-large-cased-whole-word-masking-finetuned-squad': 5_12,
'bert-base-cased-finetuned-mrpc': 5_12,
'bert-base-german-dbmdz-cased': 5_12,
'bert-base-german-dbmdz-uncased': 5_12,
'TurkuNLP/bert-base-finnish-cased-v1': 5_12,
'TurkuNLP/bert-base-finnish-uncased-v1': 5_12,
'wietsedv/bert-base-dutch-cased': 5_12,
}
A__ : Optional[int] = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_INIT_CONFIGURATION
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = BertTokenizer
def __init__( self : Any, lowerCamelCase : Optional[Any]=None, lowerCamelCase : Any=None, lowerCamelCase : Tuple=True, lowerCamelCase : Dict="[UNK]", lowerCamelCase : Any="[SEP]", lowerCamelCase : List[Any]="[PAD]", lowerCamelCase : Optional[Any]="[CLS]", lowerCamelCase : Dict="[MASK]", lowerCamelCase : List[Any]=True, lowerCamelCase : Tuple=None, **lowerCamelCase : Dict, ):
'''simple docstring'''
super().__init__(
lowerCamelCase, tokenizer_file=lowerCamelCase, do_lower_case=lowerCamelCase, unk_token=lowerCamelCase, sep_token=lowerCamelCase, pad_token=lowerCamelCase, cls_token=lowerCamelCase, mask_token=lowerCamelCase, tokenize_chinese_chars=lowerCamelCase, strip_accents=lowerCamelCase, **lowerCamelCase, )
lowercase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''', lowerCamelCase ) != do_lower_case
or normalizer_state.get('''strip_accents''', lowerCamelCase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''', lowerCamelCase ) != tokenize_chinese_chars
):
lowercase__ = getattr(lowerCamelCase, normalizer_state.pop('''type''' ) )
lowercase__ = do_lower_case
lowercase__ = strip_accents
lowercase__ = tokenize_chinese_chars
lowercase__ = normalizer_class(**lowerCamelCase )
lowercase__ = do_lower_case
def lowercase__ ( self : Any, lowerCamelCase : List[Any], lowerCamelCase : Dict=None ):
'''simple docstring'''
lowercase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowercase__ ( self : List[Any], lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase__ ( self : Any, lowerCamelCase : str, lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
lowercase__ = self._tokenizer.model.save(lowerCamelCase, name=lowerCamelCase )
return tuple(lowerCamelCase )
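# Usage sketch (hedged; `from_pretrained` downloads files on first use): the
# class above corresponds to transformers' BertTokenizerFast, so typical use
# looks like:
#
#     tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
#     enc = tok("first segment", "second segment")
#     enc["token_type_ids"]  # 0s for the first segment, 1s for the second,
#                            # matching the token-type logic defined above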
| 671 | 1 |
from itertools import count
def solution(min_block_length: int = 50) -> int:
    """Project Euler 115: least row length n for which the fill-count
    function F(min_block_length, n) first exceeds one million."""
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
            break
    return n
if __name__ == "__main__":
print(F"{solution() = }")
| 671 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A__ : Any = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Union[str, Any] = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
A__ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
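# Note on the pattern above: `_LazyModule` defers the heavy framework imports
# until an attribute is first accessed, so `from transformers import
# UniSpeechModel` only pays the cost of importing `modeling_unispeech` (and
# torch) at that point, keeping `import transformers` itself fast.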
| 671 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
A__ : Dict = None
A__ : int = logging.get_logger(__name__)
A__ : int = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
A__ : str = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
},
'tokenizer_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json',
},
}
A__ : Any = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
A__ : Optional[int] = '▁'
# Segments (not really needed)
A__ : Tuple = 0
A__ : Tuple = 1
A__ : Dict = 2
A__ : Any = 3
A__ : Any = 4
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = """left"""
lowercase__ = XLNetTokenizer
def __init__( self : Optional[Any], lowerCamelCase : Tuple=None, lowerCamelCase : Union[str, Any]=None, lowerCamelCase : Tuple=False, lowerCamelCase : List[Any]=True, lowerCamelCase : Optional[int]=False, lowerCamelCase : Optional[int]="<s>", lowerCamelCase : Any="</s>", lowerCamelCase : Dict="<unk>", lowerCamelCase : Optional[Any]="<sep>", lowerCamelCase : Optional[Any]="<pad>", lowerCamelCase : List[Any]="<cls>", lowerCamelCase : Optional[int]="<mask>", lowerCamelCase : List[Any]=["<eop>", "<eod>"], **lowerCamelCase : List[str], ):
'''simple docstring'''
# Mask token behave like a normal word, i.e. include the space before it
lowercase__ = AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase ) if isinstance(lowerCamelCase, lowerCamelCase ) else mask_token
super().__init__(
vocab_file=lowerCamelCase, tokenizer_file=lowerCamelCase, do_lower_case=lowerCamelCase, remove_space=lowerCamelCase, keep_accents=lowerCamelCase, bos_token=lowerCamelCase, eos_token=lowerCamelCase, unk_token=lowerCamelCase, sep_token=lowerCamelCase, pad_token=lowerCamelCase, cls_token=lowerCamelCase, mask_token=lowerCamelCase, additional_special_tokens=lowerCamelCase, **lowerCamelCase, )
lowercase__ = 3
lowercase__ = do_lower_case
lowercase__ = remove_space
lowercase__ = keep_accents
lowercase__ = vocab_file
lowercase__ = False if not self.vocab_file else True
def lowercase__ ( self : Union[str, Any], lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def lowercase__ ( self : Optional[int], lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
lowercase__ = [self.sep_token_id]
lowercase__ = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def lowercase__ ( self : List[Any], lowerCamelCase : str, lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(lowerCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ = os.path.join(
lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ):
copyfile(self.vocab_file, lowerCamelCase )
return (out_vocab_file,)
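# Usage sketch (hedged; requires the sentencepiece/tokenizer files): the class
# above corresponds to transformers' XLNetTokenizerFast. Unlike BERT, the
# special tokens go at the *end* of the sequence (`... <sep> <cls>`) and
# padding is applied on the left (`padding_side = "left"`), exactly as encoded
# in `build_inputs_with_special_tokens` and the class attribute above:
#
#     tok = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
#     tok("hello world")["input_ids"]  # ends with the <sep> and <cls> ids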
| 671 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def map(dataset: datasets.Dataset, **kwargs):  # shadows the builtin on purpose
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):  # shadows the builtin on purpose
    _ = dataset.filter(**kwargs)
def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features(
            {"text": datasets.Value("string"), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )
        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        # NOTE: the result-dict keys below are descriptive labels chosen here;
        # only the timed calls themselves come from the original benchmark.
        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)
        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)
        times["filter"] = filter(dataset)
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
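# For context: `get_duration` is imported from this benchmark's local `utils`
# module. It is presumably a timing decorator along these lines (hypothetical
# sketch, not the actual implementation):
#
#     import functools, time
#
#     def get_duration(func):
#         @functools.wraps(func)
#         def wrapper(*args, **kwargs):
#             start = time.time()
#             func(*args, **kwargs)
#             return time.time() - start
#         return wrapper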
| 671 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
A__ : Union[str, Any] = logging.get_logger(__name__)
A__ : str = '▁'
A__ : List[str] = {'vocab_file': 'sentencepiece.bpe.model'}
A__ : int = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'
),
}
}
A__ : int = {
'facebook/nllb-200-distilled-600M': 10_24,
}
# fmt: off
A__ : Optional[Any] = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = ["""input_ids""", """attention_mask"""]
lowercase__ = []
lowercase__ = []
def __init__( self : Optional[Any], lowerCamelCase : int, lowerCamelCase : Optional[int]="<s>", lowerCamelCase : List[Any]="</s>", lowerCamelCase : Optional[Any]="</s>", lowerCamelCase : str="<s>", lowerCamelCase : Any="<unk>", lowerCamelCase : Dict="<pad>", lowerCamelCase : int="<mask>", lowerCamelCase : List[Any]=None, lowerCamelCase : Dict=None, lowerCamelCase : int=None, lowerCamelCase : Optional[Dict[str, Any]] = None, lowerCamelCase : List[Any]=None, lowerCamelCase : List[str]=False, **lowerCamelCase : str, ):
'''simple docstring'''
# Mask token behave like a normal word, i.e. include the space before it
lowercase__ = AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase ) if isinstance(lowerCamelCase, lowerCamelCase ) else mask_token
lowercase__ = {} if sp_model_kwargs is None else sp_model_kwargs
lowercase__ = legacy_behaviour
super().__init__(
bos_token=lowerCamelCase, eos_token=lowerCamelCase, unk_token=lowerCamelCase, sep_token=lowerCamelCase, cls_token=lowerCamelCase, pad_token=lowerCamelCase, mask_token=lowerCamelCase, tokenizer_file=lowerCamelCase, src_lang=lowerCamelCase, tgt_lang=lowerCamelCase, additional_special_tokens=lowerCamelCase, sp_model_kwargs=self.sp_model_kwargs, legacy_behaviour=lowerCamelCase, **lowerCamelCase, )
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCamelCase ) )
lowercase__ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
lowercase__ = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowercase__ = 1
lowercase__ = len(self.sp_model )
lowercase__ = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowerCamelCase )
}
lowercase__ = {v: k for k, v in self.lang_code_to_id.items()}
lowercase__ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
lowercase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
lowercase__ = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
lowercase__ = src_lang if src_lang is not None else '''eng_Latn'''
lowercase__ = self.lang_code_to_id[self._src_lang]
lowercase__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.__dict__.copy()
lowercase__ = None
lowercase__ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : str, lowerCamelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ = d
# for backward compatibility
if not hasattr(self, '''sp_model_kwargs''' ):
lowercase__ = {}
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def lowercase__ ( self : int ):
'''simple docstring'''
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def lowercase__ ( self : Tuple ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def lowercase__ ( self : Optional[Any], lowerCamelCase : str ):
'''simple docstring'''
lowercase__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowercase__ ( self : Union[str, Any], lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None, lowerCamelCase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase, token_ids_a=lowerCamelCase, already_has_special_tokens=lowerCamelCase )
lowercase__ = [1] * len(self.prefix_tokens )
lowercase__ = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCamelCase )) + suffix_ones
return prefix_ones + ([0] * len(lowerCamelCase )) + ([0] * len(lowerCamelCase )) + suffix_ones
def lowercase__ ( self : str, lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowercase__ ( self : int, lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase__ ( self : str, lowerCamelCase : Optional[int], lowerCamelCase : str, lowerCamelCase : Optional[str], lowerCamelCase : Optional[str], **lowerCamelCase : int ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
lowercase__ = src_lang
lowercase__ = self(lowerCamelCase, add_special_tokens=lowerCamelCase, return_tensors=lowerCamelCase, **lowerCamelCase )
lowercase__ = self.convert_tokens_to_ids(lowerCamelCase )
lowercase__ = tgt_lang_id
return inputs
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = {self.convert_ids_to_tokens(lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase__ ( self : Optional[int], lowerCamelCase : str ):
'''simple docstring'''
return self.sp_model.encode(lowerCamelCase, out_type=lowerCamelCase )
def lowercase__ ( self : Union[str, Any], lowerCamelCase : Dict ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase__ = self.sp_model.PieceToId(lowerCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowercase__ ( self : int, lowerCamelCase : List[Any] ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowercase__ ( self : List[Any], lowerCamelCase : List[Any] ):
'''simple docstring'''
lowercase__ = ''''''.join(lowerCamelCase ).replace(lowerCamelCase, ''' ''' ).strip()
return out_string
def lowercase__ ( self : Tuple, lowerCamelCase : str, lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(lowerCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ = os.path.join(
lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase, '''wb''' ) as fi:
lowercase__ = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase )
return (out_vocab_file,)
def lowercase__ ( self : int, lowerCamelCase : List[str], lowerCamelCase : str = "eng_Latn", lowerCamelCase : Optional[List[str]] = None, lowerCamelCase : str = "fra_Latn", **lowerCamelCase : Tuple, ):
'''simple docstring'''
lowercase__ = src_lang
lowercase__ = tgt_lang
return super().prepare_seqaseq_batch(lowerCamelCase, lowerCamelCase, **lowerCamelCase )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def lowercase__ ( self : Any ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowercase__ ( self : Optional[Any], lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
lowercase__ = []
lowercase__ = [self.eos_token_id, self.cur_lang_code]
else:
lowercase__ = [self.cur_lang_code]
lowercase__ = [self.eos_token_id]
def lowercase__ ( self : Tuple, lowerCamelCase : str ):
'''simple docstring'''
lowercase__ = self.lang_code_to_id[lang]
if self.legacy_behaviour:
lowercase__ = []
lowercase__ = [self.eos_token_id, self.cur_lang_code]
else:
lowercase__ = [self.cur_lang_code]
lowercase__ = [self.eos_token_id]
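# Usage sketch (hedged; downloads the sentencepiece model on first use): the
# class above corresponds to transformers' NllbTokenizer. With the default
# (non-legacy) behaviour, the source language code is prepended and </s>
# appended to every encoded sequence, as set up in
# `set_src_lang_special_tokens` above:
#
#     tok = NllbTokenizer.from_pretrained(
#         "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#     )
#     tok("Hello world")["input_ids"]  # begins with the eng_Latn code id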
| 671 |
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False):
        # Mapping from the first character of the prefix of the node
        self.nodes = {}

        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Compute the common substring of the node prefix and a word, and
        return (matching part, remaining prefix, remaining word)."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]
    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True

        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)

        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)

            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)

            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)
    def find(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)
    def delete(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True
    def print_tree(self, height: int = 0) -> None:
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)
def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests() -> None:
    # Nothing in this file calls this helper by name; `pytests` follows the
    # upstream reference implementation this snippet appears to come from.
    assert test_trie()


def main() -> None:
    """Example of use."""
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()
if __name__ == "__main__":
main()
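# Quick illustration of the path compression a radix tree provides over a
# plain trie: two words sharing a long prefix produce one shared edge plus two
# short branches, rather than one node per character. Roughly:
#
#     rt = RadixNode()
#     rt.insert_many(["romane", "romanus"])
#     rt.print_tree()
#     # - roman
#     # -- e  (leaf)
#     # -- us  (leaf)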
| 671 | 1 |
import random
def partition(a: list, left_index: int, right_index: int) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point


def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)
if __name__ == "__main__":
main()
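# Quick sanity sketch: the sort is in place and `right` is exclusive.
#
#     data = [5, 3, 8, 1]
#     quick_sort_random(data, 0, len(data))
#     data  # -> [1, 3, 5, 8]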
| 671 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
lowercase__ = ViTImageProcessor if is_vision_available() else None
@property
def lowercase__ ( self : List[str] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = (3, 32, 128)
lowercase__ = tempfile.mkdtemp()
# fmt: off
lowercase__ = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
lowercase__ = dict(zip(lowerCamelCase, range(len(lowerCamelCase ) ) ) )
lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCamelCase ) + '''\n''' )
lowercase__ = {
'''do_normalize''': False,
'''do_resize''': True,
'''image_processor_type''': '''ViTImageProcessor''',
'''resample''': 3,
'''size''': {'''height''': 32, '''width''': 128},
}
lowercase__ = os.path.join(self.tmpdirname, lowerCamelCase )
with open(self.image_processor_file, '''w''', encoding='''utf-8''' ) as fp:
json.dump(lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : int, **lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname, **lowerCamelCase )
def lowercase__ ( self : str, **lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase )
def lowercase__ ( self : int ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )
lowercase__ = Image.fromarray(np.moveaxis(lowerCamelCase, 0, -1 ) )
return image_input
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_image_processor()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
lowercase__ = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=lowerCamelCase )
self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer, lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCamelCase )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_image_processor()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
lowercase__ = self.get_tokenizer(bos_token='''(BOS)''', eos_token='''(EOS)''' )
lowercase__ = self.get_image_processor(do_normalize=lowerCamelCase, padding_value=1.0 )
lowercase__ = MgpstrProcessor.from_pretrained(
self.tmpdirname, bos_token='''(BOS)''', eos_token='''(EOS)''', do_normalize=lowerCamelCase, padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer, lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCamelCase )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = self.prepare_image_inputs()
lowercase__ = image_processor(lowerCamelCase, return_tensors='''np''' )
lowercase__ = processor(images=lowerCamelCase, return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1E-2 )
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = '''test'''
lowercase__ = processor(text=lowerCamelCase )
lowercase__ = tokenizer(lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = '''test'''
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=lowerCamelCase, images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ), ['''pixel_values''', '''labels'''] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ = processor.char_decode(lowerCamelCase )
lowercase__ = tokenizer.batch_decode(lowerCamelCase )
lowercase__ = [seq.replace(''' ''', '''''' ) for seq in decoded_tok]
self.assertListEqual(lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = None
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=lowerCamelCase, images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ), processor.model_input_names )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = torch.randn(1, 27, 38 )
lowercase__ = torch.randn(1, 27, 50_257 )
lowercase__ = torch.randn(1, 27, 30_522 )
lowercase__ = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ), ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
| 671 | 1 |
A__ : Optional[int] = frozenset(
[
'prompt',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
A__ : str = frozenset(['prompt', 'negative_prompt'])
A__ : Tuple = frozenset([])
A__ : Optional[Any] = frozenset(['image'])
A__ : Union[str, Any] = frozenset(
[
'image',
'height',
'width',
'guidance_scale',
]
)
A__ : Tuple = frozenset(['image'])
A__ : Any = frozenset(
[
'prompt',
'image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
A__ : Union[str, Any] = frozenset(['prompt', 'image', 'negative_prompt'])
A__ : Optional[Any] = frozenset(
[
# Text guided image variation with an image mask
'prompt',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
A__ : List[str] = frozenset(['prompt', 'image', 'mask_image', 'negative_prompt'])
A__ : Dict = frozenset(
[
# image variation with an image mask
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
A__ : Tuple = frozenset(['image', 'mask_image'])
A__ : Optional[int] = frozenset(
[
'example_image',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
A__ : List[str] = frozenset(['example_image', 'image', 'mask_image'])
A__ : str = frozenset(['class_labels'])
A__ : List[str] = frozenset(['class_labels'])
A__ : Dict = frozenset(['batch_size'])
A__ : int = frozenset([])
A__ : Dict = frozenset(['batch_size'])
A__ : Optional[Any] = frozenset([])
A__ : str = frozenset(
[
'prompt',
'audio_length_in_s',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
A__ : Union[str, Any] = frozenset(['prompt', 'negative_prompt'])
A__ : Union[str, Any] = frozenset(['input_tokens'])
A__ : int = frozenset(['input_tokens'])
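# Note (assumption, based on diffusers' `pipeline_params.py`): each frozenset
# above lists the `__call__` arguments expected for one pipeline family
# (text-to-image, image variation, inpainting, unconditional image/audio,
# token-based, ...), paired as "full params" / "batch params"; the shared
# pipeline tests compare a pipeline's signature against these sets.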
| 671 |
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Square-and-multiply modular exponentiation: base**exponent % modulo_value."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Project Euler 188: last `digits` digits of the hyperexponentiation
    (tetration) base^^height, keeping only 10**digits residues at each step."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
print(F"{solution() = }")
| 671 | 1 |
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """Project Euler 174: count tile totals t <= t_limit that form between 1
    and n_limit distinct hollow square laminae (t = outer^2 - hole^2)."""
    count = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        # keep the hole the same parity as the outer square
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(F"{solution() = }")
| 671 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
A__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
def __init__( self : Optional[int], lowerCamelCase : WhisperForConditionalGeneration, lowerCamelCase : WhisperProcessor, lowerCamelCase : AutoencoderKL, lowerCamelCase : CLIPTextModel, lowerCamelCase : CLIPTokenizer, lowerCamelCase : UNetaDConditionModel, lowerCamelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], lowerCamelCase : StableDiffusionSafetyChecker, lowerCamelCase : CLIPImageProcessor, ):
'''simple docstring'''
super().__init__()
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
speech_model=lowerCamelCase, speech_processor=lowerCamelCase, vae=lowerCamelCase, text_encoder=lowerCamelCase, tokenizer=lowerCamelCase, unet=lowerCamelCase, scheduler=lowerCamelCase, feature_extractor=lowerCamelCase, )
def lowercase__ ( self : Optional[Any], lowerCamelCase : Optional[Union[str, int]] = "auto" ):
'''simple docstring'''
if slice_size == "auto":
lowercase__ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
self.enable_attention_slicing(lowerCamelCase )
@torch.no_grad()
def __call__( self : Any, lowerCamelCase : Optional[Any], lowerCamelCase : Optional[Any]=16_000, lowerCamelCase : int = 512, lowerCamelCase : int = 512, lowerCamelCase : int = 50, lowerCamelCase : float = 7.5, lowerCamelCase : Optional[Union[str, List[str]]] = None, lowerCamelCase : Optional[int] = 1, lowerCamelCase : float = 0.0, lowerCamelCase : Optional[torch.Generator] = None, lowerCamelCase : Optional[torch.FloatTensor] = None, lowerCamelCase : Optional[str] = "pil", lowerCamelCase : bool = True, lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, lowerCamelCase : int = 1, **lowerCamelCase : Optional[Any], ):
'''simple docstring'''
lowercase__ = self.speech_processor.feature_extractor(
lowerCamelCase, return_tensors='''pt''', sampling_rate=lowerCamelCase ).input_features.to(self.device )
lowercase__ = self.speech_model.generate(lowerCamelCase, max_length=480_000 )
lowercase__ = self.speech_processor.tokenizer.batch_decode(lowerCamelCase, skip_special_tokens=lowerCamelCase, normalize=lowerCamelCase )[
0
]
if isinstance(lowerCamelCase, lowerCamelCase ):
lowercase__ = 1
elif isinstance(lowerCamelCase, lowerCamelCase ):
lowercase__ = len(lowerCamelCase )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(lowerCamelCase )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowerCamelCase, lowerCamelCase ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(lowerCamelCase )}.""" )
# get prompt text embeddings
lowercase__ = self.tokenizer(
lowerCamelCase, padding='''max_length''', max_length=self.tokenizer.model_max_length, return_tensors='''pt''', )
lowercase__ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowercase__ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
lowercase__ = text_input_ids[:, : self.tokenizer.model_max_length]
lowercase__ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowercase__ , lowercase__ , lowercase__ = text_embeddings.shape
lowercase__ = text_embeddings.repeat(1, lowerCamelCase, 1 )
lowercase__ = text_embeddings.view(bs_embed * num_images_per_prompt, lowerCamelCase, -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowercase__ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowercase__ = 42
if negative_prompt is None:
lowercase__ = [''''''] * batch_size
elif type(lowerCamelCase ) is not type(lowerCamelCase ):
raise TypeError(
F"""`negative_prompt` should be the same type to `prompt`, but got {type(lowerCamelCase )} !="""
F""" {type(lowerCamelCase )}.""" )
elif isinstance(lowerCamelCase, lowerCamelCase ):
lowercase__ = [negative_prompt]
elif batch_size != len(lowerCamelCase ):
raise ValueError(
F"""`negative_prompt`: {negative_prompt} has batch size {len(lowerCamelCase )}, but `prompt`:"""
F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
''' the batch size of `prompt`.''' )
else:
lowercase__ = negative_prompt
lowercase__ = text_input_ids.shape[-1]
lowercase__ = self.tokenizer(
lowerCamelCase, padding='''max_length''', max_length=lowerCamelCase, truncation=lowerCamelCase, return_tensors='''pt''', )
lowercase__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase__ = uncond_embeddings.shape[1]
lowercase__ = uncond_embeddings.repeat(1, lowerCamelCase, 1 )
lowercase__ = uncond_embeddings.view(batch_size * num_images_per_prompt, lowerCamelCase, -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase__ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowercase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowercase__ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowercase__ = torch.randn(lowerCamelCase, generator=lowerCamelCase, device='''cpu''', dtype=lowerCamelCase ).to(
self.device )
else:
lowercase__ = torch.randn(lowerCamelCase, generator=lowerCamelCase, device=self.device, dtype=lowerCamelCase )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
lowercase__ = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(lowerCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowercase__ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowercase__ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase__ = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowercase__ = {}
if accepts_eta:
lowercase__ = eta
for i, t in enumerate(self.progress_bar(lowerCamelCase ) ):
# expand the latents if we are doing classifier free guidance
lowercase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__ = self.scheduler.scale_model_input(lowerCamelCase, lowerCamelCase )
# predict the noise residual
lowercase__ = self.unet(lowerCamelCase, lowerCamelCase, encoder_hidden_states=lowerCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
lowercase__ , lowercase__ = noise_pred.chunk(2 )
lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
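# A quick worked instance of the guidance update above (illustrative numbers):
# with guidance_scale = 7.5 the estimate becomes uncond + 7.5 * (text - uncond),
# pushing the prediction along the direction that separates the text-conditioned
# noise estimate from the unconditional one; guidance_scale = 1 recovers the
# pure conditional estimate.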
# compute the previous noisy sample x_t -> x_t-1
lowercase__ = self.scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowerCamelCase, lowerCamelCase, lowerCamelCase )
lowercase__ = 1 / 0.18215 * latents
lowercase__ = self.vae.decode(lowerCamelCase ).sample
lowercase__ = (image / 2 + 0.5).clamp(0, 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowercase__ = image.cpu().permute(0, 2, 3, 1 ).float().numpy()
if output_type == "pil":
lowercase__ = self.numpy_to_pil(lowerCamelCase )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=lowerCamelCase, nsfw_content_detected=lowerCamelCase )
| 671 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
A__ : Optional[int] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : str = ['MLukeTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
A__ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 671 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
lowercase__ = 42
lowercase__ = 42
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : str, lowerCamelCase : int ):
'''simple docstring'''
lowercase__ = [[] for _ in range(lowerCamelCase )]
lowercase__ = size
def __getitem__( self : Optional[Any], lowerCamelCase : int ):
'''simple docstring'''
return iter(self._graph[vertex] )
@property
def lowercase__ ( self : str ):
'''simple docstring'''
return self._size
def lowercase__ ( self : Union[str, Any], lowerCamelCase : int, lowerCamelCase : int, lowerCamelCase : int ):
'''simple docstring'''
if weight not in (0, 1):
raise ValueError('''Edge weight must be either 0 or 1.''' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('''Vertex indexes must be in [0; size).''' )
self._graph[from_vertex].append(Edge(lowerCamelCase, lowerCamelCase ) )
def lowercase__ ( self : Optional[int], lowerCamelCase : int, lowerCamelCase : int ):
'''simple docstring'''
lowercase__ = deque([start_vertex] )
lowercase__ = [None] * self.size
lowercase__ = 0
while queue:
lowercase__ = queue.popleft()
lowercase__ = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
lowercase__ = current_distance + edge.weight
lowercase__ = distances[edge.destination_vertex]
if (
isinstance(lowerCamelCase, lowerCamelCase )
and new_distance >= dest_vertex_distance
):
continue
lowercase__ = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('''No path from start_vertex to finish_vertex.''' )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
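# Why the deque works: edge weights are restricted to 0 or 1, so appending
# 0-weight destinations to the front and 1-weight destinations to the back keeps
# the queue sorted by tentative distance. This gives Dijkstra-like correctness
# in O(V + E) without a priority queue (the classic 0-1 BFS technique).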
| 671 | 1 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
# Initialise PyTorch model
lowercase__ = BertConfig.from_json_file(lowerCamelCase_ )
print(F"""Building PyTorch model from configuration: {config}""" )
lowercase__ = BertForPreTraining(lowerCamelCase_ )
# Load weights from tf checkpoint
load_tf_weights_in_bert(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , lowerCamelCase_ )
if __name__ == "__main__":
A__ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
A__ : Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 671 |
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Optional[int], lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
# the input arrives as a comma-separated string, so split it into a list of tokens
lowercase__ = arr.split(''',''' )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = [int(self.array[0] )] * len(self.array )
lowercase__ = [int(self.array[0] )] * len(self.array )
for i in range(1, len(self.array ) ):
lowercase__ = max(
int(self.array[i] ) + sum_value[i - 1], int(self.array[i] ) )
lowercase__ = max(sum_value[i], rear[i - 1] )
return rear[len(self.array ) - 1]
if __name__ == "__main__":
A__ : Dict = input('please input some numbers:')
A__ : Union[str, Any] = SubArray(whole_array)
A__ : int = array.solve_sub_array()
print(('the result is:', re))
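# The two recurrences above are Kadane's maximum-subarray algorithm:
# sum_value[i] is the best sum of a subarray ending exactly at index i, and
# rear[i] is the best sum seen over any prefix, so rear[-1] is the answer.
# E.g. for the input "1,-2,4,-1,2" the result is 5 (subarray [4, -1, 2]).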
| 671 | 1 |
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Union[str, Any], lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
lowercase__ = val
lowercase__ = None
lowercase__ = None
def lowercase__ ( self : Union[str, Any], lowerCamelCase : str ):
'''simple docstring'''
if self.val:
if val < self.val:
if self.left is None:
lowercase__ = Node(lowerCamelCase )
else:
self.left.insert(lowerCamelCase )
elif val > self.val:
if self.right is None:
lowercase__ = Node(lowerCamelCase )
else:
self.right.insert(lowerCamelCase )
else:
lowercase__ = val
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
# Recursive traversal
if root:
inorder(root.left , lowerCamelCase_ )
res.append(root.val )
inorder(root.right , lowerCamelCase_ )
def a ( lowerCamelCase_ ):
'''simple docstring'''
# Build BST
if len(lowerCamelCase_ ) == 0:
return arr
lowercase__ = Node(arr[0] )
for i in range(1 , len(lowerCamelCase_ ) ):
root.insert(arr[i] )
# Traverse BST in order.
lowercase__ = []
inorder(lowerCamelCase_ , lowerCamelCase_ )
return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
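# Tree sort costs O(n log n) on average but degrades to O(n^2) when the input
# is already ordered, since every insert then walks one long unbalanced spine.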
| 671 |
from itertools import count
def a ( lowerCamelCase_ = 50 ):
'''simple docstring'''
lowercase__ = [1] * min_block_length
for n in count(lowerCamelCase_ ):
fill_count_functions.append(1 )
for block_length in range(lowerCamelCase_ , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 1_000_000:
break
return n
if __name__ == "__main__":
print(F"{solution() = }")
| 671 | 1 |
A__ : int = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = set()
# keep track of all the paths to be checked
lowercase__ = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
lowercase__ = queue.pop(0 )
# get the last node from the path
lowercase__ = path[-1]
if node not in explored:
lowercase__ = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
lowercase__ = list(lowerCamelCase_ )
new_path.append(lowerCamelCase_ )
queue.append(lowerCamelCase_ )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(lowerCamelCase_ )
# in case there's no path between the 2 nodes
return []
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
lowercase__ = [start]
lowercase__ = set(lowerCamelCase_ )
# Keep tab on distances from `start` node.
lowercase__ = {start: 0, target: -1}
while queue:
lowercase__ = queue.pop(0 )
if node == target:
lowercase__ = (
dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(lowerCamelCase_ )
queue.append(lowerCamelCase_ )
lowercase__ = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4
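# Both searches are plain breadth-first traversals: nodes are expanded in order
# of distance from the start, so the first time the goal is generated the path
# (or distance) found is minimal. Each runs in O(V + E) time.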
| 671 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
A__ : Tuple = logging.get_logger(__name__)
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = ["""input_features""", """is_longer"""]
def __init__( self : Optional[int], lowerCamelCase : int=64, lowerCamelCase : Union[str, Any]=48_000, lowerCamelCase : str=480, lowerCamelCase : Tuple=10, lowerCamelCase : List[Any]=1_024, lowerCamelCase : Optional[int]=0.0, lowerCamelCase : Optional[Any]=False, lowerCamelCase : float = 0, lowerCamelCase : float = 14_000, lowerCamelCase : int = None, lowerCamelCase : str = "fusion", lowerCamelCase : str = "repeatpad", **lowerCamelCase : Dict, ):
'''simple docstring'''
super().__init__(
feature_size=lowerCamelCase, sampling_rate=lowerCamelCase, padding_value=lowerCamelCase, return_attention_mask=lowerCamelCase, **lowerCamelCase, )
lowercase__ = top_db
lowercase__ = truncation
lowercase__ = padding
lowercase__ = fft_window_size
lowercase__ = (fft_window_size >> 1) + 1
lowercase__ = hop_length
lowercase__ = max_length_s
lowercase__ = max_length_s * sampling_rate
lowercase__ = sampling_rate
lowercase__ = frequency_min
lowercase__ = frequency_max
lowercase__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins, num_mel_filters=lowerCamelCase, min_frequency=lowerCamelCase, max_frequency=lowerCamelCase, sampling_rate=lowerCamelCase, norm=lowerCamelCase, mel_scale='''htk''', )
lowercase__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins, num_mel_filters=lowerCamelCase, min_frequency=lowerCamelCase, max_frequency=lowerCamelCase, sampling_rate=lowerCamelCase, norm='''slaney''', mel_scale='''slaney''', )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = copy.deepcopy(self.__dict__ )
lowercase__ = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def lowercase__ ( self : Optional[int], lowerCamelCase : np.array, lowerCamelCase : Optional[np.array] = None ):
'''simple docstring'''
lowercase__ = spectrogram(
lowerCamelCase, window_function(self.fft_window_size, '''hann''' ), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=lowerCamelCase, log_mel='''dB''', )
return log_mel_spectrogram.T
def lowercase__ ( self : int, lowerCamelCase : str, lowerCamelCase : List[str], lowerCamelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ = np.array_split(list(range(0, total_frames - chunk_frames + 1 ) ), 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
lowercase__ = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
lowercase__ = [0]
# randomly choose index for each part
lowercase__ = np.random.choice(ranges[0] )
lowercase__ = np.random.choice(ranges[1] )
lowercase__ = np.random.choice(ranges[2] )
lowercase__ = mel[idx_front : idx_front + chunk_frames, :]
lowercase__ = mel[idx_middle : idx_middle + chunk_frames, :]
lowercase__ = mel[idx_back : idx_back + chunk_frames, :]
lowercase__ = torch.tensor(mel[None, None, :] )
lowercase__ = torch.nn.functional.interpolate(
lowerCamelCase, size=[chunk_frames, 64], mode='''bilinear''', align_corners=lowerCamelCase )
lowercase__ = mel_shrink[0][0].numpy()
lowercase__ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0 )
return mel_fusion
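# The fusion tensor stacks four 64-bin mel views along a new leading axis:
# the whole spectrogram bilinearly shrunk to chunk_frames plus three random
# chunks (from the front / middle / back thirds), giving shape
# (4, chunk_frames, 64).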
def lowercase__ ( self : List[str], lowerCamelCase : np.array, lowerCamelCase : int, lowerCamelCase : Dict, lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
lowercase__ = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
lowercase__ = len(lowerCamelCase ) - max_length
lowercase__ = np.random.randint(0, overflow + 1 )
lowercase__ = waveform[idx : idx + max_length]
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters )
lowercase__ = max_length // self.hop_length + 1 # the +1 relates to how the spectrogram frame count is computed
lowercase__ = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
lowercase__ = np.stack([mel, mel, mel, mel], axis=0 )
lowercase__ = False
else:
lowercase__ = self._random_mel_fusion(lowerCamelCase, lowerCamelCase, lowerCamelCase )
lowercase__ = True
else:
raise NotImplementedError(F"""data_truncating {truncation} not implemented""" )
else:
lowercase__ = False
# Only use repeat as a new possible value for padding: the audio is repeated before the usual max_length padding is applied.
if waveform.shape[0] < max_length:
if padding == "repeat":
lowercase__ = int(max_length / len(lowerCamelCase ) )
lowercase__ = np.stack(np.tile(lowerCamelCase, n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
lowercase__ = int(max_length / len(lowerCamelCase ) )
lowercase__ = np.stack(np.tile(lowerCamelCase, lowerCamelCase ) )
lowercase__ = np.pad(lowerCamelCase, (0, max_length - waveform.shape[0]), mode='''constant''', constant_values=0 )
if truncation == "fusion":
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters )
lowercase__ = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0 )
else:
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters_slaney )[None, :]
return input_mel, longer
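# Summary of the branches above: inputs longer than max_length are either
# randomly cropped ("rand_trunc") or turned into a 4-channel fusion tensor
# ("fusion"); shorter inputs are tiled up to max_length ("repeat"), or tiled
# and then zero-padded ("repeatpad") before a single mel spectrogram is
# extracted.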
def __call__( self : Union[str, Any], lowerCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], lowerCamelCase : str = None, lowerCamelCase : Optional[str] = None, lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[Union[str, TensorType]] = None, **lowerCamelCase : List[str], ):
'''simple docstring'''
lowercase__ = truncation if truncation is not None else self.truncation
lowercase__ = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
lowercase__ = isinstance(lowerCamelCase, np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
lowercase__ = is_batched_numpy or (
isinstance(lowerCamelCase, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase__ = [np.asarray(lowerCamelCase, dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCamelCase, np.ndarray ):
lowercase__ = np.asarray(lowerCamelCase, dtype=np.floataa )
elif isinstance(lowerCamelCase, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase__ = [np.asarray(lowerCamelCase )]
# convert to mel spectrogram, truncate and pad if needed.
lowercase__ = [
self._get_input_mel(lowerCamelCase, max_length if max_length else self.nb_max_samples, lowerCamelCase, lowerCamelCase )
for waveform in raw_speech
]
lowercase__ = []
lowercase__ = []
for mel, longer in padded_inputs:
input_mel.append(lowerCamelCase )
is_longer.append(lowerCamelCase )
if truncation == "fusion" and sum(lowerCamelCase ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
lowercase__ = np.random.randint(0, len(lowerCamelCase ) )
lowercase__ = True
if isinstance(input_mel[0], lowerCamelCase ):
lowercase__ = [np.asarray(lowerCamelCase, dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
lowercase__ = [[longer] for longer in is_longer]
lowercase__ = {'''input_features''': input_mel, '''is_longer''': is_longer}
lowercase__ = BatchFeature(lowerCamelCase )
if return_tensors is not None:
lowercase__ = input_features.convert_to_tensors(lowerCamelCase )
return input_features
| 671 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : str, lowerCamelCase : Any, lowerCamelCase : Tuple=7, lowerCamelCase : str=3, lowerCamelCase : Tuple=18, lowerCamelCase : int=30, lowerCamelCase : Tuple=400, lowerCamelCase : Any=True, lowerCamelCase : Any=None, lowerCamelCase : List[str]=True, lowerCamelCase : Union[str, Any]=None, ):
'''simple docstring'''
lowercase__ = size if size is not None else {'''shortest_edge''': 20}
lowercase__ = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = min_resolution
lowercase__ = max_resolution
lowercase__ = do_resize
lowercase__ = size
lowercase__ = do_center_crop
lowercase__ = crop_size
def lowercase__ ( self : Any ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class _UpperCAmelCase ( A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = MobileNetVaImageProcessor if is_vision_available() else None
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = MobileNetVaImageProcessingTester(self )
@property
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase, '''do_resize''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''size''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''do_center_crop''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''crop_size''' ) )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size, {'''height''': 18, '''width''': 18} )
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84 )
self.assertEqual(image_processor.size, {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size, {'''height''': 84, '''width''': 84} )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
pass
def lowercase__ ( self : Any ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, Image.Image )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
def lowercase__ ( self : str ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase, numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, np.ndarray )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
def lowercase__ ( self : str ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase, torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, torch.Tensor )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
| 671 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
lowercase__ = 42
lowercase__ = None
lowercase__ = None
def a ( ):
'''simple docstring'''
lowercase__ = Node(1 )
lowercase__ = Node(2 )
lowercase__ = Node(3 )
lowercase__ = Node(4 )
lowercase__ = Node(5 )
return tree
def a ( lowerCamelCase_ ):
'''simple docstring'''
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def a ( lowerCamelCase_ ):
'''simple docstring'''
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def a ( lowerCamelCase_ ):
'''simple docstring'''
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def a ( lowerCamelCase_ ):
'''simple docstring'''
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
if root is None:
return output
lowercase__ = deque([root] )
while process_queue:
lowercase__ = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
def populate_output(lowerCamelCase_ , lowerCamelCase_ ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(lowerCamelCase_ , lowerCamelCase_ )
return output
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
def populate_output(lowerCamelCase_ , lowerCamelCase_ ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(lowerCamelCase_ , lowerCamelCase_ )
return output
def a ( lowerCamelCase_ ):
'''simple docstring'''
if root is None:
return []
lowercase__ = []
lowercase__ = 0
lowercase__ = height(lowerCamelCase_ )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(lowerCamelCase_ , lowerCamelCase_ ) )
lowercase__ = 1
else:
output.append(get_nodes_from_right_to_left(lowerCamelCase_ , lowerCamelCase_ ) )
lowercase__ = 0
return output
def a ( ): # Main function for testing.
'''simple docstring'''
lowercase__ = make_tree()
print(F"""In-order Traversal: {inorder(lowerCamelCase_ )}""" )
print(F"""Pre-order Traversal: {preorder(lowerCamelCase_ )}""" )
print(F"""Post-order Traversal: {postorder(lowerCamelCase_ )}""" , '''\n''' )
print(F"""Height of Tree: {height(lowerCamelCase_ )}""" , '''\n''' )
print('''Complete Level Order Traversal: ''' )
print(level_order(lowerCamelCase_ ) , '''\n''' )
print('''Level-wise order Traversal: ''' )
for level in range(1 , height(lowerCamelCase_ ) + 1 ):
print(F"""Level {level}:""" , get_nodes_from_left_to_right(lowerCamelCase_ , level=lowerCamelCase_ ) )
print('''\nZigZag order Traversal: ''' )
print(zigzag(lowerCamelCase_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
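# For the intended sample tree (1 at the root, children 2 and 3, with 4 and 5
# under node 2; the obfuscated assignments in make_tree no longer show this
# wiring explicitly), zigzag(tree) would return [[1], [3, 2], [4, 5]].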
| 671 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A__ : Union[str, Any] = {'configuration_mbart': ['MBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MBartConfig', 'MBartOnnxConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Union[str, Any] = ['MBartTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : List[Any] = ['MBartTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Any = [
'MBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'MBartForCausalLM',
'MBartForConditionalGeneration',
'MBartForQuestionAnswering',
'MBartForSequenceClassification',
'MBartModel',
'MBartPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Dict = [
'TFMBartForConditionalGeneration',
'TFMBartModel',
'TFMBartPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Tuple = [
'FlaxMBartForConditionalGeneration',
'FlaxMBartForQuestionAnswering',
'FlaxMBartForSequenceClassification',
'FlaxMBartModel',
'FlaxMBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
A__ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
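# Lazy-import pattern: at import time only the _import_structure mapping is
# built; _LazyModule defers loading each submodule until one of its attributes
# is first accessed, so importing the package stays cheap even when optional
# backends (sentencepiece, torch, tf, flax) are installed.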
| 671 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = DistilBertTokenizer
lowercase__ = DistilBertTokenizerFast
lowercase__ = True
@slow
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' )
lowercase__ = tokenizer.encode('''sequence builders''', add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.encode('''multi-sequence build''', add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase, lowerCamelCase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 671 | 1 |
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
A__ : Tuple = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class _UpperCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : List[str], lowerCamelCase : Any ):
'''simple docstring'''
super().__init__()
lowercase__ = torchvision.models.resnetaaa(pretrained=lowerCamelCase )
lowercase__ = list(model.children() )[:-2]
lowercase__ = nn.Sequential(*lowerCamelCase )
lowercase__ = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] )
def lowercase__ ( self : Any, lowerCamelCase : Optional[Any] ):
'''simple docstring'''
# Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
lowercase__ = self.pool(self.model(lowerCamelCase ) )
lowercase__ = torch.flatten(lowerCamelCase, start_dim=2 )
lowercase__ = out.transpose(1, 2 ).contiguous()
return out # BxNx2048
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
def __init__( self : int, lowerCamelCase : Dict, lowerCamelCase : List[Any], lowerCamelCase : str, lowerCamelCase : Any, lowerCamelCase : List[Any] ):
'''simple docstring'''
lowercase__ = [json.loads(lowerCamelCase ) for l in open(lowerCamelCase )]
lowercase__ = os.path.dirname(lowerCamelCase )
lowercase__ = tokenizer
lowercase__ = labels
lowercase__ = len(lowerCamelCase )
lowercase__ = max_seq_length
lowercase__ = transforms
def __len__( self : Union[str, Any] ):
'''simple docstring'''
return len(self.data )
def __getitem__( self : int, lowerCamelCase : List[str] ):
'''simple docstring'''
lowercase__ = torch.LongTensor(self.tokenizer.encode(self.data[index]['''text'''], add_special_tokens=lowerCamelCase ) )
lowercase__ , lowercase__ , lowercase__ = sentence[0], sentence[1:-1], sentence[-1]
lowercase__ = sentence[: self.max_seq_length]
lowercase__ = torch.zeros(self.n_classes )
lowercase__ = 1
lowercase__ = Image.open(os.path.join(self.data_dir, self.data[index]['''img'''] ) ).convert('''RGB''' )
lowercase__ = self.transforms(lowerCamelCase )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = Counter()
for row in self.data:
label_freqs.update(row['''label'''] )
return label_freqs
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = [len(row['''sentence'''] ) for row in batch]
lowercase__ , lowercase__ = len(lowerCamelCase_ ), max(lowerCamelCase_ )
lowercase__ = torch.zeros(lowerCamelCase_ , lowerCamelCase_ , dtype=torch.long )
lowercase__ = torch.zeros(lowerCamelCase_ , lowerCamelCase_ , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(lowerCamelCase_ , lowerCamelCase_ ) ):
lowercase__ = input_row['''sentence''']
lowercase__ = 1
lowercase__ = torch.stack([row['''image'''] for row in batch] )
lowercase__ = torch.stack([row['''label'''] for row in batch] )
lowercase__ = torch.stack([row['''image_start_token'''] for row in batch] )
lowercase__ = torch.stack([row['''image_end_token'''] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
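# Collation pads every example to the longest sentence in the batch: in the
# reference implementation the token ids are written into the zero-initialised
# (batch, max_len) LongTensor alongside a matching 0/1 attention mask, while
# images, labels and the image start/end tokens are stacked along dim 0.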
def a ( ):
'''simple docstring'''
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def a ( ):
'''simple docstring'''
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_77_70_44, 0.44_53_14_29, 0.40_66_10_17] , std=[0.12_22_19_94, 0.12_14_58_35, 0.14_38_04_69] , ),
] )
| 671 |
from __future__ import annotations
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = 0.00
lowercase__ = 0
for resistor in resistors:
if resistor <= 0:
lowercase__ = F"""Resistor at index {index} has a negative or zero value!"""
raise ValueError(lowerCamelCase_ )
first_sum += 1 / float(lowerCamelCase_ )
index += 1
return 1 / first_sum
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = 0.00
lowercase__ = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
lowercase__ = F"""Resistor at index {index} has a negative value!"""
raise ValueError(lowerCamelCase_ )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
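# Reference formulas: parallel combination 1/R_eq = sum_i 1/R_i, series
# combination R_eq = sum_i R_i. E.g. resistors of 2 and 3 ohms give
# 1 / (1/2 + 1/3) = 1.2 ohms in parallel and 5 ohms in series.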
| 671 | 1 |
from ..utils import DummyObject, requires_backends
class _UpperCAmelCase ( metaclass=A__ ):
"""simple docstring"""
lowercase__ = ["""onnx"""]
def __init__( self : Optional[Any], *lowerCamelCase : int, **lowerCamelCase : List[str] ):
'''simple docstring'''
requires_backends(self, ['''onnx'''] )
@classmethod
def lowercase__ ( cls : Any, *lowerCamelCase : Union[str, Any], **lowerCamelCase : Optional[int] ):
'''simple docstring'''
requires_backends(cls, ['''onnx'''] )
@classmethod
def lowercase__ ( cls : Union[str, Any], *lowerCamelCase : Any, **lowerCamelCase : List[Any] ):
'''simple docstring'''
requires_backends(cls, ['''onnx'''] )
| 671 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'''
lowercase__ = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert('''RGB''' )
lowercase__ = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73) , (0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11) ),
] )
lowercase__ = transform(lowerCamelCase_ ).unsqueeze(0 ).to(lowerCamelCase_ )
return image
def a ( lowerCamelCase_ ):
'''simple docstring'''
if "visual_encoder" in key:
lowercase__ = re.sub('''visual_encoder*''' , '''vision_model.encoder''' , lowerCamelCase_ )
if "blocks" in key:
lowercase__ = re.sub(r'''blocks''' , '''layers''' , lowerCamelCase_ )
if "attn" in key:
lowercase__ = re.sub(r'''attn''' , '''self_attn''' , lowerCamelCase_ )
if "norm1" in key:
lowercase__ = re.sub(r'''norm1''' , '''layer_norm1''' , lowerCamelCase_ )
if "norm2" in key:
lowercase__ = re.sub(r'''norm2''' , '''layer_norm2''' , lowerCamelCase_ )
if "encoder.norm" in key:
lowercase__ = re.sub(r'''encoder.norm''' , '''post_layernorm''' , lowerCamelCase_ )
if "encoder.patch_embed.proj" in key:
lowercase__ = re.sub(r'''encoder.patch_embed.proj''' , '''embeddings.patch_embedding''' , lowerCamelCase_ )
if "encoder.pos_embed" in key:
lowercase__ = re.sub(r'''encoder.pos_embed''' , '''embeddings.position_embedding''' , lowerCamelCase_ )
if "encoder.cls_token" in key:
lowercase__ = re.sub(r'''encoder.cls_token''' , '''embeddings.class_embedding''' , lowerCamelCase_ )
if "self_attn" in key:
lowercase__ = re.sub(r'''self_attn.proj''' , '''self_attn.projection''' , lowerCamelCase_ )
return key
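# Worked example of the rewrites above:
#   "visual_encoder.blocks.0.attn.proj"
#     -> "vision_model.encoder.blocks.0.attn.proj"            (visual_encoder)
#     -> "vision_model.encoder.layers.0.attn.proj"            (blocks -> layers)
#     -> "vision_model.encoder.layers.0.self_attn.proj"       (attn -> self_attn)
#     -> "vision_model.encoder.layers.0.self_attn.projection" (proj -> projection)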
@torch.no_grad()
def a ( lowerCamelCase_ , lowerCamelCase_=None ):
'''simple docstring'''
if config_path is not None:
lowercase__ = BlipConfig.from_pretrained(lowerCamelCase_ )
else:
lowercase__ = BlipConfig(projection_dim=512 , text_config={} , vision_config={} )
lowercase__ = BlipForConditionalGeneration(lowerCamelCase_ ).eval()
lowercase__ = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'''
lowercase__ = blip_decoder(pretrained=lowerCamelCase_ , image_size=384 , vit='''base''' )
lowercase__ = pt_model.eval()
lowercase__ = pt_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ = modified_state_dict.pop(lowerCamelCase_ )
lowercase__ = rename_key(lowerCamelCase_ )
lowercase__ = value
hf_model.load_state_dict(lowerCamelCase_ )
lowercase__ = 384
lowercase__ = load_demo_image(image_size=lowerCamelCase_ , device='''cpu''' )
lowercase__ = BertTokenizer.from_pretrained('''bert-base-uncased''' )
lowercase__ = tokenizer(['''a picture of'''] ).input_ids
lowercase__ = hf_model.generate(lowerCamelCase_ , lowerCamelCase_ )
assert out[0].tolist() == [3_0522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
lowercase__ = hf_model.generate(lowerCamelCase_ )
assert out[0].tolist() == [3_0522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(lowerCamelCase_ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
lowercase__ = (
'''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'''
)
lowercase__ = blip_vqa(pretrained=lowerCamelCase_ , image_size=lowerCamelCase_ , vit='''base''' )
vqa_model.eval()
lowercase__ = vqa_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ = modified_state_dict.pop(lowerCamelCase_ )
lowercase__ = rename_key(lowerCamelCase_ )
lowercase__ = value
lowercase__ = BlipForQuestionAnswering(lowerCamelCase_ )
hf_vqa_model.load_state_dict(lowerCamelCase_ )
lowercase__ = ['''How many dogs are in this image?''']
lowercase__ = tokenizer(lowerCamelCase_ , return_tensors='''pt''' ).input_ids
lowercase__ = hf_vqa_model.generate(lowerCamelCase_ , lowerCamelCase_ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '''_vqa''' )
lowercase__ = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'''
lowercase__ = blip_itm(pretrained=lowerCamelCase_ , image_size=lowerCamelCase_ , vit='''base''' )
itm_model.eval()
lowercase__ = itm_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ = modified_state_dict.pop(lowerCamelCase_ )
lowercase__ = rename_key(lowerCamelCase_ )
lowercase__ = value
lowercase__ = BlipForImageTextRetrieval(lowerCamelCase_ )
lowercase__ = ['''A picture of a woman with a dog sitting in a beach''']
lowercase__ = tokenizer(
lowerCamelCase_ , return_tensors='''pt''' , padding='''max_length''' , truncation=lowerCamelCase_ , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(lowerCamelCase_ )
hf_itm_model.eval()
lowercase__ = hf_itm_model(lowerCamelCase_ , lowerCamelCase_ , use_itm_head=lowerCamelCase_ )
lowercase__ = hf_itm_model(lowerCamelCase_ , lowerCamelCase_ , use_itm_head=lowerCamelCase_ )
assert out[0].item() == 0.21_10_68_74_94_27_79_54
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_56_98_84_53_86_50_51_27
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '''_itm''' )
if __name__ == "__main__":
A__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
A__ : List[Any] = parser.parse_args()
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 671 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
A__ : Tuple = {
'configuration_efficientformer': [
'EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientFormerConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Optional[int] = ['EfficientFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Union[str, Any] = [
'EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientFormerForImageClassification',
'EfficientFormerForImageClassificationWithTeacher',
'EfficientFormerModel',
'EfficientFormerPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : List[str] = [
'TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFEfficientFormerForImageClassification',
'TFEfficientFormerForImageClassificationWithTeacher',
'TFEfficientFormerModel',
'TFEfficientFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
A__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 671 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : str, lowerCamelCase : Any, lowerCamelCase : Tuple=7, lowerCamelCase : str=3, lowerCamelCase : Tuple=18, lowerCamelCase : int=30, lowerCamelCase : Tuple=400, lowerCamelCase : Any=True, lowerCamelCase : Any=None, lowerCamelCase : List[str]=True, lowerCamelCase : Union[str, Any]=None, ):
'''simple docstring'''
lowercase__ = size if size is not None else {'''shortest_edge''': 20}
lowercase__ = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = min_resolution
lowercase__ = max_resolution
lowercase__ = do_resize
lowercase__ = size
lowercase__ = do_center_crop
lowercase__ = crop_size
def lowercase__ ( self : Any ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class _UpperCAmelCase ( A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = MobileNetVaImageProcessor if is_vision_available() else None
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = MobileNetVaImageProcessingTester(self )
@property
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase, '''do_resize''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''size''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''do_center_crop''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''crop_size''' ) )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size, {'''height''': 18, '''width''': 18} )
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84 )
self.assertEqual(image_processor.size, {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size, {'''height''': 84, '''width''': 84} )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
pass
def lowercase__ ( self : Any ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, Image.Image )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
def lowercase__ ( self : str ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase, numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, np.ndarray )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
def lowercase__ ( self : str ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase, torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, torch.Tensor )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
| 671 | 1 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
A__ : Tuple = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
A__ : int = ' def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n'
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir, '''models/bert/''' ) )
lowercase__ = self.transformer_dir
shutil.copy(
os.path.join(lowerCamelCase, '''src/transformers/models/bert/modeling_bert.py''' ), os.path.join(self.transformer_dir, '''models/bert/modeling_bert.py''' ), )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = '''src/transformers'''
shutil.rmtree(self.transformer_dir )
def lowercase__ ( self : Optional[int], lowerCamelCase : str, lowerCamelCase : List[str], lowerCamelCase : Union[str, Any], lowerCamelCase : Union[str, Any]=None ):
'''simple docstring'''
lowercase__ = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
lowercase__ = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
lowercase__ = black.Mode(target_versions={black.TargetVersion.PYaa}, line_length=119 )
lowercase__ = black.format_str(lowerCamelCase, mode=lowerCamelCase )
lowercase__ = os.path.join(self.transformer_dir, '''new_code.py''' )
with open(lowerCamelCase, '''w''', newline='''\n''' ) as f:
f.write(lowerCamelCase )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowerCamelCase ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name, overwrite=lowerCamelCase )
with open(lowerCamelCase, '''r''' ) as f:
self.assertTrue(f.read(), lowerCamelCase )
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = check_copies.find_code_in_transformers('''models.bert.modeling_bert.BertLMPredictionHead''' )
self.assertEqual(lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
# Base copy consistency
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''', '''BertLMPredictionHead''', REFERENCE_CODE + '''\n''', )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''', '''BertLMPredictionHead''', lowerCamelCase, )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''', '''TestModelLMPredictionHead''', re.sub('''Bert''', '''TestModel''', lowerCamelCase ), )
# Copy consistency with a really long name
lowercase__ = '''TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
F"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}""", F"""{long_class_name}LMPredictionHead""", re.sub('''Bert''', lowerCamelCase, lowerCamelCase ), )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''', '''TestModelLMPredictionHead''', lowerCamelCase, overwrite_result=re.sub('''Bert''', '''TestModel''', lowerCamelCase ), )
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = check_copies.LOCALIZED_READMES['''README_zh-hans.md''']
lowercase__ = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'''
''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'''
''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'''
''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'''
''' Luong, Quoc V. Le, Christopher D. Manning.'''
)
lowercase__ = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
lowercase__ = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'''
''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'''
''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'''
''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'''
''' Christopher D. Manning 发布。\n'''
)
lowercase__ , lowercase__ = check_copies.convert_to_localized_md(
lowerCamelCase, lowerCamelCase, localized_readme['''format_model_list'''] )
self.assertFalse(lowerCamelCase )
self.assertEqual(lowerCamelCase, lowerCamelCase )
lowercase__ , lowercase__ = check_copies.convert_to_localized_md(
lowerCamelCase, lowerCamelCase, localized_readme['''format_model_list'''] )
        # Check whether the number of models matches the main README.md after conversion.
self.assertTrue(lowerCamelCase )
lowercase__ = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'''
)
lowercase__ = (
'''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'''
''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
lowercase__ = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
lowercase__ , lowercase__ = check_copies.convert_to_localized_md(
lowerCamelCase, lowerCamelCase, localized_readme['''format_model_list'''] )
# Check if the model link is synchronized.
self.assertEqual(lowerCamelCase, lowerCamelCase )
| 671 |
import argparse
import os
import re
A__ : Optional[int] = 'src/transformers'
# Pattern that looks at the indentation in a line.
A__ : Union[str, Any] = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":` and puts `key` in group 0.
A__ : List[str] = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
A__ : List[Any] = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
A__ : int = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
A__ : Tuple = re.compile(r'\[([^\]]+)\]')
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = _re_indent.search(lowerCamelCase_ )
return "" if search is None else search.groups()[0]
def a ( lowerCamelCase_ , lowerCamelCase_="" , lowerCamelCase_=None , lowerCamelCase_=None ):
'''simple docstring'''
lowercase__ = 0
lowercase__ = code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(lowerCamelCase_ ):
index += 1
lowercase__ = ['''\n'''.join(lines[:index] )]
else:
lowercase__ = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowercase__ = [lines[index]]
index += 1
while index < len(lowerCamelCase_ ) and (end_prompt is None or not lines[index].startswith(lowerCamelCase_ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(lowerCamelCase_ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(lowerCamelCase_ ) )
if index < len(lowerCamelCase_ ) - 1:
lowercase__ = [lines[index + 1]]
index += 1
else:
lowercase__ = []
else:
blocks.append('''\n'''.join(lowerCamelCase_ ) )
lowercase__ = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(lowerCamelCase_ ) > 0:
blocks.append('''\n'''.join(lowerCamelCase_ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowerCamelCase_ ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def a ( lowerCamelCase_ ):
'''simple docstring'''
def _inner(lowerCamelCase_ ):
return key(lowerCamelCase_ ).lower().replace('''_''' , '''''' )
return _inner
def a ( lowerCamelCase_ , lowerCamelCase_=None ):
'''simple docstring'''
# If no key is provided, we use a noop.
    def noop(x):
        return x
if key is None:
lowercase__ = noop
# Constants are all uppercase, they go first.
lowercase__ = [obj for obj in objects if key(lowerCamelCase_ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowercase__ = [obj for obj in objects if key(lowerCamelCase_ )[0].isupper() and not key(lowerCamelCase_ ).isupper()]
# Functions begin with a lowercase, they go last.
lowercase__ = [obj for obj in objects if not key(lowerCamelCase_ )[0].isupper()]
lowercase__ = ignore_underscore(lowerCamelCase_ )
return sorted(lowerCamelCase_ , key=lowerCamelCase_ ) + sorted(lowerCamelCase_ , key=lowerCamelCase_ ) + sorted(lowerCamelCase_ , key=lowerCamelCase_ )
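# A minimal, self-contained sketch of the same three-bucket ordering
# (constants first, then classes, then functions), assuming plain strings
# and no custom key. `_demo_sort_objects` is an illustrative name, not part
# of the original file.
def _demo_sort_objects(objs):
    nokey = lambda s: s.lower().replace("_", "")  # mirrors ignore_underscore
    consts = sorted([o for o in objs if o.isupper()], key=nokey)
    classes = sorted([o for o in objs if o[0].isupper() and not o.isupper()], key=nokey)
    funcs = sorted([o for o in objs if not o[0].isupper()], key=nokey)
    return consts + classes + funcs
# _demo_sort_objects(["foo", "Bar", "CONST"]) == ["CONST", "Bar", "foo"]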
def a ( lowerCamelCase_ ):
'''simple docstring'''
# This inner function sort imports between [ ].
def _replace(lowerCamelCase_ ):
lowercase__ = match.groups()[0]
if "," not in imports:
return F"""[{imports}]"""
lowercase__ = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowercase__ = keys[:-1]
return "[" + ", ".join([F"""\"{k}\"""" for k in sort_objects(lowerCamelCase_ )] ) + "]"
lowercase__ = import_statement.split('''\n''' )
if len(lowerCamelCase_ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowercase__ = 2 if lines[1].strip() == '''[''' else 1
lowercase__ = [(i, _re_strip_line.search(lowerCamelCase_ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        lowercase__ = sort_objects(lowerCamelCase_ , key=lambda x : x[1] )
lowercase__ = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(lowerCamelCase_ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowercase__ = _re_bracket_content.sub(_replace , lines[1] )
else:
lowercase__ = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowercase__ = keys[:-1]
lowercase__ = get_indent(lines[1] ) + ''', '''.join([F"""\"{k}\"""" for k in sort_objects(lowerCamelCase_ )] )
return "\n".join(lowerCamelCase_ )
else:
# Finally we have to deal with imports fitting on one line
lowercase__ = _re_bracket_content.sub(_replace , lowerCamelCase_ )
return import_statement
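# Illustrative effect of the sorter above on a hypothetical one-line entry:
#   '"models.bert": ["BertModel", "BertConfig"],'
# becomes
#   '"models.bert": ["BertConfig", "BertModel"],'
# (objects are compared case-insensitively with underscores ignored).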
def a ( lowerCamelCase_ , lowerCamelCase_=True ):
'''simple docstring'''
with open(lowerCamelCase_ , encoding='''utf-8''' ) as f:
lowercase__ = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowercase__ = split_code_in_indented_blocks(
lowerCamelCase_ , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(lowerCamelCase_ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
lowercase__ = main_blocks[block_idx]
lowercase__ = block.split('''\n''' )
# Get to the start of the imports.
lowercase__ = 0
while line_idx < len(lowerCamelCase_ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowercase__ = len(lowerCamelCase_ )
else:
line_idx += 1
if line_idx >= len(lowerCamelCase_ ):
continue
# Ignore beginning and last line: they don't contain anything.
lowercase__ = '''\n'''.join(block_lines[line_idx:-1] )
lowercase__ = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
lowercase__ = split_code_in_indented_blocks(lowerCamelCase_ , indent_level=lowerCamelCase_ )
# We have two categories of import key: list or _import_structure[key].append/extend
lowercase__ = _re_direct_key if '''_import_structure = {''' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
lowercase__ = [(pattern.search(lowerCamelCase_ ).groups()[0] if pattern.search(lowerCamelCase_ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowercase__ = [(i, key) for i, key in enumerate(lowerCamelCase_ ) if key is not None]
        lowercase__ = [x[0] for x in sorted(lowerCamelCase_ , key=lambda x : x[1] )]
        # We reorder the blocks, leaving empty lines/comments as they were and reordering the rest.
lowercase__ = 0
lowercase__ = []
for i in range(len(lowerCamelCase_ ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
lowercase__ = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(lowerCamelCase_ )
count += 1
# And we put our main block back together with its first and last line.
lowercase__ = '''\n'''.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(lowerCamelCase_ ):
if check_only:
return True
else:
print(F"""Overwriting {file}.""" )
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(lowerCamelCase_ ) )
def a ( lowerCamelCase_=True ):
'''simple docstring'''
lowercase__ = []
for root, _, files in os.walk(lowerCamelCase_ ):
if "__init__.py" in files:
lowercase__ = sort_imports(os.path.join(lowerCamelCase_ , '''__init__.py''' ) , check_only=lowerCamelCase_ )
if result:
lowercase__ = [os.path.join(lowerCamelCase_ , '''__init__.py''' )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(F"""Would overwrite {len(lowerCamelCase_ )} files, run `make style`.""" )
if __name__ == "__main__":
A__ : str = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
A__ : int = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 671 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = botoa.client('''iam''' )
lowercase__ = {
'''Version''': '''2012-10-17''',
'''Statement''': [
{'''Effect''': '''Allow''', '''Principal''': {'''Service''': '''sagemaker.amazonaws.com'''}, '''Action''': '''sts:AssumeRole'''}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=lowerCamelCase_ , AssumeRolePolicyDocument=json.dumps(lowerCamelCase_ , indent=2 ) )
lowercase__ = {
'''Version''': '''2012-10-17''',
'''Statement''': [
{
'''Effect''': '''Allow''',
'''Action''': [
'''sagemaker:*''',
'''ecr:GetDownloadUrlForLayer''',
'''ecr:BatchGetImage''',
'''ecr:BatchCheckLayerAvailability''',
'''ecr:GetAuthorizationToken''',
'''cloudwatch:PutMetricData''',
'''cloudwatch:GetMetricData''',
'''cloudwatch:GetMetricStatistics''',
'''cloudwatch:ListMetrics''',
'''logs:CreateLogGroup''',
'''logs:CreateLogStream''',
'''logs:DescribeLogStreams''',
'''logs:PutLogEvents''',
'''logs:GetLogEvents''',
'''s3:CreateBucket''',
'''s3:ListBucket''',
'''s3:GetBucketLocation''',
'''s3:GetObject''',
'''s3:PutObject''',
],
'''Resource''': '''*''',
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=lowerCamelCase_ , PolicyName=F"""{role_name}_policy_permission""" , PolicyDocument=json.dumps(lowerCamelCase_ , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(F"""role {role_name} already exists. Using existing one""" )
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = botoa.client('''iam''' )
return iam_client.get_role(RoleName=lowerCamelCase_ )["Role"]["Arn"]
def a ( ):
'''simple docstring'''
lowercase__ = _ask_options(
'''How do you want to authorize?''' , ['''AWS Profile''', '''Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) '''] , lowerCamelCase_ , )
lowercase__ = None
if credentials_configuration == 0:
lowercase__ = _ask_field('''Enter your AWS Profile name: [default] ''' , default='''default''' )
lowercase__ = aws_profile
else:
print(
            '''Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch your training script with '''
'''`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`''' )
lowercase__ = _ask_field('''AWS Access Key ID: ''' )
lowercase__ = aws_access_key_id
lowercase__ = _ask_field('''AWS Secret Access Key: ''' )
lowercase__ = aws_secret_access_key
lowercase__ = _ask_field('''Enter your AWS Region: [us-east-1]''' , default='''us-east-1''' )
lowercase__ = aws_region
lowercase__ = _ask_options(
'''Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?''' , ['''Provide IAM Role name''', '''Create new IAM role using credentials'''] , lowerCamelCase_ , )
if role_management == 0:
lowercase__ = _ask_field('''Enter your IAM role name: ''' )
else:
lowercase__ = '''accelerate_sagemaker_execution_role'''
print(F"""Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials""" )
_create_iam_role_for_sagemaker(lowerCamelCase_ )
lowercase__ = _ask_field(
'''Do you want to use custom Docker image? [yes/NO]: ''' , _convert_yes_no_to_bool , default=lowerCamelCase_ , error_message='''Please enter yes or no.''' , )
lowercase__ = None
if is_custom_docker_image:
lowercase__ = _ask_field('''Enter your Docker image: ''' , lambda lowerCamelCase_ : str(lowerCamelCase_ ).lower() )
lowercase__ = _ask_field(
'''Do you want to provide SageMaker input channels with data locations? [yes/NO]: ''' , _convert_yes_no_to_bool , default=lowerCamelCase_ , error_message='''Please enter yes or no.''' , )
lowercase__ = None
if is_sagemaker_inputs_enabled:
lowercase__ = _ask_field(
'''Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ''' , lambda lowerCamelCase_ : str(lowerCamelCase_ ).lower() , )
lowercase__ = _ask_field(
'''Do you want to enable SageMaker metrics? [yes/NO]: ''' , _convert_yes_no_to_bool , default=lowerCamelCase_ , error_message='''Please enter yes or no.''' , )
lowercase__ = None
if is_sagemaker_metrics_enabled:
lowercase__ = _ask_field(
'''Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ''' , lambda lowerCamelCase_ : str(lowerCamelCase_ ).lower() , )
lowercase__ = _ask_options(
'''What is the distributed mode?''' , ['''No distributed training''', '''Data parallelism'''] , _convert_sagemaker_distributed_mode , )
lowercase__ = {}
lowercase__ = _ask_field(
'''Do you wish to optimize your script with torch dynamo?[yes/NO]:''' , _convert_yes_no_to_bool , default=lowerCamelCase_ , error_message='''Please enter yes or no.''' , )
if use_dynamo:
lowercase__ = '''dynamo_'''
lowercase__ = _ask_options(
'''Which dynamo backend would you like to use?''' , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
lowercase__ = _ask_field(
'''Do you want to customize the defaults sent to torch.compile? [yes/NO]: ''' , _convert_yes_no_to_bool , default=lowerCamelCase_ , error_message='''Please enter yes or no.''' , )
if use_custom_options:
lowercase__ = _ask_options(
'''Which mode do you want to use?''' , lowerCamelCase_ , lambda lowerCamelCase_ : TORCH_DYNAMO_MODES[int(lowerCamelCase_ )] , default='''default''' , )
lowercase__ = _ask_field(
'''Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ''' , _convert_yes_no_to_bool , default=lowerCamelCase_ , error_message='''Please enter yes or no.''' , )
lowercase__ = _ask_field(
'''Do you want to enable dynamic shape tracing? [yes/NO]: ''' , _convert_yes_no_to_bool , default=lowerCamelCase_ , error_message='''Please enter yes or no.''' , )
lowercase__ = '''Which EC2 instance type you want to use for your training?'''
if distributed_type != SageMakerDistributedType.NO:
lowercase__ = _ask_options(
lowerCamelCase_ , lowerCamelCase_ , lambda lowerCamelCase_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(lowerCamelCase_ )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
lowercase__ = _ask_field(lowerCamelCase_ , lambda lowerCamelCase_ : str(lowerCamelCase_ ).lower() , default='''ml.p3.2xlarge''' )
lowercase__ = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
lowercase__ = _ask_field(
            '''How many machines do you want to use? [1]: ''' , lowerCamelCase_ , default=1 , )
lowercase__ = _ask_options(
'''Do you wish to use FP16 or BF16 (mixed precision)?''' , ['''no''', '''fp16''', '''bf16''', '''fp8'''] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
'''Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.''' )
return SageMakerConfig(
image_uri=lowerCamelCase_ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=lowerCamelCase_ , use_cpu=lowerCamelCase_ , dynamo_config=lowerCamelCase_ , eca_instance_type=lowerCamelCase_ , profile=lowerCamelCase_ , region=lowerCamelCase_ , iam_role_name=lowerCamelCase_ , mixed_precision=lowerCamelCase_ , num_machines=lowerCamelCase_ , sagemaker_inputs_file=lowerCamelCase_ , sagemaker_metrics_file=lowerCamelCase_ , )
| 671 |
from math import sqrt
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' must been an int and positive"
lowercase__ = True
    # 0 and 1 are not prime.
if number <= 1:
lowercase__ = False
for divisor in range(2 , int(round(sqrt(lowerCamelCase_ ) ) ) + 1 ):
        # if 'number' is divisible by 'divisor', set 'status'
        # to False and break out of the loop.
if number % divisor == 0:
lowercase__ = False
break
# precondition
    assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'status' must be of type bool"
return status
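# Expected behaviour of the trial-division test above (assuming its
# pre-obfuscation name `is_prime`; the obfuscated file names every function
# `a`): is_prime(2) -> True, is_prime(9) -> False, is_prime(1) -> False.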
def a ( lowerCamelCase_ ):
'''simple docstring'''
    assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n > 2), "'N' must be an int and > 2"
# beginList: contains all natural numbers from 2 up to N
lowercase__ = list(range(2 , n + 1 ) )
    lowercase__ = [] # this list will be returned.
    # actual sieve of Eratosthenes
for i in range(len(lowerCamelCase_ ) ):
for j in range(i + 1 , len(lowerCamelCase_ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
lowercase__ = 0
# filters actual prime numbers.
lowercase__ = [x for x in begin_list if x != 0]
# precondition
    assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must be of type list"
return ans
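# Illustrative behaviour (assuming the pre-obfuscation name `sieve_er`):
# sieve_er(10) -> [2, 3, 5, 7]. Note this pairwise variant runs in O(n^2),
# unlike the classical sieve of Eratosthenes.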
def a ( lowerCamelCase_ ):
'''simple docstring'''
    assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n > 2), "'N' must be an int and > 2"
lowercase__ = []
    # iterate over all numbers from 2 up to N (inclusive);
    # if a number is prime, append it to the list 'ans'
for number in range(2 , n + 1 ):
if is_prime(lowerCamelCase_ ):
ans.append(lowerCamelCase_ )
# precondition
    assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must be of type list"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
    assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and number >= 0, "'number' must be an int and >= 0"
    lowercase__ = [] # this list will be returned by the function.
# potential prime number factors.
lowercase__ = 2
lowercase__ = number
if number == 0 or number == 1:
ans.append(lowerCamelCase_ )
    # if 'number' is not prime, build the prime factorization of 'number'
elif not is_prime(lowerCamelCase_ ):
while quotient != 1:
if is_prime(lowerCamelCase_ ) and (quotient % factor == 0):
ans.append(lowerCamelCase_ )
quotient /= factor
else:
factor += 1
else:
ans.append(lowerCamelCase_ )
# precondition
    assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must be of type list"
return ans
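# Illustrative behaviour (assuming the pre-obfuscation name
# `prime_factorization`): prime_factorization(12) -> [2, 2, 3].
# Note that `quotient /= factor` uses true division, so `quotient` becomes
# a float; the loop still terminates because 1.0 == 1.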
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowercase__ = 0
# prime factorization of 'number'
lowercase__ = prime_factorization(lowerCamelCase_ )
lowercase__ = max(lowerCamelCase_ )
# precondition
    assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must be of type int"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowercase__ = 0
# prime factorization of 'number'
lowercase__ = prime_factorization(lowerCamelCase_ )
lowercase__ = min(lowerCamelCase_ )
# precondition
    assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must be of type int"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
    assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'number' must be an int"
    assert isinstance(number % 2 == 0 , lowerCamelCase_ ), "compare must be of type bool"
return number % 2 == 0
def a ( lowerCamelCase_ ):
'''simple docstring'''
    assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'number' must be an int"
    assert isinstance(number % 2 != 0 , lowerCamelCase_ ), "compare must be of type bool"
return number % 2 != 0
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (number > 2) and is_even(lowerCamelCase_ )
), "'number' must been an int, even and > 2"
    lowercase__ = [] # this list will be returned
# creates a list of prime numbers between 2 up to 'number'
lowercase__ = get_prime_numbers(lowerCamelCase_ )
lowercase__ = len(lowerCamelCase_ )
# run variable for while-loops.
lowercase__ = 0
lowercase__ = None
# exit variable. for break up the loops
lowercase__ = True
while i < len_pn and loop:
lowercase__ = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
lowercase__ = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (len(lowerCamelCase_ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
lowercase__ = 0
while numbera != 0:
lowercase__ = numbera % numbera
lowercase__ = numbera
lowercase__ = rest
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
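# A minimal, self-contained sketch of the Euclidean algorithm the function
# above implements. The obfuscation collapsed the two parameters into a
# single name (`numbera`), so the names below are restored assumptions, not
# part of the original file.
def _euclid_gcd(number1: int, number2: int) -> int:
    while number2 != 0:
        number1, number2 = number2, number1 % number2
    return number1
# _euclid_gcd(48, 18) == 6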
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
lowercase__ = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
lowercase__ = prime_factorization(lowerCamelCase_ )
lowercase__ = prime_factorization(lowerCamelCase_ )
elif numbera == 1 or numbera == 1:
lowercase__ = []
lowercase__ = []
lowercase__ = max(lowerCamelCase_ , lowerCamelCase_ )
lowercase__ = 0
lowercase__ = 0
    lowercase__ = [] # captures numbers in both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
for _ in range(max(lowerCamelCase_ , lowerCamelCase_ ) ):
ans *= n
else:
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
for _ in range(lowerCamelCase_ ):
ans *= n
done.append(lowerCamelCase_ )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
for _ in range(lowerCamelCase_ ):
ans *= n
done.append(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
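# A much shorter equivalent of the LCM computation above, via the identity
# lcm(a, b) == a * b // gcd(a, b) for positive integers. A sketch only:
# `_lcm` is an illustrative name, not part of the original file.
def _lcm(a: int, b: int) -> int:
    from math import gcd as _math_gcd  # standard-library gcd
    return a * b // _math_gcd(a, b)
# _lcm(4, 6) == 12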
def a ( lowerCamelCase_ ):
'''simple docstring'''
    assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'number' must be a positive int"
lowercase__ = 0
lowercase__ = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(lowerCamelCase_ ):
ans += 1
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and is_prime(
lowerCamelCase_ ), "'ans' must been a prime number and from type int"
return ans
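# Illustrative behaviour (assuming the pre-obfuscation name `get_prime`):
# get_prime(0) -> 2, get_prime(1) -> 3, get_prime(3) -> 7, i.e. the n-th
# prime counting from get_prime(0) == 2.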
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
is_prime(lowerCamelCase_ ) and is_prime(lowerCamelCase_ ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
lowercase__ = p_number_a + 1 # jump to the next number
lowercase__ = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(lowerCamelCase_ ):
number += 1
while number < p_number_a:
ans.append(lowerCamelCase_ )
number += 1
# fetch the next prime number.
while not is_prime(lowerCamelCase_ ):
number += 1
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and ans[0] != p_number_a
and ans[len(lowerCamelCase_ ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
    assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 1), "'n' must be an int and >= 1"
lowercase__ = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(lowerCamelCase_ )
# precondition
    assert ans[0] == 1 and ans[len(lowerCamelCase_ ) - 1] == n, "Error in function getDivisors(...)"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number > 1
), "'number' must been an int and >= 1"
lowercase__ = get_divisors(lowerCamelCase_ )
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (divisors[0] == 1)
and (divisors[len(lowerCamelCase_ ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
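# Illustrative behaviour (assuming the pre-obfuscation name
# `is_perfect_number`): is_perfect_number(6) -> True (1 + 2 + 3 == 6),
# is_perfect_number(28) -> True, is_perfect_number(10) -> False.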
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
lowercase__ = gcd(abs(lowerCamelCase_ ) , abs(lowerCamelCase_ ) )
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
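# Intended behaviour (assuming the pre-obfuscation names and a working gcd):
# simplify_fraction(10, 20) -> (1, 2) and simplify_fraction(-6, 4) -> (-3, 2).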
def a ( lowerCamelCase_ ):
'''simple docstring'''
    assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'n' must be an int and >= 0"
    lowercase__ = 1 # this will be returned.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
    assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'n' must be an int and >= 0"
lowercase__ = 0
lowercase__ = 1
    lowercase__ = 1 # this will be returned
for _ in range(n - 1 ):
lowercase__ = ans
ans += fiba
lowercase__ = tmp
return ans
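# Illustrative behaviour (assuming the pre-obfuscation name `fib` and the
# original, distinct variable names): fib(1) -> 1, fib(2) -> 1, fib(5) -> 5,
# following the sequence 1, 1, 2, 3, 5, ...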
| 671 | 1 |
import collections
import os
import re
from pathlib import Path
A__ : Optional[Any] = 'src/transformers'
# Matches is_xxx_available()
A__ : List[str] = re.compile(r'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
A__ : Any = re.compile(r'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
A__ : Tuple = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
A__ : Optional[int] = re.compile(r'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
A__ : Dict = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
A__ : Any = re.compile(r'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
A__ : Dict = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
A__ : Tuple = re.compile(r'^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
A__ : List[str] = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
A__ : Any = re.compile(r'^\s*try:')
# Catches a line with else:
A__ : Optional[Any] = re.compile(r'^\s*else:')
def a ( lowerCamelCase_ ):
'''simple docstring'''
if _re_test_backend.search(lowerCamelCase_ ) is None:
return None
lowercase__ = [b[0] for b in _re_backend.findall(lowerCamelCase_ )]
backends.sort()
return "_and_".join(lowerCamelCase_ )
def a ( lowerCamelCase_ ):
'''simple docstring'''
with open(lowerCamelCase_ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowercase__ = f.readlines()
lowercase__ = 0
while line_index < len(lowerCamelCase_ ) and not lines[line_index].startswith('''_import_structure = {''' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(lowerCamelCase_ ):
return None
# First grab the objects without a specific backend in _import_structure
lowercase__ = []
while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
lowercase__ = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(lowerCamelCase_ ):
lowercase__ = _re_one_line_import_struct.search(lowerCamelCase_ ).groups()[0]
lowercase__ = re.findall(r'''\[([^\]]+)\]''' , lowerCamelCase_ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
line_index += 1
continue
lowercase__ = _re_import_struct_key_value.search(lowerCamelCase_ )
if single_line_import_search is not None:
lowercase__ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(lowerCamelCase_ ) > 0]
objects.extend(lowerCamelCase_ )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
line_index += 1
lowercase__ = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
# If the line is an if not is_backend_available, we grab all objects associated.
lowercase__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowercase__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowercase__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
lowercase__ = lines[line_index]
if _re_import_struct_add_one.search(lowerCamelCase_ ) is not None:
objects.append(_re_import_struct_add_one.search(lowerCamelCase_ ).groups()[0] )
elif _re_import_struct_add_many.search(lowerCamelCase_ ) is not None:
lowercase__ = _re_import_struct_add_many.search(lowerCamelCase_ ).groups()[0].split(''', ''' )
lowercase__ = [obj[1:-1] for obj in imports if len(lowerCamelCase_ ) > 0]
objects.extend(lowerCamelCase_ )
elif _re_between_brackets.search(lowerCamelCase_ ) is not None:
lowercase__ = _re_between_brackets.search(lowerCamelCase_ ).groups()[0].split(''', ''' )
lowercase__ = [obj[1:-1] for obj in imports if len(lowerCamelCase_ ) > 0]
objects.extend(lowerCamelCase_ )
elif _re_quote_object.search(lowerCamelCase_ ) is not None:
objects.append(_re_quote_object.search(lowerCamelCase_ ).groups()[0] )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
elif line.startswith(''' ''' * 12 + '''"''' ):
objects.append(line[13:-3] )
line_index += 1
lowercase__ = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
lowercase__ = []
while (
line_index < len(lowerCamelCase_ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('''else''' )
):
lowercase__ = lines[line_index]
lowercase__ = _re_import.search(lowerCamelCase_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
lowercase__ = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(lowerCamelCase_ ):
# If the line is an if is_backend_available, we grab all objects associated.
lowercase__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowercase__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowercase__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
lowercase__ = lines[line_index]
lowercase__ = _re_import.search(lowerCamelCase_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 12 ):
objects.append(line[12:-2] )
line_index += 1
lowercase__ = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
def find_duplicates(lowerCamelCase_ ):
return [k for k, v in collections.Counter(lowerCamelCase_ ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
lowercase__ = []
for key in import_dict_objects.keys():
lowercase__ = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
lowercase__ = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
lowercase__ = '''base imports''' if key == '''none''' else F"""{key} backend"""
errors.append(F"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
def a ( ):
'''simple docstring'''
lowercase__ = []
for root, _, files in os.walk(lowerCamelCase_ ):
if "__init__.py" in files:
lowercase__ = os.path.join(lowerCamelCase_ , '''__init__.py''' )
lowercase__ = parse_init(lowerCamelCase_ )
if objects is not None:
lowercase__ = analyze_results(*lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
lowercase__ = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append('''\n'''.join(lowerCamelCase_ ) )
if len(lowerCamelCase_ ) > 0:
raise ValueError('''\n\n'''.join(lowerCamelCase_ ) )
def a ( ):
'''simple docstring'''
lowercase__ = []
for path, directories, files in os.walk(lowerCamelCase_ ):
for folder in directories:
# Ignore private modules
if folder.startswith('''_''' ):
directories.remove(lowerCamelCase_ )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(lowerCamelCase_ ) / folder).glob('''*.py''' ) ) ) == 0:
continue
lowercase__ = str((Path(lowerCamelCase_ ) / folder).relative_to(lowerCamelCase_ ) )
lowercase__ = short_path.replace(os.path.sep , '''.''' )
submodules.append(lowerCamelCase_ )
for fname in files:
if fname == "__init__.py":
continue
lowercase__ = str((Path(lowerCamelCase_ ) / fname).relative_to(lowerCamelCase_ ) )
lowercase__ = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
if len(submodule.split('''.''' ) ) == 1:
submodules.append(lowerCamelCase_ )
return submodules
A__ : List[str] = [
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
'models.esm.openfold_utils',
]
def a ( ):
'''simple docstring'''
# This is to make sure the transformers module imported is the one in the repo.
from transformers.utils import direct_transformers_import
lowercase__ = direct_transformers_import(lowerCamelCase_ )
lowercase__ = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we parse the init to collect all additions and
    # (potentially re-) add them.
with open(os.path.join(lowerCamelCase_ , '''__init__.py''' ) , '''r''' ) as f:
lowercase__ = f.read()
import_structure_keys.update(set(re.findall(r'''import_structure\[\"([^\"]*)\"\]''' , lowerCamelCase_ ) ) )
lowercase__ = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(lowerCamelCase_ ) > 0:
lowercase__ = '''\n'''.join(F"""- {module}""" for module in module_not_registered )
raise ValueError(
            '''The following submodules are not properly registered in the main init of Transformers:\n'''
F"""{list_of_modules}\n"""
'''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 671 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = args.log_outputs
lowercase__ = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )
# load metric
lowercase__ = load_metric('''wer''' )
lowercase__ = load_metric('''cer''' )
# compute metrics
lowercase__ = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
lowercase__ = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
# print & log results
lowercase__ = F"""WER: {wer_result}\nCER: {cer_result}"""
print(lowerCamelCase_ )
with open(F"""{dataset_id}_eval_results.txt""" , '''w''' ) as f:
f.write(lowerCamelCase_ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
lowercase__ = F"""log_{dataset_id}_predictions.txt"""
lowercase__ = F"""log_{dataset_id}_targets.txt"""
with open(lowerCamelCase_ , '''w''' ) as p, open(lowerCamelCase_ , '''w''' ) as t:
# mapping function to write output
def write_to_file(lowerCamelCase_ , lowerCamelCase_ ):
p.write(F"""{i}""" + '''\n''' )
p.write(batch['''prediction'''] + '''\n''' )
t.write(F"""{i}""" + '''\n''' )
t.write(batch['''target'''] + '''\n''' )
result.map(lowerCamelCase_ , with_indices=lowerCamelCase_ )
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
lowercase__ = re.sub(lowerCamelCase_ , '''''' , text.lower() )
    # In addition, we can normalize the target text, e.g. removing newline characters, etc.
# note that order is important here!
lowercase__ = ['''\n\n''', '''\n''', ''' ''', ''' ''']
for t in token_sequences_to_ignore:
lowercase__ = ''' '''.join(text.split(lowerCamelCase_ ) )
return text
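# Illustrative behaviour (assuming the pre-obfuscation name `normalize_text`):
# normalize_text("Hello, WORLD!\n") -> "hello world " (note the trailing
# space left by re-joining the text split on "\n").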
def a ( lowerCamelCase_ ):
'''simple docstring'''
# load dataset
lowercase__ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=lowerCamelCase_ )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
lowercase__ = AutoFeatureExtractor.from_pretrained(args.model_id )
lowercase__ = feature_extractor.sampling_rate
# resample audio
lowercase__ = dataset.cast_column('''audio''' , Audio(sampling_rate=lowerCamelCase_ ) )
# load eval pipeline
if args.device is None:
lowercase__ = 0 if torch.cuda.is_available() else -1
lowercase__ = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(lowerCamelCase_ ):
lowercase__ = asr(
batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
lowercase__ = prediction['''text''']
lowercase__ = normalize_text(batch['''sentence'''] )
return batch
# run inference on all examples
lowercase__ = dataset.map(lowerCamelCase_ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(lowerCamelCase_ , lowerCamelCase_ )
if __name__ == "__main__":
A__ : int = argparse.ArgumentParser()
parser.add_argument(
'--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
)
parser.add_argument(
'--dataset',
type=str,
required=True,
help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
)
parser.add_argument(
'--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
)
parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
parser.add_argument(
    '--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to None (no chunking).'
)
parser.add_argument(
    '--stride_length_s', type=float, default=None, help='Stride of the audio chunks in seconds. Defaults to None.'
)
parser.add_argument(
'--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
)
parser.add_argument(
'--device',
type=int,
default=None,
help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
)
A__ : Union[str, Any] = parser.parse_args()
main(args)
| 671 | 1 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
lowercase__ = FlaxDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''', safety_checker=lowerCamelCase, cache_dir=lowerCamelCase )
lowercase__ = [t[-1] for t in os.walk(os.path.join(lowerCamelCase, os.listdir(lowerCamelCase )[0], '''snapshots''' ) )]
lowercase__ = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('''.bin''' ) for f in files )
@slow
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ , lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''', safety_checker=lowerCamelCase )
lowercase__ = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowercase__ = jax.random.PRNGKey(0 )
lowercase__ = 4
lowercase__ = jax.device_count()
lowercase__ = num_samples * [prompt]
lowercase__ = pipeline.prepare_inputs(lowerCamelCase )
# shard inputs and rng
lowercase__ = replicate(lowerCamelCase )
lowercase__ = jax.random.split(lowerCamelCase, lowerCamelCase )
lowercase__ = shard(lowerCamelCase )
lowercase__ = pipeline(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, jit=lowerCamelCase ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 4.1514745 ) < 1E-3
assert np.abs(np.abs(lowerCamelCase, dtype=np.floataa ).sum() - 49947.875 ) < 5E-1
lowercase__ = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(lowerCamelCase ) == num_samples
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ , lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''', revision='''flax''', safety_checker=lowerCamelCase )
lowercase__ = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowercase__ = jax.random.PRNGKey(0 )
lowercase__ = 50
lowercase__ = jax.device_count()
lowercase__ = num_samples * [prompt]
lowercase__ = pipeline.prepare_inputs(lowerCamelCase )
# shard inputs and rng
lowercase__ = replicate(lowerCamelCase )
lowercase__ = jax.random.split(lowerCamelCase, lowerCamelCase )
lowercase__ = shard(lowerCamelCase )
lowercase__ = pipeline(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, jit=lowerCamelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.05652401) ) < 1E-3
assert np.abs((np.abs(lowerCamelCase, dtype=np.floataa ).sum() - 2383808.2) ) < 5E-1
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ , lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''', revision='''bf16''', dtype=jnp.bfloataa, safety_checker=lowerCamelCase )
lowercase__ = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowercase__ = jax.random.PRNGKey(0 )
lowercase__ = 50
lowercase__ = jax.device_count()
lowercase__ = num_samples * [prompt]
lowercase__ = pipeline.prepare_inputs(lowerCamelCase )
# shard inputs and rng
lowercase__ = replicate(lowerCamelCase )
lowercase__ = jax.random.split(lowerCamelCase, lowerCamelCase )
lowercase__ = shard(lowerCamelCase )
lowercase__ = pipeline(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, jit=lowerCamelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04003906) ) < 1E-3
assert np.abs((np.abs(lowerCamelCase, dtype=np.floataa ).sum() - 2373516.75) ) < 5E-1
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ , lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''', revision='''bf16''', dtype=jnp.bfloataa )
lowercase__ = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowercase__ = jax.random.PRNGKey(0 )
lowercase__ = 50
lowercase__ = jax.device_count()
lowercase__ = num_samples * [prompt]
lowercase__ = pipeline.prepare_inputs(lowerCamelCase )
# shard inputs and rng
lowercase__ = replicate(lowerCamelCase )
lowercase__ = jax.random.split(lowerCamelCase, lowerCamelCase )
lowercase__ = shard(lowerCamelCase )
lowercase__ = pipeline(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, jit=lowerCamelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04003906) ) < 1E-3
assert np.abs((np.abs(lowerCamelCase, dtype=np.floataa ).sum() - 2373516.75) ) < 5E-1
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = FlaxDDIMScheduler(
beta_start=0.00085, beta_end=0.012, beta_schedule='''scaled_linear''', set_alpha_to_one=lowerCamelCase, steps_offset=1, )
lowercase__ , lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''', revision='''bf16''', dtype=jnp.bfloataa, scheduler=lowerCamelCase, safety_checker=lowerCamelCase, )
lowercase__ = scheduler.create_state()
lowercase__ = scheduler_state
lowercase__ = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowercase__ = jax.random.PRNGKey(0 )
lowercase__ = 50
lowercase__ = jax.device_count()
lowercase__ = num_samples * [prompt]
lowercase__ = pipeline.prepare_inputs(lowerCamelCase )
# shard inputs and rng
lowercase__ = replicate(lowerCamelCase )
lowercase__ = jax.random.split(lowerCamelCase, lowerCamelCase )
lowercase__ = shard(lowerCamelCase )
lowercase__ = pipeline(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, jit=lowerCamelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.045043945) ) < 1E-3
assert np.abs((np.abs(lowerCamelCase, dtype=np.floataa ).sum() - 2347693.5) ) < 5E-1
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowercase__ = jax.device_count()
lowercase__ = num_samples * [prompt]
lowercase__ = jax.random.split(jax.random.PRNGKey(0 ), lowerCamelCase )
lowercase__ , lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''', revision='''bf16''', dtype=jnp.bfloataa, safety_checker=lowerCamelCase, )
lowercase__ = replicate(lowerCamelCase )
lowercase__ = pipeline.prepare_inputs(lowerCamelCase )
lowercase__ = shard(lowerCamelCase )
lowercase__ = pipeline(lowerCamelCase, lowerCamelCase, lowerCamelCase, jit=lowerCamelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
        slice = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
lowercase__ , lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''', revision='''bf16''', dtype=jnp.bfloataa, safety_checker=lowerCamelCase, use_memory_efficient_attention=lowerCamelCase, )
lowercase__ = replicate(lowerCamelCase )
lowercase__ = pipeline.prepare_inputs(lowerCamelCase )
lowercase__ = shard(lowerCamelCase )
lowercase__ = pipeline(lowerCamelCase, lowerCamelCase, lowerCamelCase, jit=lowerCamelCase ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
| 671 |
from functools import reduce
A__ : Union[str, Any] = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in the 1000-digit number ``n``."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
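# Illustration of the reduce idiom above on a short (hypothetical) input:
# reduce(lambda x, y: str(int(x) * int(y)), "234") first maps ("2", "3") -> "6",
# then ("6", "4") -> "24", so int(...) yields the digit product 24.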
if __name__ == "__main__":
print(F"{solution() = }")
| 671 | 1 |
import argparse
from t5x import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM


def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    """Convert a T5X checkpoint into a Flax transformers checkpoint."""
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    tax_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    split_mlp_wi = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
if config.model_type == "t5":
lowercase__ = '''SelfAttention'''
if config.model_type == "longt5" and config.encoder_attention_type == "local":
lowercase__ = '''LocalSelfAttention'''
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ = '''TransientGlobalSelfAttention'''
else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5'` with `encoder_attention_type`"
            " attribute with a value from ['local', 'transient-global']." )
# Encoder
for layer_index in range(config.num_layers ):
lowercase__ = F"""layers_{str(lowerCamelCase_ )}"""
# Self-Attention
lowercase__ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel''']
lowercase__ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel''']
lowercase__ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel''']
lowercase__ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel''']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale''']
# Layer Normalization
lowercase__ = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale''']
if split_mlp_wi:
lowercase__ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
lowercase__ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
lowercase__ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
lowercase__ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
lowercase__ = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
lowercase__ = flax_model.params['''encoder''']['''block'''][str(lowerCamelCase_ )]['''layer''']
lowercase__ = tax_attention_key
lowercase__ = tax_attention_out
lowercase__ = tax_attention_query
lowercase__ = tax_attention_value
lowercase__ = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ = tax_global_layer_norm
if split_mlp_wi:
lowercase__ = tax_mlp_wi_a
lowercase__ = tax_mlp_wi_a
else:
lowercase__ = tax_mlp_wi
lowercase__ = tax_mlp_wo
lowercase__ = tax_mlp_layer_norm
lowercase__ = flax_model_encoder_layer_block
# Only for layer 0:
    tax_encoder_rel_embedding = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["encoder"]["block"]["0"]["layer"]["0"][attention_name]["relative_attention_bias"]["embedding"] = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        tax_encoder_global_rel_embedding = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        flax_model.params["encoder"]["block"]["0"]["layer"]["0"][attention_name]["global_relative_attention_bias"]["embedding"] = tax_encoder_global_rel_embedding
# Assigning
    tax_encoder_norm = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
    flax_model.params["encoder"]["final_layer_norm"]["weight"] = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
lowercase__ = F"""layers_{str(lowerCamelCase_ )}"""
# Self-Attention
lowercase__ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel''']
lowercase__ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel''']
lowercase__ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel''']
lowercase__ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel''']
# Layer Normalization
lowercase__ = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][
'''scale'''
]
# Encoder-Decoder-Attention
lowercase__ = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention''']
lowercase__ = tax_enc_dec_attention_module['''key''']['''kernel''']
lowercase__ = tax_enc_dec_attention_module['''out''']['''kernel''']
lowercase__ = tax_enc_dec_attention_module['''query''']['''kernel''']
lowercase__ = tax_enc_dec_attention_module['''value''']['''kernel''']
# Layer Normalization
lowercase__ = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale''']
# MLP
if split_mlp_wi:
lowercase__ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
lowercase__ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
lowercase__ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
lowercase__ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
lowercase__ = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
lowercase__ = flax_model.params['''decoder''']['''block'''][str(lowerCamelCase_ )]['''layer''']
lowercase__ = tax_attention_key
lowercase__ = tax_attention_out
lowercase__ = tax_attention_query
lowercase__ = tax_attention_value
lowercase__ = tax_pre_attention_layer_norm
lowercase__ = tax_enc_dec_attention_key
lowercase__ = tax_enc_dec_attention_out
lowercase__ = tax_enc_dec_attention_query
lowercase__ = tax_enc_dec_attention_value
lowercase__ = tax_cross_layer_norm
if split_mlp_wi:
lowercase__ = tax_mlp_wi_a
lowercase__ = tax_mlp_wi_a
else:
lowercase__ = tax_mlp_wi
lowercase__ = tax_mlp_wo
            tax_mlp_layer_norm = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
lowercase__ = flax_model_decoder_layer_block
# Decoder Normalization
    tax_decoder_norm = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
    flax_model.params["decoder"]["final_layer_norm"]["weight"] = tax_decoder_norm
# Only for layer 0:
    tax_decoder_rel_embedding = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["decoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"]["embedding"] = tax_decoder_rel_embedding
# Token Embeddings
    tax_token_embeddings = tax_model["target"]["token_embedder"]["embedding"]
    flax_model.params["shared"]["embedding"] = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
lowercase__ = tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel''']
flax_model.save_pretrained(lowerCamelCase_ )
print('''T5X Model was sucessfully converted!''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
'--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
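# Example invocation (all paths below are hypothetical):
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /tmp/t5x_checkpoint \
#       --config_name google/t5-v1_1-small \
#       --flax_dump_folder_path /tmp/flax_dump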
| 671 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the scheduler's step function: the previous sample and its mean."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """Variance-exploding stochastic differential equation (SDE) scheduler."""

    order = 1

    @register_to_config
    def __init__( self, num_train_timesteps: int = 2_000, snr: float = 0.15, sigma_min: float = 0.01, sigma_max: float = 1348.0, sampling_eps: float = 1e-5, correct_steps: int = 1, ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.timesteps = None
        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """The SDE-VE scheduler does not scale the denoising model input."""
        return sample

    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None):
        """Set the continuous timesteps used for the diffusion chain (run before inference)."""
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None):
        """Set the noise scales used for the diffusion chain (run before inference)."""
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)
        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0, torch.zeros_like(t.to(timesteps.device)), self.discrete_sigmas[timesteps - 1].to(timesteps.device), )
    def step_pred( self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, generator: Optional[torch.Generator] = None, return_dict: bool = True, ):
        """Predict the sample at the previous timestep by reversing the SDE."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)
        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)
    def step_correct( self, model_output: torch.FloatTensor, sample: torch.FloatTensor, generator: Optional[torch.Generator] = None, return_dict: bool = True, ):
        """Correct the predicted sample based on the model_output of the network (usually run repeatedly after step_pred)."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)
        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise( self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.FloatTensor, ):
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples
def __len__( self : Union[str, Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
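# Minimal predictor-corrector sampling sketch (illustrative only; `score_model`
# and the tensor shape are hypothetical, and sigma conditioning is omitted):
#   scheduler = ScoreSdeVeScheduler()
#   scheduler.set_timesteps(num_inference_steps=100)
#   scheduler.set_sigmas(num_inference_steps=100)
#   sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       for _ in range(scheduler.config.correct_steps):
#           sample = scheduler.step_correct(score_model(sample, t), sample).prev_sample
#       sample = scheduler.step_pred(score_model(sample, t), t, sample).prev_sample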
| 671 | 1 |
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 200_0000) -> int:
    """Find the area of the grid whose number of contained rectangles is closest to ``target``."""
    triangle_numbers: list[int] = [0]
    idx: int
    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)
    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int
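    # The number of rectangles contained in an a x b grid is T(a) * T(b), where
    # T(n) = n(n + 1) / 2 is the n-th triangle number.  Setting T(a) * T(b) = target
    # and solving b^2 + b - 2 * target / T(a) = 0 for b yields the estimate used
    # below: b = (-1 + sqrt(1 + 8 * target / T(a))) / 2.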
    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]
        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor
        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil
    return area
if __name__ == "__main__":
print(F"{solution() = }")
| 671 |
from collections import defaultdict
from math import gcd
def solution(limit: int = 150_0000) -> int:
    """Count the perimeters L <= limit formed by exactly one integer-sided right triangle."""
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
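    # Euclid's formula: for coprime m > n > 0 of opposite parity,
    # (m^2 - n^2, 2mn, m^2 + n^2) is a primitive Pythagorean triple with
    # perimeter 2m(m + n); every other triple is an integer multiple of a
    # primitive one, which is what the inner stepped `range` below counts.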
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
if __name__ == "__main__":
print(F"{solution() = }")
| 671 | 1 |
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
abc = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = 'EGZWVONAHDCLFQMSIPJBYUKXTR'
rotor2 = 'FOBHMDKEXQNRAULPGSJVTYICZW'
rotor3 = 'ZJXESIUQLHAVRMDOYGTNFWPBKC'
# reflector --------------------------
reflector = {
'A': 'N',
'N': 'A',
'B': 'O',
'O': 'B',
'C': 'P',
'P': 'C',
'D': 'Q',
'Q': 'D',
'E': 'R',
'R': 'E',
'F': 'S',
'S': 'F',
'G': 'T',
'T': 'G',
'H': 'U',
'U': 'H',
'I': 'V',
'V': 'I',
'J': 'W',
'W': 'J',
'K': 'X',
'X': 'K',
'L': 'Y',
'Y': 'L',
'M': 'Z',
'Z': 'M',
}
# -------------------------- extra rotors --------------------------
rotor4 = 'RMDJXFUWGISLHVTCQNKYPBEZOA'
rotor5 = 'SGLCPQWZHKXAREONTFBVIYJUDM'
rotor6 = 'HVSICLTYKQUBXDWAJZOMFGPREN'
rotor7 = 'RZWQHFMVDBKICJLNTUXAGYPSOE'
rotor8 = 'LFKIJODBEGAMQPXVUHYSTCZRWN'
rotor9 = 'KOAEGVDHXPQZMLFTYWJNBRCIUS'
def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    """Check that the rotor selection, rotor positions and plugboard setting are usable."""
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)
    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f"First rotor position is not within range of 1..26 ({rotorpos1})"
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
        raise ValueError(msg)
    # Validates string and returns dict
    pbdict = _plugboard(pb)
    return rotpos, rotsel, pbdict
def _plugboard(pbstring: str) -> dict[str, str]:
    """Build the symmetric plugboard mapping from a string of letter pairs."""
    # tests the input string if it
    # a) is type string
    # b) has even length (so pairs can be made)
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    elif pbstring == "":
        return {}
    pbstring = pbstring.replace(' ', '')
    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl
    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]
    return pb
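# For example, _plugboard("PICTURES") pairs the letters two at a time and returns
# {'P': 'I', 'I': 'P', 'C': 'T', 'T': 'C', 'U': 'R', 'R': 'U', 'E': 'S', 'S': 'E'}.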
def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    """Encrypt (or, symmetrically, decrypt) ``text`` with the emulated Enigma machine."""
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper() )
    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1
    result = []
    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]
            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]
            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]
            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]
            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]
            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0
        # else:
        #     pass
        #     Error could be also raised
        #     raise ValueError(
        #         'Invalid symbol('+repr(symbol)+')')
        result.append(symbol)
    return "".join(result)
if __name__ == "__main__":
    message = 'This is my Python script that emulates the Enigma machine from WWII.'
    rotor_pos = (1, 1, 1)
    pb = 'pictures'
    rotor_sel = (rotor2, rotor4, rotor8)
    en = enigma(message, rotor_pos, rotor_sel, pb)
print('Encrypted message:', en)
print('Decrypted message:', enigma(en, rotor_pos, rotor_sel, pb))
| 671 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 5_12,
'bert-large-uncased': 5_12,
'bert-base-cased': 5_12,
'bert-large-cased': 5_12,
'bert-base-multilingual-uncased': 5_12,
'bert-base-multilingual-cased': 5_12,
'bert-base-chinese': 5_12,
'bert-base-german-cased': 5_12,
'bert-large-uncased-whole-word-masking': 5_12,
'bert-large-cased-whole-word-masking': 5_12,
'bert-large-uncased-whole-word-masking-finetuned-squad': 5_12,
'bert-large-cased-whole-word-masking-finetuned-squad': 5_12,
'bert-base-cased-finetuned-mrpc': 5_12,
'bert-base-german-dbmdz-cased': 5_12,
'bert-base-german-dbmdz-uncased': 5_12,
'TurkuNLP/bert-base-finnish-cased-v1': 5_12,
'TurkuNLP/bert-base-finnish-uncased-v1': 5_12,
'wietsedv/bert-base-dutch-cased': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" BERT tokenizer backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__( self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs by adding the [CLS] and [SEP] special tokens."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """Create token type IDs (0 for the first sequence, 1 for the second) from the given sequences."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Save the tokenizer vocabulary files to ``save_directory``."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
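# Quick usage example (the model id exists on the Hugging Face Hub):
#   tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')
#   tokenizer('Hello world')  # -> input_ids wrapped as [CLS] ... [SEP], plus masks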
| 671 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_graphormer'] = [
        'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GraphormerForGraphClassification',
        'GraphormerModel',
        'GraphormerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
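# _LazyModule defers the actual submodule imports until a name is first accessed,
# so importing the package stays fast even when optional backends (here: torch)
# are installed; the TYPE_CHECKING branch above gives static type checkers the
# eager view of the same names.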
| 671 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_unispeech'] = [
        'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
        'UniSpeechForCTC',
        'UniSpeechForPreTraining',
        'UniSpeechForSequenceClassification',
        'UniSpeechModel',
        'UniSpeechPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 671 | 1 |
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
    warnings = None
try:
import msvcrt
except ImportError:
    msvcrt = None
try:
import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
'Timeout',
'BaseFileLock',
'WindowsFileLock',
'UnixFileLock',
'SoftFileLock',
'FileLock',
]
__version__ = '3.0.12'


_logger = None
def logger():
    """Return the module-level logger, creating it lazily."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        # The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    """Context-manager proxy returned by ``BaseFileLock.acquire``."""

    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self._timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
return None
    @property
    def lock_file(self):
        """The path to the lock file."""
        return self._lock_file

    @property
    def timeout(self):
        """The default timeout value (in seconds)."""
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        """Platform-dependent low-level acquire; implemented in subclasses."""
        raise NotImplementedError()

    def _release(self):
        """Platform-dependent low-level release; implemented in subclasses."""
        raise NotImplementedError()

    @property
    def is_locked(self):
        """True if the object currently holds the file lock."""
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        """Acquire the file lock, polling every *poll_intervall* seconds until *timeout* expires."""
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout
        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1
        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()
                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..." )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)
    def release(self, force=False):
        """Release the file lock once the lock counter reaches zero (or immediately if *force*)."""
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file
                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None
    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        """Replace over-long lock file names with a hash-based name of at most *max_length* characters."""
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(path))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + '...' + hashed_filename + '.lock'
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """Uses the ``msvcrt.locking`` function to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = '\\\\?\\' + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)
        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """Uses the ``fcntl.flock`` function to hard lock the lock file on Unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile:
        #
        # https://github.com/benediktschmitt/py-filelock/issues/31
        # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None
        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
if warnings is not None:
warnings.warn('only soft file lock is available')
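# Typical usage of the platform-appropriate lock chosen above (path is hypothetical):
#   lock = FileLock('/tmp/my_resource.lock', timeout=5)
#   with lock:          # blocks for up to 5 seconds, then raises Timeout
#       ...             # critical section; the lock is released on exit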
| 671 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_00_00
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    """Time a throwaway `Dataset.map` call (the name intentionally shadows the builtin)."""
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    """Time a throwaway `Dataset.filter` call (the name intentionally shadows the builtin)."""
    _ = dataset.filter(**kwargs)
def benchmark_map_filter():
    times = {'num examples': SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({'text': datasets.Value('string'), 'numbers': datasets.Value('float32')})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, 'dataset.arrow'), features, num_examples=SPEED_TEST_N_EXAMPLES )
        tokenizer = transformers.AutoTokenizer.from_pretrained('bert-base-cased', use_fast=True)

        def tokenize(examples):
            return tokenizer(examples['text'])

        times['map identity'] = map(dataset)
        times['map identity batched'] = map(dataset, batched=True)
        times['map no-op batched'] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type='numpy'):
            times['map no-op batched numpy'] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type='pandas'):
            times['map no-op batched pandas'] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type='torch', columns='numbers'):
            times['map no-op batched pytorch'] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type='tensorflow', columns='numbers'):
            times['map no-op batched tensorflow'] = map(dataset, function=lambda x: None, batched=True)
        times['map fast-tokenizer batched'] = map(dataset, function=tokenize, batched=True)
        times['filter'] = filter(dataset)
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
        with open(RESULTS_FILE_PATH, 'wb') as f:
            f.write(json.dumps(times).encode('utf-8'))
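# `get_duration` (imported from the local `utils` module) presumably wraps the
# decorated call and returns the elapsed wall-clock seconds, which is what gets
# stored in `times` and serialised to the JSON results file above.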
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 671 | 1 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """Run doctests over the files of ``directory``, filtered by the given identifiers."""
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append('__init__.py')
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print('Testing', file)
            if only_modules:
                module_identifier = file.split('.')[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str('..' / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)
    def test_modeling_examples(self):
        transformers_directory = Path('src/transformers')
        files = 'modeling'
        ignore_files = [
            'modeling_ctrl.py',
            'modeling_tf_ctrl.py',
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path('src/transformers')
        files = 'tokenization'
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path('src/transformers')
        files = 'configuration'
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self):
        transformers_directory = Path('src/transformers')
        n_identifiers = ['configuration', 'modeling', 'tokenization']
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path('docs/source')
        ignore_files = ['favicon.ico']
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
| 671 |
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False):
        # Mapping from the first character of the prefix of the node
        self.nodes: dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str):
        """Compute the common substring of the node's prefix and a word, plus both remainders."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]
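    # For example, RadixNode(prefix="banana").match("bandana") returns
    # ("ban", "ana", "dana"): the shared prefix, then what remains of the
    # node's prefix, then what remains of the word.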
    def insert_many(self, words: list[str]):
        """Insert many words into the tree."""
        for word in words:
            self.insert(word)

    def insert(self, word: str):
        """Insert a word into the tree."""
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word )
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, self.is_leaf)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)
    def find(self, word: str):
        """Return True if the word is in the tree."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str):
        """Delete a word from the tree; return True if it was present."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True
    def print_tree(self, height: int = 0):
        """Print the tree, one node per line, indented by depth."""
        if self.prefix != "":
            print('-' * height, self.prefix, ' (leaf)' if self.is_leaf else '')
        for value in self.nodes.values():
            value.print_tree(height + 1)
def test_trie():
    words = 'banana bananas bandana band apple all beast'.split()
    root = RadixNode()
    root.insert_many(words)
    assert all(root.find(word) for word in words)
    assert not root.find('bandanas')
    assert not root.find('apps')
    root.delete('all')
    assert not root.find('all')
    root.delete('banana')
    assert not root.find('banana')
    assert root.find('bananas')
    return True


def pytests():
    assert test_trie()


def main():
    root = RadixNode()
    words = 'banana bananas bandanas bandana band apple all beast'.split()
    root.insert_many(words)
    print('Words:', words)
    print('Tree:')
    root.print_tree()


if __name__ == "__main__":
    main()
| 671 | 1 |
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'feature request',
'new model',
'wip',
]
def main():
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/transformers')
    open_issues = repo.get_issues(state='open')
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
| 671 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        image_processor_map = {
            'do_normalize': False,
            'do_resize': True,
            'image_processor_type': 'ViTImageProcessor',
            'resample': 3,
            'size': {'height': 32, 'width': 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a random PIL image for the processor tests."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)
        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_image_processor()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
lowercase__ = self.get_tokenizer(bos_token='''(BOS)''', eos_token='''(EOS)''' )
lowercase__ = self.get_image_processor(do_normalize=lowerCamelCase, padding_value=1.0 )
lowercase__ = MgpstrProcessor.from_pretrained(
self.tmpdirname, bos_token='''(BOS)''', eos_token='''(EOS)''', do_normalize=lowerCamelCase, padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer, lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCamelCase )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = self.prepare_image_inputs()
lowercase__ = image_processor(lowerCamelCase, return_tensors='''np''' )
lowercase__ = processor(images=lowerCamelCase, return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1E-2 )
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = '''test'''
lowercase__ = processor(text=lowerCamelCase )
lowercase__ = tokenizer(lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = '''test'''
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=lowerCamelCase, images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ), ['''pixel_values''', '''labels'''] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ = processor.char_decode(lowerCamelCase )
lowercase__ = tokenizer.batch_decode(lowerCamelCase )
lowercase__ = [seq.replace(''' ''', '''''' ) for seq in decoded_tok]
self.assertListEqual(lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = None
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=lowerCamelCase, images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ), processor.model_input_names )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = torch.randn(1, 27, 38 )
lowercase__ = torch.randn(1, 27, 50_257 )
lowercase__ = torch.randn(1, 27, 30_522 )
lowercase__ = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ), ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
| 671 | 1 |
from collections.abc import Callable
import numpy as np
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = int(np.ceil((x_end - xa) / step_size ) )
lowercase__ = np.zeros((n + 1,) )
lowercase__ = ya
lowercase__ = xa
for k in range(lowerCamelCase_ ):
lowercase__ = y[k] + step_size * ode_func(lowerCamelCase_ , y[k] )
lowercase__ = y[k] + (
(step_size / 2) * (ode_func(lowerCamelCase_ , y[k] ) + ode_func(x + step_size , lowerCamelCase_ ))
)
x += step_size
return y
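# A minimal usage sketch (hypothetical call; the obfuscated signature takes, in order,
# the ODE right-hand side, the initial y, the initial x, the step size and the final x):
# y = a(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)  # Heun's method applied to dy/dx = y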
if __name__ == "__main__":
import doctest
doctest.testmod()
| 671 |
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
if exponent == 1:
return base
if exponent % 2 == 0:
lowercase__ = _modexpt(lowerCamelCase_ , exponent // 2 , lowerCamelCase_ ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(lowerCamelCase_ , exponent - 1 , lowerCamelCase_ )) % modulo_value
def a ( lowerCamelCase_ = 1777 , lowerCamelCase_ = 1855 , lowerCamelCase_ = 8 ):
'''simple docstring'''
lowercase__ = base
for _ in range(1 , lowerCamelCase_ ):
lowercase__ = _modexpt(lowerCamelCase_ , lowerCamelCase_ , 10**digits )
return result
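# Hypothetical sanity check: a(3, 2, 5) computes 3**3 % 10**5 == 27,
# i.e. the last five digits of the height-2 tetration of 3.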
if __name__ == "__main__":
print(F"{solution() = }")
| 671 | 1 |
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
return [sentence[i : i + ngram_size] for i in range(len(lowerCamelCase_ ) - ngram_size + 1 )]
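# Example (character n-grams): a('abcde', 3) -> ['abc', 'bcd', 'cde']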
if __name__ == "__main__":
from doctest import testmod
testmod()
| 671 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
A__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
def __init__( self : Optional[int], lowerCamelCase : WhisperForConditionalGeneration, lowerCamelCase : WhisperProcessor, lowerCamelCase : AutoencoderKL, lowerCamelCase : CLIPTextModel, lowerCamelCase : CLIPTokenizer, lowerCamelCase : UNetaDConditionModel, lowerCamelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], lowerCamelCase : StableDiffusionSafetyChecker, lowerCamelCase : CLIPImageProcessor, ):
'''simple docstring'''
super().__init__()
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
''' that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
speech_model=lowerCamelCase, speech_processor=lowerCamelCase, vae=lowerCamelCase, text_encoder=lowerCamelCase, tokenizer=lowerCamelCase, unet=lowerCamelCase, scheduler=lowerCamelCase, feature_extractor=lowerCamelCase, )
def lowercase__ ( self : Optional[Any], lowerCamelCase : Optional[Union[str, int]] = "auto" ):
'''simple docstring'''
if slice_size == "auto":
lowercase__ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
self.enable_attention_slicing(lowerCamelCase )
@torch.no_grad()
def __call__( self : Any, lowerCamelCase : Optional[Any], lowerCamelCase : Optional[Any]=16_000, lowerCamelCase : int = 512, lowerCamelCase : int = 512, lowerCamelCase : int = 50, lowerCamelCase : float = 7.5, lowerCamelCase : Optional[Union[str, List[str]]] = None, lowerCamelCase : Optional[int] = 1, lowerCamelCase : float = 0.0, lowerCamelCase : Optional[torch.Generator] = None, lowerCamelCase : Optional[torch.FloatTensor] = None, lowerCamelCase : Optional[str] = "pil", lowerCamelCase : bool = True, lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, lowerCamelCase : int = 1, **lowerCamelCase : Optional[Any], ):
'''simple docstring'''
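        # transcribe the input audio with Whisper; the decoded text below becomes the diffusion prompt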
lowercase__ = self.speech_processor.feature_extractor(
lowerCamelCase, return_tensors='''pt''', sampling_rate=lowerCamelCase ).input_features.to(self.device )
lowercase__ = self.speech_model.generate(lowerCamelCase, max_length=480_000 )
lowercase__ = self.speech_processor.tokenizer.batch_decode(lowerCamelCase, skip_special_tokens=lowerCamelCase, normalize=lowerCamelCase )[
0
]
if isinstance(lowerCamelCase, lowerCamelCase ):
lowercase__ = 1
elif isinstance(lowerCamelCase, lowerCamelCase ):
lowercase__ = len(lowerCamelCase )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(lowerCamelCase )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowerCamelCase, lowerCamelCase ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(lowerCamelCase )}.""" )
# get prompt text embeddings
lowercase__ = self.tokenizer(
lowerCamelCase, padding='''max_length''', max_length=self.tokenizer.model_max_length, return_tensors='''pt''', )
lowercase__ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowercase__ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
lowercase__ = text_input_ids[:, : self.tokenizer.model_max_length]
lowercase__ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowercase__ , lowercase__ , lowercase__ = text_embeddings.shape
lowercase__ = text_embeddings.repeat(1, lowerCamelCase, 1 )
lowercase__ = text_embeddings.view(bs_embed * num_images_per_prompt, lowerCamelCase, -1 )
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowercase__ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowercase__ = 42
if negative_prompt is None:
lowercase__ = [''''''] * batch_size
elif type(lowerCamelCase ) is not type(lowerCamelCase ):
raise TypeError(
F"""`negative_prompt` should be the same type to `prompt`, but got {type(lowerCamelCase )} !="""
F""" {type(lowerCamelCase )}.""" )
elif isinstance(lowerCamelCase, lowerCamelCase ):
lowercase__ = [negative_prompt]
elif batch_size != len(lowerCamelCase ):
raise ValueError(
F"""`negative_prompt`: {negative_prompt} has batch size {len(lowerCamelCase )}, but `prompt`:"""
F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
''' the batch size of `prompt`.''' )
else:
lowercase__ = negative_prompt
lowercase__ = text_input_ids.shape[-1]
lowercase__ = self.tokenizer(
lowerCamelCase, padding='''max_length''', max_length=lowerCamelCase, truncation=lowerCamelCase, return_tensors='''pt''', )
lowercase__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase__ = uncond_embeddings.shape[1]
lowercase__ = uncond_embeddings.repeat(1, lowerCamelCase, 1 )
lowercase__ = uncond_embeddings.view(batch_size * num_images_per_prompt, lowerCamelCase, -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase__ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
        # However, this currently doesn't work in `mps`.
lowercase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowercase__ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowercase__ = torch.randn(lowerCamelCase, generator=lowerCamelCase, device='''cpu''', dtype=lowerCamelCase ).to(
self.device )
else:
lowercase__ = torch.randn(lowerCamelCase, generator=lowerCamelCase, device=self.device, dtype=lowerCamelCase )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
lowercase__ = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(lowerCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowercase__ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowercase__ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase__ = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowercase__ = {}
if accepts_eta:
lowercase__ = eta
for i, t in enumerate(self.progress_bar(lowerCamelCase ) ):
# expand the latents if we are doing classifier free guidance
lowercase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__ = self.scheduler.scale_model_input(lowerCamelCase, lowerCamelCase )
# predict the noise residual
lowercase__ = self.unet(lowerCamelCase, lowerCamelCase, encoder_hidden_states=lowerCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
lowercase__ , lowercase__ = noise_pred.chunk(2 )
lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowercase__ = self.scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowerCamelCase, lowerCamelCase, lowerCamelCase )
lowercase__ = 1 / 0.18215 * latents
lowercase__ = self.vae.decode(lowerCamelCase ).sample
lowercase__ = (image / 2 + 0.5).clamp(0, 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowercase__ = image.cpu().permute(0, 2, 3, 1 ).float().numpy()
if output_type == "pil":
lowercase__ = self.numpy_to_pil(lowerCamelCase )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=lowerCamelCase, nsfw_content_detected=lowerCamelCase )
| 671 | 1 |
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCamelCase, '''embed_dim''' ) )
self.parent.assertTrue(hasattr(lowerCamelCase, '''num_heads''' ) )
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : List[str], lowerCamelCase : List[str], lowerCamelCase : Dict=13, lowerCamelCase : Dict=64, lowerCamelCase : Any=3, lowerCamelCase : List[Any]=[16, 48, 96], lowerCamelCase : List[Any]=[1, 3, 6], lowerCamelCase : List[str]=[1, 2, 10], lowerCamelCase : Optional[Any]=[7, 3, 3], lowerCamelCase : Tuple=[4, 2, 2], lowerCamelCase : Union[str, Any]=[2, 1, 1], lowerCamelCase : Tuple=[2, 2, 2], lowerCamelCase : Optional[int]=[False, False, True], lowerCamelCase : Optional[int]=[0.0, 0.0, 0.0], lowerCamelCase : Tuple=0.02, lowerCamelCase : Any=1E-12, lowerCamelCase : int=True, lowerCamelCase : str=True, lowerCamelCase : List[str]=2, ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = patch_sizes
lowercase__ = patch_stride
lowercase__ = patch_padding
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = num_labels
lowercase__ = num_channels
lowercase__ = embed_dim
lowercase__ = num_heads
lowercase__ = stride_kv
lowercase__ = depth
lowercase__ = cls_token
lowercase__ = attention_drop_rate
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
# create a random int32 tensor of given shape
lowercase__ = ids_tensor([self.batch_size], self.num_labels )
lowercase__ = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
return CvtConfig(
image_size=self.image_size, num_labels=self.num_labels, num_channels=self.num_channels, embed_dim=self.embed_dim, num_heads=self.num_heads, patch_sizes=self.patch_sizes, patch_padding=self.patch_padding, patch_stride=self.patch_stride, stride_kv=self.stride_kv, depth=self.depth, cls_token=self.cls_token, attention_drop_rate=self.attention_drop_rate, initializer_range=self.initializer_range, )
def lowercase__ ( self : Union[str, Any], lowerCamelCase : List[Any], lowerCamelCase : Tuple, lowerCamelCase : Any ):
'''simple docstring'''
lowercase__ = TFCvtModel(config=lowerCamelCase )
lowercase__ = model(lowerCamelCase, training=lowerCamelCase )
lowercase__ = (self.image_size, self.image_size)
lowercase__ , lowercase__ = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
lowercase__ = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
lowercase__ = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width) )
def lowercase__ ( self : Union[str, Any], lowerCamelCase : List[str], lowerCamelCase : Dict, lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.num_labels
lowercase__ = TFCvtForImageClassification(lowerCamelCase )
lowercase__ = model(lowerCamelCase, labels=lowerCamelCase, training=lowerCamelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class _UpperCAmelCase ( A__ ,A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
lowercase__ = (
{"""feature-extraction""": TFCvtModel, """image-classification""": TFCvtForImageClassification}
if is_tf_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ = TFCvtModelTester(self )
lowercase__ = TFCvtConfigTester(self, config_class=lowerCamelCase, has_text_modality=lowerCamelCase, hidden_size=37 )
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason='''Cvt does not output attentions''' )
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason='''Cvt does not use inputs_embeds''' )
def lowercase__ ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip(reason='''Cvt does not support input and output embeddings''' )
def lowercase__ ( self : Dict ):
'''simple docstring'''
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0, reason='''TF does not support backprop for grouped convolutions on CPU.''', )
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0, reason='''TF does not support backprop for grouped convolutions on CPU.''', )
@slow
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
super().test_keras_fit()
@unittest.skip(reason='''Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8''' )
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = tf.keras.mixed_precision.Policy('''mixed_float16''' )
tf.keras.mixed_precision.set_global_policy(lowerCamelCase )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy('''float32''' )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(lowerCamelCase )
lowercase__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1], lowerCamelCase )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase : List[Any], lowerCamelCase : int, lowerCamelCase : int ):
lowercase__ = model_class(lowerCamelCase )
lowercase__ = model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) )
lowercase__ = outputs.hidden_states
lowercase__ = len(self.model_tester.depth )
self.assertEqual(len(lowerCamelCase ), lowerCamelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ), [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
], )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ = True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
@slow
def lowercase__ ( self : Any ):
'''simple docstring'''
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = TFCvtModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def a ( ):
'''simple docstring'''
lowercase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=lowerCamelCase, return_tensors='''tf''' )
# forward pass
lowercase__ = model(**lowerCamelCase )
# verify the logits
lowercase__ = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape, lowerCamelCase )
lowercase__ = tf.constant([0.9285, 0.9015, -0.3150] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), lowerCamelCase, atol=1E-4 ) )
| 671 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
lowercase__ = 42
lowercase__ = 42
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : str, lowerCamelCase : int ):
'''simple docstring'''
lowercase__ = [[] for _ in range(lowerCamelCase )]
lowercase__ = size
def __getitem__( self : Optional[Any], lowerCamelCase : int ):
'''simple docstring'''
return iter(self._graph[vertex] )
@property
def lowercase__ ( self : str ):
'''simple docstring'''
return self._size
def lowercase__ ( self : Union[str, Any], lowerCamelCase : int, lowerCamelCase : int, lowerCamelCase : int ):
'''simple docstring'''
if weight not in (0, 1):
raise ValueError('''Edge weight must be either 0 or 1.''' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('''Vertex indexes must be in [0; size).''' )
self._graph[from_vertex].append(Edge(lowerCamelCase, lowerCamelCase ) )
def lowercase__ ( self : Optional[int], lowerCamelCase : int, lowerCamelCase : int ):
'''simple docstring'''
lowercase__ = deque([start_vertex] )
lowercase__ = [None] * self.size
lowercase__ = 0
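        # 0-1 BFS: weight-0 edges are pushed to the front of the deque and weight-1 edges
        # to the back, so vertices are always popped in nondecreasing distance order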
while queue:
lowercase__ = queue.popleft()
lowercase__ = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
lowercase__ = current_distance + edge.weight
lowercase__ = distances[edge.destination_vertex]
if (
isinstance(lowerCamelCase, lowerCamelCase )
and new_distance >= dest_vertex_distance
):
continue
lowercase__ = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('''No path from start_vertex to finish_vertex.''' )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 671 | 1 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
lowercase__ = None
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = None
lowercase__ = None
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = True
lowercase__ = None
lowercase__ = 1
lowercase__ = None
lowercase__ = False
lowercase__ = None
lowercase__ = None
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
return self.__class__(**{k: copy.deepcopy(lowerCamelCase ) for k, v in self.__dict__.items()} )
| 671 |
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Optional[int], lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
        # the input arrives as a comma-separated string, so split it into a list of number strings
lowercase__ = arr.split(''',''' )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = [int(self.array[0] )] * len(self.array )
lowercase__ = [int(self.array[0] )] * len(self.array )
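        # Kadane-style dynamic programming: one array tracks the best subarray sum ending
        # at index i, the other tracks the best sum seen anywhere so far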
for i in range(1, len(self.array ) ):
lowercase__ = max(
int(self.array[i] ) + sum_value[i - 1], int(self.array[i] ) )
lowercase__ = max(sum_value[i], rear[i - 1] )
return rear[len(self.array ) - 1]
if __name__ == "__main__":
A__ : Dict = input('please input some numbers:')
A__ : Union[str, Any] = SubArray(whole_array)
A__ : int = array.solve_sub_array()
print(('the results is:', re))
| 671 | 1 |
def a ( lowerCamelCase_ ):
'''simple docstring'''
try:
lowercase__ = float(lowerCamelCase_ )
except ValueError:
raise ValueError('''Please enter a valid number''' )
lowercase__ = decimal - int(lowerCamelCase_ )
if fractional_part == 0:
return int(lowerCamelCase_ ), 1
else:
lowercase__ = len(str(lowerCamelCase_ ).split('''.''' )[1] )
lowercase__ = int(decimal * (10**number_of_frac_digits) )
lowercase__ = 10**number_of_frac_digits
lowercase__ , lowercase__ = denominator, numerator
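        # reduce the fraction with an iterative Euclidean algorithm (the divisor ends up as the gcd)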
while True:
lowercase__ = dividend % divisor
if remainder == 0:
break
lowercase__ , lowercase__ = divisor, remainder
lowercase__ , lowercase__ = numerator / divisor, denominator / divisor
return int(lowerCamelCase_ ), int(lowerCamelCase_ )
if __name__ == "__main__":
print(F"{decimal_to_fraction(2) = }")
print(F"{decimal_to_fraction(89.0) = }")
print(F"{decimal_to_fraction('67') = }")
print(F"{decimal_to_fraction('45.0') = }")
print(F"{decimal_to_fraction(1.5) = }")
print(F"{decimal_to_fraction('6.25') = }")
print(F"{decimal_to_fraction('78td') = }")
| 671 |
from itertools import count
def a ( lowerCamelCase_ = 50 ):
'''simple docstring'''
lowercase__ = [1] * min_block_length
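    # fill_count_functions[n] counts the ways to fill a row of length n with blocks of
    # length >= min_block_length separated by at least one empty cell (cf. Project Euler 115)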
for n in count(lowerCamelCase_ ):
fill_count_functions.append(1 )
for block_length in range(lowerCamelCase_ , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 100_0000:
break
return n
if __name__ == "__main__":
print(F"{solution() = }")
| 671 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A__ : Any = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Union[str, Any] = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
A__ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 671 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
A__ : Tuple = logging.get_logger(__name__)
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = ["""input_features""", """is_longer"""]
def __init__( self : Optional[int], lowerCamelCase : int=64, lowerCamelCase : Union[str, Any]=48_000, lowerCamelCase : str=480, lowerCamelCase : Tuple=10, lowerCamelCase : List[Any]=1_024, lowerCamelCase : Optional[int]=0.0, lowerCamelCase : Optional[Any]=False, lowerCamelCase : float = 0, lowerCamelCase : float = 14_000, lowerCamelCase : int = None, lowerCamelCase : str = "fusion", lowerCamelCase : str = "repeatpad", **lowerCamelCase : Dict, ):
'''simple docstring'''
super().__init__(
feature_size=lowerCamelCase, sampling_rate=lowerCamelCase, padding_value=lowerCamelCase, return_attention_mask=lowerCamelCase, **lowerCamelCase, )
lowercase__ = top_db
lowercase__ = truncation
lowercase__ = padding
lowercase__ = fft_window_size
lowercase__ = (fft_window_size >> 1) + 1
lowercase__ = hop_length
lowercase__ = max_length_s
lowercase__ = max_length_s * sampling_rate
lowercase__ = sampling_rate
lowercase__ = frequency_min
lowercase__ = frequency_max
lowercase__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins, num_mel_filters=lowerCamelCase, min_frequency=lowerCamelCase, max_frequency=lowerCamelCase, sampling_rate=lowerCamelCase, norm=lowerCamelCase, mel_scale='''htk''', )
lowercase__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins, num_mel_filters=lowerCamelCase, min_frequency=lowerCamelCase, max_frequency=lowerCamelCase, sampling_rate=lowerCamelCase, norm='''slaney''', mel_scale='''slaney''', )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = copy.deepcopy(self.__dict__ )
lowercase__ = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def lowercase__ ( self : Optional[int], lowerCamelCase : np.array, lowerCamelCase : Optional[np.array] = None ):
'''simple docstring'''
lowercase__ = spectrogram(
lowerCamelCase, window_function(self.fft_window_size, '''hann''' ), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=lowerCamelCase, log_mel='''dB''', )
return log_mel_spectrogram.T
def lowercase__ ( self : int, lowerCamelCase : str, lowerCamelCase : List[str], lowerCamelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ = np.array_split(list(range(0, total_frames - chunk_frames + 1 ) ), 3 )
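        # split the valid start positions into front/middle/back thirds so one chunk is
        # sampled from each region of the mel spectrogram before fusion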
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
lowercase__ = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
lowercase__ = [0]
# randomly choose index for each part
lowercase__ = np.random.choice(ranges[0] )
lowercase__ = np.random.choice(ranges[1] )
lowercase__ = np.random.choice(ranges[2] )
lowercase__ = mel[idx_front : idx_front + chunk_frames, :]
lowercase__ = mel[idx_middle : idx_middle + chunk_frames, :]
lowercase__ = mel[idx_back : idx_back + chunk_frames, :]
lowercase__ = torch.tensor(mel[None, None, :] )
lowercase__ = torch.nn.functional.interpolate(
lowerCamelCase, size=[chunk_frames, 64], mode='''bilinear''', align_corners=lowerCamelCase )
lowercase__ = mel_shrink[0][0].numpy()
lowercase__ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0 )
return mel_fusion
def lowercase__ ( self : List[str], lowerCamelCase : np.array, lowerCamelCase : int, lowerCamelCase : Dict, lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
lowercase__ = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
lowercase__ = len(lowerCamelCase ) - max_length
lowercase__ = np.random.randint(0, overflow + 1 )
lowercase__ = waveform[idx : idx + max_length]
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters )
            lowercase__ = max_length // self.hop_length + 1 # the +1 relates to how the spectrogram is computed
lowercase__ = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
lowercase__ = np.stack([mel, mel, mel, mel], axis=0 )
lowercase__ = False
else:
lowercase__ = self._random_mel_fusion(lowerCamelCase, lowerCamelCase, lowerCamelCase )
lowercase__ = True
else:
raise NotImplementedError(F"""data_truncating {truncation} not implemented""" )
else:
lowercase__ = False
        # only use `repeat` as a new possible value for padding: the audio is repeated before the usual max_length padding is applied
if waveform.shape[0] < max_length:
if padding == "repeat":
lowercase__ = int(max_length / len(lowerCamelCase ) )
lowercase__ = np.stack(np.tile(lowerCamelCase, n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
lowercase__ = int(max_length / len(lowerCamelCase ) )
lowercase__ = np.stack(np.tile(lowerCamelCase, lowerCamelCase ) )
lowercase__ = np.pad(lowerCamelCase, (0, max_length - waveform.shape[0]), mode='''constant''', constant_values=0 )
if truncation == "fusion":
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters )
lowercase__ = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0 )
else:
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Union[str, Any], lowerCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], lowerCamelCase : str = None, lowerCamelCase : Optional[str] = None, lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[Union[str, TensorType]] = None, **lowerCamelCase : List[str], ):
'''simple docstring'''
lowercase__ = truncation if truncation is not None else self.truncation
lowercase__ = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
lowercase__ = isinstance(lowerCamelCase, np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
lowercase__ = is_batched_numpy or (
isinstance(lowerCamelCase, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase__ = [np.asarray(lowerCamelCase, dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCamelCase, np.ndarray ):
lowercase__ = np.asarray(lowerCamelCase, dtype=np.floataa )
elif isinstance(lowerCamelCase, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase__ = [np.asarray(lowerCamelCase )]
# convert to mel spectrogram, truncate and pad if needed.
lowercase__ = [
self._get_input_mel(lowerCamelCase, max_length if max_length else self.nb_max_samples, lowerCamelCase, lowerCamelCase )
for waveform in raw_speech
]
lowercase__ = []
lowercase__ = []
for mel, longer in padded_inputs:
input_mel.append(lowerCamelCase )
is_longer.append(lowerCamelCase )
if truncation == "fusion" and sum(lowerCamelCase ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
lowercase__ = np.random.randint(0, len(lowerCamelCase ) )
lowercase__ = True
if isinstance(input_mel[0], lowerCamelCase ):
lowercase__ = [np.asarray(lowerCamelCase, dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
lowercase__ = [[longer] for longer in is_longer]
lowercase__ = {'''input_features''': input_mel, '''is_longer''': is_longer}
lowercase__ = BatchFeature(lowerCamelCase )
if return_tensors is not None:
lowercase__ = input_features.convert_to_tensors(lowerCamelCase )
return input_features
| 671 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = (
"""This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."""
"""It takes two arguments named `image` which should be the original image, and `label` which should be a text """
"""describing the elements what should be identified in the segmentation mask. The tool returns the mask."""
)
lowercase__ = """CIDAS/clipseg-rd64-refined"""
lowercase__ = """image_segmenter"""
lowercase__ = CLIPSegForImageSegmentation
lowercase__ = ["""image""", """text"""]
lowercase__ = ["""image"""]
def __init__( self : Optional[Any], *lowerCamelCase : Tuple, **lowerCamelCase : List[Any] ):
'''simple docstring'''
requires_backends(self, ['''vision'''] )
super().__init__(*lowerCamelCase, **lowerCamelCase )
def lowercase__ ( self : Optional[Any], lowerCamelCase : "Image", lowerCamelCase : str ):
'''simple docstring'''
return self.pre_processor(text=[label], images=[image], padding=lowerCamelCase, return_tensors='''pt''' )
def lowercase__ ( self : Any, lowerCamelCase : Optional[int] ):
'''simple docstring'''
with torch.no_grad():
lowercase__ = self.model(**lowerCamelCase ).logits
return logits
def lowercase__ ( self : str, lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
lowercase__ = outputs.cpu().detach().numpy()
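        # threshold the logits into a binary {0, 1} mask before scaling to an 8-bit image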
lowercase__ = 0
lowercase__ = 1
return Image.fromarray((array * 255).astype(np.uinta ) )
| 671 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
lowercase__ = 42
lowercase__ = None
lowercase__ = None
def a ( ):
'''simple docstring'''
lowercase__ = Node(1 )
lowercase__ = Node(2 )
lowercase__ = Node(3 )
lowercase__ = Node(4 )
lowercase__ = Node(5 )
return tree
def a ( lowerCamelCase_ ):
'''simple docstring'''
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def a ( lowerCamelCase_ ):
'''simple docstring'''
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def a ( lowerCamelCase_ ):
'''simple docstring'''
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def a ( lowerCamelCase_ ):
'''simple docstring'''
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
if root is None:
return output
lowercase__ = deque([root] )
while process_queue:
lowercase__ = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
def populate_output(lowerCamelCase_ , lowerCamelCase_ ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(lowerCamelCase_ , lowerCamelCase_ )
return output
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
def populate_output(lowerCamelCase_ , lowerCamelCase_ ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(lowerCamelCase_ , lowerCamelCase_ )
return output
def a ( lowerCamelCase_ ):
'''simple docstring'''
if root is None:
return []
lowercase__ = []
lowercase__ = 0
lowercase__ = height(lowerCamelCase_ )
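    # alternate the traversal direction on every level: left-to-right first, then right-to-left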
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(lowerCamelCase_ , lowerCamelCase_ ) )
lowercase__ = 1
else:
output.append(get_nodes_from_right_to_left(lowerCamelCase_ , lowerCamelCase_ ) )
lowercase__ = 0
return output
def a ( ): # Main function for testing.
'''simple docstring'''
lowercase__ = make_tree()
print(F"""In-order Traversal: {inorder(lowerCamelCase_ )}""" )
print(F"""Pre-order Traversal: {preorder(lowerCamelCase_ )}""" )
print(F"""Post-order Traversal: {postorder(lowerCamelCase_ )}""" , '''\n''' )
print(F"""Height of Tree: {height(lowerCamelCase_ )}""" , '''\n''' )
print('''Complete Level Order Traversal: ''' )
print(level_order(lowerCamelCase_ ) , '''\n''' )
print('''Level-wise order Traversal: ''' )
for level in range(1 , height(lowerCamelCase_ ) + 1 ):
print(F"""Level {level}:""" , get_nodes_from_left_to_right(lowerCamelCase_ , level=lowerCamelCase_ ) )
print('''\nZigZag order Traversal: ''' )
print(zigzag(lowerCamelCase_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 671 | 1 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Dict, lowerCamelCase : str, lowerCamelCase : List[Any]=13, lowerCamelCase : List[str]=7, lowerCamelCase : Optional[Any]=True, lowerCamelCase : str=True, lowerCamelCase : List[str]=True, lowerCamelCase : Dict=True, lowerCamelCase : Tuple=99, lowerCamelCase : Optional[int]=32, lowerCamelCase : Any=5, lowerCamelCase : Dict=4, lowerCamelCase : str=37, lowerCamelCase : int="gelu", lowerCamelCase : List[Any]=0.1, lowerCamelCase : List[str]=0.1, lowerCamelCase : Tuple=512, lowerCamelCase : Union[str, Any]=16, lowerCamelCase : List[str]=2, lowerCamelCase : Optional[Any]=0.02, lowerCamelCase : Dict=3, lowerCamelCase : Dict=4, lowerCamelCase : Dict=None, ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_input_mask
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = scope
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowercase__ = None
if self.use_input_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowercase__ = ids_tensor([self.batch_size], self.num_choices )
lowercase__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : Any ):
'''simple docstring'''
return NystromformerConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase, initializer_range=self.initializer_range, )
def lowercase__ ( self : Dict, lowerCamelCase : str, lowerCamelCase : Union[str, Any], lowerCamelCase : Dict, lowerCamelCase : Any, lowerCamelCase : Dict, lowerCamelCase : Any, lowerCamelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ = NystromformerModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ = model(lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase )
lowercase__ = model(lowerCamelCase, token_type_ids=lowerCamelCase )
lowercase__ = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : str, lowerCamelCase : Optional[Any], lowerCamelCase : List[str], lowerCamelCase : Optional[Any], lowerCamelCase : str, lowerCamelCase : Dict, lowerCamelCase : Optional[int], lowerCamelCase : List[Any] ):
'''simple docstring'''
lowercase__ = NystromformerForMaskedLM(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ = model(lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase, labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Optional[int], lowerCamelCase : Tuple, lowerCamelCase : Any, lowerCamelCase : List[str], lowerCamelCase : Dict, lowerCamelCase : Tuple, lowerCamelCase : Any, lowerCamelCase : Dict ):
'''simple docstring'''
lowercase__ = NystromformerForQuestionAnswering(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ = model(
lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase, start_positions=lowerCamelCase, end_positions=lowerCamelCase, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def lowercase__ ( self : Optional[int], lowerCamelCase : Any, lowerCamelCase : Union[str, Any], lowerCamelCase : int, lowerCamelCase : int, lowerCamelCase : int, lowerCamelCase : Any, lowerCamelCase : str ):
'''simple docstring'''
lowercase__ = self.num_labels
lowercase__ = NystromformerForSequenceClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ = model(lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase, labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def lowercase__ ( self : Union[str, Any], lowerCamelCase : List[Any], lowerCamelCase : int, lowerCamelCase : Optional[Any], lowerCamelCase : Union[str, Any], lowerCamelCase : Optional[int], lowerCamelCase : str, lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.num_labels
lowercase__ = NystromformerForTokenClassification(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ = model(lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase, labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Optional[int], lowerCamelCase : Dict, lowerCamelCase : Tuple, lowerCamelCase : str, lowerCamelCase : Optional[int], lowerCamelCase : Dict, lowerCamelCase : Tuple, lowerCamelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.num_choices
lowercase__ = NystromformerForMultipleChoice(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowercase__ = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowercase__ = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowercase__ = model(
lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase, labels=lowerCamelCase, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
        lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( A__ ,A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
lowercase__ = (
{
"""feature-extraction""": NystromformerModel,
"""fill-mask""": NystromformerForMaskedLM,
"""question-answering""": NystromformerForQuestionAnswering,
"""text-classification""": NystromformerForSequenceClassification,
"""token-classification""": NystromformerForTokenClassification,
"""zero-shot""": NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = NystromformerModelTester(self )
lowercase__ = ConfigTester(self, config_class=lowerCamelCase, hidden_size=37 )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase__ = type
self.model_tester.create_and_check_model(*lowerCamelCase )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase )
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCamelCase )
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase )
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase )
@slow
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = NystromformerModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = NystromformerModel.from_pretrained('''uw-madison/nystromformer-512''' )
lowercase__ = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
lowercase__ = model(lowerCamelCase )[0]
lowercase__ = torch.Size((1, 6, 768) )
self.assertEqual(output.shape, lowerCamelCase )
lowercase__ = torch.tensor(
[[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] )
self.assertTrue(torch.allclose(output[:, :3, :3], lowerCamelCase, atol=1E-4 ) )
@slow
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = '''the [MASK] of Belgium is Brussels'''
lowercase__ = AutoTokenizer.from_pretrained('''uw-madison/nystromformer-512''' )
lowercase__ = NystromformerForMaskedLM.from_pretrained('''uw-madison/nystromformer-512''' )
lowercase__ = tokenizer(lowerCamelCase, return_tensors='''pt''' )
with torch.no_grad():
lowercase__ = model(encoding.input_ids ).logits
lowercase__ = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(lowerCamelCase ), '''capital''' )
| 671 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = DistilBertTokenizer
lowercase__ = DistilBertTokenizerFast
lowercase__ = True
@slow
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' )
lowercase__ = tokenizer.encode('''sequence builders''', add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.encode('''multi-sequence build''', add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase, lowerCamelCase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 671 | 1 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
A__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
def __init__( self : Optional[int], lowerCamelCase : WhisperForConditionalGeneration, lowerCamelCase : WhisperProcessor, lowerCamelCase : AutoencoderKL, lowerCamelCase : CLIPTextModel, lowerCamelCase : CLIPTokenizer, lowerCamelCase : UNetaDConditionModel, lowerCamelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], lowerCamelCase : StableDiffusionSafetyChecker, lowerCamelCase : CLIPImageProcessor, ):
'''simple docstring'''
super().__init__()
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
speech_model=lowerCamelCase, speech_processor=lowerCamelCase, vae=lowerCamelCase, text_encoder=lowerCamelCase, tokenizer=lowerCamelCase, unet=lowerCamelCase, scheduler=lowerCamelCase, feature_extractor=lowerCamelCase, )
def lowercase__ ( self : Optional[Any], lowerCamelCase : Optional[Union[str, int]] = "auto" ):
'''simple docstring'''
if slice_size == "auto":
lowercase__ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
self.enable_attention_slicing(lowerCamelCase )
@torch.no_grad()
def __call__( self : Any, lowerCamelCase : Optional[Any], lowerCamelCase : Optional[Any]=16_000, lowerCamelCase : int = 512, lowerCamelCase : int = 512, lowerCamelCase : int = 50, lowerCamelCase : float = 7.5, lowerCamelCase : Optional[Union[str, List[str]]] = None, lowerCamelCase : Optional[int] = 1, lowerCamelCase : float = 0.0, lowerCamelCase : Optional[torch.Generator] = None, lowerCamelCase : Optional[torch.FloatTensor] = None, lowerCamelCase : Optional[str] = "pil", lowerCamelCase : bool = True, lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, lowerCamelCase : int = 1, **lowerCamelCase : Optional[Any], ):
'''simple docstring'''
lowercase__ = self.speech_processor.feature_extractor(
lowerCamelCase, return_tensors='''pt''', sampling_rate=lowerCamelCase ).input_features.to(self.device )
lowercase__ = self.speech_model.generate(lowerCamelCase, max_length=480_000 )
lowercase__ = self.speech_processor.tokenizer.batch_decode(lowerCamelCase, skip_special_tokens=lowerCamelCase, normalize=lowerCamelCase )[
0
]
if isinstance(lowerCamelCase, lowerCamelCase ):
lowercase__ = 1
elif isinstance(lowerCamelCase, lowerCamelCase ):
lowercase__ = len(lowerCamelCase )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(lowerCamelCase )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowerCamelCase, lowerCamelCase ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(lowerCamelCase )}.""" )
# get prompt text embeddings
lowercase__ = self.tokenizer(
lowerCamelCase, padding='''max_length''', max_length=self.tokenizer.model_max_length, return_tensors='''pt''', )
lowercase__ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowercase__ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
lowercase__ = text_input_ids[:, : self.tokenizer.model_max_length]
lowercase__ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowercase__ , lowercase__ , lowercase__ = text_embeddings.shape
lowercase__ = text_embeddings.repeat(1, lowerCamelCase, 1 )
lowercase__ = text_embeddings.view(bs_embed * num_images_per_prompt, lowerCamelCase, -1 )
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowercase__ = guidance_scale > 1.0
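        # Worked example of the combination applied in the denoising loop below
        # (see the `noise_pred` update): with guidance_scale = 7.5,
        #   noise_pred = noise_pred_uncond + 7.5 * (noise_pred_text - noise_pred_uncond)
        # i.e. the prediction is pushed away from the unconditional branch.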
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowercase__ = 42
if negative_prompt is None:
lowercase__ = [''''''] * batch_size
elif type(lowerCamelCase ) is not type(lowerCamelCase ):
raise TypeError(
F"""`negative_prompt` should be the same type to `prompt`, but got {type(lowerCamelCase )} !="""
F""" {type(lowerCamelCase )}.""" )
elif isinstance(lowerCamelCase, lowerCamelCase ):
lowercase__ = [negative_prompt]
elif batch_size != len(lowerCamelCase ):
raise ValueError(
F"""`negative_prompt`: {negative_prompt} has batch size {len(lowerCamelCase )}, but `prompt`:"""
F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
''' the batch size of `prompt`.''' )
else:
lowercase__ = negative_prompt
lowercase__ = text_input_ids.shape[-1]
lowercase__ = self.tokenizer(
lowerCamelCase, padding='''max_length''', max_length=lowerCamelCase, truncation=lowerCamelCase, return_tensors='''pt''', )
lowercase__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase__ = uncond_embeddings.shape[1]
lowercase__ = uncond_embeddings.repeat(1, lowerCamelCase, 1 )
lowercase__ = uncond_embeddings.view(batch_size * num_images_per_prompt, lowerCamelCase, -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase__ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowercase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowercase__ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowercase__ = torch.randn(lowerCamelCase, generator=lowerCamelCase, device='''cpu''', dtype=lowerCamelCase ).to(
self.device )
else:
lowercase__ = torch.randn(lowerCamelCase, generator=lowerCamelCase, device=self.device, dtype=lowerCamelCase )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
lowercase__ = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(lowerCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowercase__ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowercase__ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase__ = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowercase__ = {}
if accepts_eta:
lowercase__ = eta
for i, t in enumerate(self.progress_bar(lowerCamelCase ) ):
# expand the latents if we are doing classifier free guidance
lowercase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__ = self.scheduler.scale_model_input(lowerCamelCase, lowerCamelCase )
# predict the noise residual
lowercase__ = self.unet(lowerCamelCase, lowerCamelCase, encoder_hidden_states=lowerCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
lowercase__ , lowercase__ = noise_pred.chunk(2 )
lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowercase__ = self.scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowerCamelCase, lowerCamelCase, lowerCamelCase )
lowercase__ = 1 / 0.18215 * latents
lowercase__ = self.vae.decode(lowerCamelCase ).sample
lowercase__ = (image / 2 + 0.5).clamp(0, 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowercase__ = image.cpu().permute(0, 2, 3, 1 ).float().numpy()
if output_type == "pil":
lowercase__ = self.numpy_to_pil(lowerCamelCase )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=lowerCamelCase, nsfw_content_detected=lowerCamelCase )
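# A minimal usage sketch (hedged: the checkpoint name and the `custom_pipeline`
# identifier below are assumptions, not taken from this file):
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", custom_pipeline="speech_to_image_diffusion"
#   )
#   image = pipe(audio_array, audio_sampling_rate).images[0]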
| 671 |
from __future__ import annotations
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = 0.00
lowercase__ = 0
for resistor in resistors:
if resistor <= 0:
lowercase__ = F"""Resistor at index {index} has a negative or zero value!"""
raise ValueError(lowerCamelCase_ )
first_sum += 1 / float(lowerCamelCase_ )
index += 1
return 1 / first_sum
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = 0.00
lowercase__ = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
lowercase__ = F"""Resistor at index {index} has a negative value!"""
raise ValueError(lowerCamelCase_ )
index += 1
return sum_r
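# Quick sanity values (using hypothetical names such as resistor_parallel /
# resistor_series for the two functions above):
#   parallel([3.2, 3.2]) -> 1 / (1/3.2 + 1/3.2) == 1.6
#   series([3.2, 3.2])   -> 3.2 + 3.2           == 6.4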
if __name__ == "__main__":
import doctest
doctest.testmod()
| 671 | 1 |
def a ( ):
'''simple docstring'''
return 1
def a ( lowerCamelCase_ ):
'''simple docstring'''
return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def a ( lowerCamelCase_ ):
'''simple docstring'''
return 0 if x < 0 else five_pence(x - 5 ) + two_pence(lowerCamelCase_ )
def a ( lowerCamelCase_ ):
'''simple docstring'''
return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(lowerCamelCase_ )
def a ( lowerCamelCase_ ):
'''simple docstring'''
return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(lowerCamelCase_ )
def a ( lowerCamelCase_ ):
'''simple docstring'''
return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(lowerCamelCase_ )
def a ( lowerCamelCase_ ):
'''simple docstring'''
return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(lowerCamelCase_ )
def a ( lowerCamelCase_ ):
'''simple docstring'''
return 0 if x < 0 else two_pound(x - 200 ) + one_pound(lowerCamelCase_ )
def a ( lowerCamelCase_ = 200 ):
'''simple docstring'''
return two_pound(lowerCamelCase_ )
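# Sanity note: the recursion above counts the ways to make `pence` from the UK
# coin set {1, 2, 5, 10, 20, 50, 100, 200}. For the default of 200 pence the
# script prints 73682, the published Project Euler 31 answer.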
if __name__ == "__main__":
print(solution(int(input().strip())))
| 671 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'''
lowercase__ = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert('''RGB''' )
lowercase__ = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73) , (0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11) ),
] )
lowercase__ = transform(lowerCamelCase_ ).unsqueeze(0 ).to(lowerCamelCase_ )
return image
def a ( lowerCamelCase_ ):
'''simple docstring'''
if "visual_encoder" in key:
lowercase__ = re.sub('''visual_encoder*''' , '''vision_model.encoder''' , lowerCamelCase_ )
if "blocks" in key:
lowercase__ = re.sub(r'''blocks''' , '''layers''' , lowerCamelCase_ )
if "attn" in key:
lowercase__ = re.sub(r'''attn''' , '''self_attn''' , lowerCamelCase_ )
if "norm1" in key:
lowercase__ = re.sub(r'''norm1''' , '''layer_norm1''' , lowerCamelCase_ )
if "norm2" in key:
lowercase__ = re.sub(r'''norm2''' , '''layer_norm2''' , lowerCamelCase_ )
if "encoder.norm" in key:
lowercase__ = re.sub(r'''encoder.norm''' , '''post_layernorm''' , lowerCamelCase_ )
if "encoder.patch_embed.proj" in key:
lowercase__ = re.sub(r'''encoder.patch_embed.proj''' , '''embeddings.patch_embedding''' , lowerCamelCase_ )
if "encoder.pos_embed" in key:
lowercase__ = re.sub(r'''encoder.pos_embed''' , '''embeddings.position_embedding''' , lowerCamelCase_ )
if "encoder.cls_token" in key:
lowercase__ = re.sub(r'''encoder.cls_token''' , '''embeddings.class_embedding''' , lowerCamelCase_ )
if "self_attn" in key:
lowercase__ = re.sub(r'''self_attn.proj''' , '''self_attn.projection''' , lowerCamelCase_ )
return key
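# Sketch of the intended end-to-end mapping (assuming each re.sub result is
# chained through `key`, as in the original conversion script), e.g.:
#   "visual_encoder.blocks.0.attn.proj.weight"
#       -> "vision_model.encoder.layers.0.self_attn.projection.weight"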
@torch.no_grad()
def a ( lowerCamelCase_ , lowerCamelCase_=None ):
'''simple docstring'''
if config_path is not None:
lowercase__ = BlipConfig.from_pretrained(lowerCamelCase_ )
else:
lowercase__ = BlipConfig(projection_dim=512 , text_config={} , vision_config={} )
lowercase__ = BlipForConditionalGeneration(lowerCamelCase_ ).eval()
lowercase__ = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'''
lowercase__ = blip_decoder(pretrained=lowerCamelCase_ , image_size=384 , vit='''base''' )
lowercase__ = pt_model.eval()
lowercase__ = pt_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ = modified_state_dict.pop(lowerCamelCase_ )
lowercase__ = rename_key(lowerCamelCase_ )
lowercase__ = value
hf_model.load_state_dict(lowerCamelCase_ )
lowercase__ = 384
lowercase__ = load_demo_image(image_size=lowerCamelCase_ , device='''cpu''' )
lowercase__ = BertTokenizer.from_pretrained('''bert-base-uncased''' )
lowercase__ = tokenizer(['''a picture of'''] ).input_ids
lowercase__ = hf_model.generate(lowerCamelCase_ , lowerCamelCase_ )
assert out[0].tolist() == [3_0522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
lowercase__ = hf_model.generate(lowerCamelCase_ )
assert out[0].tolist() == [3_0522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(lowerCamelCase_ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
lowercase__ = (
'''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'''
)
lowercase__ = blip_vqa(pretrained=lowerCamelCase_ , image_size=lowerCamelCase_ , vit='''base''' )
vqa_model.eval()
lowercase__ = vqa_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ = modified_state_dict.pop(lowerCamelCase_ )
lowercase__ = rename_key(lowerCamelCase_ )
lowercase__ = value
lowercase__ = BlipForQuestionAnswering(lowerCamelCase_ )
hf_vqa_model.load_state_dict(lowerCamelCase_ )
lowercase__ = ['''How many dogs are in this image?''']
lowercase__ = tokenizer(lowerCamelCase_ , return_tensors='''pt''' ).input_ids
lowercase__ = hf_vqa_model.generate(lowerCamelCase_ , lowerCamelCase_ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '''_vqa''' )
lowercase__ = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'''
lowercase__ = blip_itm(pretrained=lowerCamelCase_ , image_size=lowerCamelCase_ , vit='''base''' )
itm_model.eval()
lowercase__ = itm_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ = modified_state_dict.pop(lowerCamelCase_ )
lowercase__ = rename_key(lowerCamelCase_ )
lowercase__ = value
lowercase__ = BlipForImageTextRetrieval(lowerCamelCase_ )
lowercase__ = ['''A picture of a woman with a dog sitting in a beach''']
lowercase__ = tokenizer(
lowerCamelCase_ , return_tensors='''pt''' , padding='''max_length''' , truncation=lowerCamelCase_ , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(lowerCamelCase_ )
hf_itm_model.eval()
lowercase__ = hf_itm_model(lowerCamelCase_ , lowerCamelCase_ , use_itm_head=lowerCamelCase_ )
lowercase__ = hf_itm_model(lowerCamelCase_ , lowerCamelCase_ , use_itm_head=lowerCamelCase_ )
assert out[0].item() == 0.21_10_68_74_94_27_79_54
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_56_98_84_53_86_50_51_27
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '''_itm''' )
if __name__ == "__main__":
A__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
A__ : List[Any] = parser.parse_args()
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 671 | 1 |
def a ( lowerCamelCase_ ):
'''simple docstring'''
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise TypeError('''\'float\' object cannot be interpreted as an integer''' )
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise TypeError('''\'str\' object cannot be interpreted as an integer''' )
if num == 0:
return "0b0"
lowercase__ = False
if num < 0:
lowercase__ = True
lowercase__ = -num
lowercase__ = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(lowerCamelCase_ ) for e in binary )
return "0b" + "".join(str(lowerCamelCase_ ) for e in binary )
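# Expected behaviour for a few inputs:
#   8  -> "0b1000"
#   -5 -> "-0b101"
#   0  -> "0b0"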
if __name__ == "__main__":
import doctest
doctest.testmod()
| 671 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : str, lowerCamelCase : Any, lowerCamelCase : Tuple=7, lowerCamelCase : str=3, lowerCamelCase : Tuple=18, lowerCamelCase : int=30, lowerCamelCase : Tuple=400, lowerCamelCase : Any=True, lowerCamelCase : Any=None, lowerCamelCase : List[str]=True, lowerCamelCase : Union[str, Any]=None, ):
'''simple docstring'''
lowercase__ = size if size is not None else {'''shortest_edge''': 20}
lowercase__ = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = min_resolution
lowercase__ = max_resolution
lowercase__ = do_resize
lowercase__ = size
lowercase__ = do_center_crop
lowercase__ = crop_size
def lowercase__ ( self : Any ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class _UpperCAmelCase ( A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = MobileNetVaImageProcessor if is_vision_available() else None
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = MobileNetVaImageProcessingTester(self )
@property
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase, '''do_resize''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''size''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''do_center_crop''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''crop_size''' ) )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size, {'''height''': 18, '''width''': 18} )
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84 )
self.assertEqual(image_processor.size, {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size, {'''height''': 84, '''width''': 84} )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
pass
def lowercase__ ( self : Any ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, Image.Image )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
def lowercase__ ( self : str ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase, numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, np.ndarray )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
def lowercase__ ( self : str ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase, torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, torch.Tensor )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
| 671 | 1 |
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def a ( lowerCamelCase_ ):
'''simple docstring'''
return {key.lstrip('''-''' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def a ( ):
'''simple docstring'''
lowercase__ = ArgumentParser(
'''HuggingFace Datasets CLI tool''' , usage='''datasets-cli <command> [<args>]''' , allow_abbrev=lowerCamelCase_ )
lowercase__ = parser.add_subparsers(help='''datasets-cli command helpers''' )
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(lowerCamelCase_ )
EnvironmentCommand.register_subcommand(lowerCamelCase_ )
TestCommand.register_subcommand(lowerCamelCase_ )
RunBeamCommand.register_subcommand(lowerCamelCase_ )
DummyDataCommand.register_subcommand(lowerCamelCase_ )
# Parse args
lowercase__ , lowercase__ = parser.parse_known_args()
if not hasattr(lowerCamelCase_ , '''func''' ):
parser.print_help()
exit(1 )
lowercase__ = parse_unknown_args(lowerCamelCase_ )
# Run
lowercase__ = args.func(lowerCamelCase_ , **lowerCamelCase_ )
service.run()
if __name__ == "__main__":
main()
| 671 |
import argparse
import os
import re
A__ : Optional[int] = 'src/transformers'
# Pattern that looks at the indentation in a line.
A__ : Union[str, Any] = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
A__ : List[str] = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
A__ : List[Any] = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
A__ : int = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
A__ : Tuple = re.compile(r'\[([^\]]+)\]')
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = _re_indent.search(lowerCamelCase_ )
return "" if search is None else search.groups()[0]
def a ( lowerCamelCase_ , lowerCamelCase_="" , lowerCamelCase_=None , lowerCamelCase_=None ):
'''simple docstring'''
lowercase__ = 0
lowercase__ = code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(lowerCamelCase_ ):
index += 1
lowercase__ = ['''\n'''.join(lines[:index] )]
else:
lowercase__ = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowercase__ = [lines[index]]
index += 1
while index < len(lowerCamelCase_ ) and (end_prompt is None or not lines[index].startswith(lowerCamelCase_ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(lowerCamelCase_ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(lowerCamelCase_ ) )
if index < len(lowerCamelCase_ ) - 1:
lowercase__ = [lines[index + 1]]
index += 1
else:
lowercase__ = []
else:
blocks.append('''\n'''.join(lowerCamelCase_ ) )
lowercase__ = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(lowerCamelCase_ ) > 0:
blocks.append('''\n'''.join(lowerCamelCase_ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowerCamelCase_ ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def a ( lowerCamelCase_ ):
'''simple docstring'''
def _inner(lowerCamelCase_ ):
return key(lowerCamelCase_ ).lower().replace('''_''' , '''''' )
return _inner
def a ( lowerCamelCase_ , lowerCamelCase_=None ):
'''simple docstring'''
# If no key is provided, we use a noop.
def noop(lowerCamelCase_ ):
return x
if key is None:
lowercase__ = noop
# Constants are all uppercase, they go first.
lowercase__ = [obj for obj in objects if key(lowerCamelCase_ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowercase__ = [obj for obj in objects if key(lowerCamelCase_ )[0].isupper() and not key(lowerCamelCase_ ).isupper()]
# Functions begin with a lowercase, they go last.
lowercase__ = [obj for obj in objects if not key(lowerCamelCase_ )[0].isupper()]
lowercase__ = ignore_underscore(lowerCamelCase_ )
return sorted(lowerCamelCase_ , key=lowerCamelCase_ ) + sorted(lowerCamelCase_ , key=lowerCamelCase_ ) + sorted(lowerCamelCase_ , key=lowerCamelCase_ )
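# e.g. with the default key, ["foo", "Bar", "BAZ"] sorts to ["BAZ", "Bar", "foo"]:
# constants (all caps) first, then classes (capitalized), then functions.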
def a ( lowerCamelCase_ ):
'''simple docstring'''
    # This inner function sorts imports between [ ].
def _replace(lowerCamelCase_ ):
lowercase__ = match.groups()[0]
if "," not in imports:
return F"""[{imports}]"""
lowercase__ = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowercase__ = keys[:-1]
return "[" + ", ".join([F"""\"{k}\"""" for k in sort_objects(lowerCamelCase_ )] ) + "]"
lowercase__ = import_statement.split('''\n''' )
if len(lowerCamelCase_ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowercase__ = 2 if lines[1].strip() == '''[''' else 1
lowercase__ = [(i, _re_strip_line.search(lowerCamelCase_ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
lowercase__ = sort_objects(lowerCamelCase_ , key=lambda lowerCamelCase_ : x[1] )
lowercase__ = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(lowerCamelCase_ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowercase__ = _re_bracket_content.sub(_replace , lines[1] )
else:
lowercase__ = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowercase__ = keys[:-1]
lowercase__ = get_indent(lines[1] ) + ''', '''.join([F"""\"{k}\"""" for k in sort_objects(lowerCamelCase_ )] )
return "\n".join(lowerCamelCase_ )
else:
# Finally we have to deal with imports fitting on one line
lowercase__ = _re_bracket_content.sub(_replace , lowerCamelCase_ )
return import_statement
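# e.g. a single-line statement such as
#   _import_structure["models.bert"] = ["b", "a", "C"]
# comes back with its bracketed names reordered:
#   _import_structure["models.bert"] = ["C", "a", "b"]
# (constants first, then classes, then functions, each group sorted).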
def a ( lowerCamelCase_ , lowerCamelCase_=True ):
'''simple docstring'''
with open(lowerCamelCase_ , encoding='''utf-8''' ) as f:
lowercase__ = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowercase__ = split_code_in_indented_blocks(
lowerCamelCase_ , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(lowerCamelCase_ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
lowercase__ = main_blocks[block_idx]
lowercase__ = block.split('''\n''' )
# Get to the start of the imports.
lowercase__ = 0
while line_idx < len(lowerCamelCase_ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowercase__ = len(lowerCamelCase_ )
else:
line_idx += 1
if line_idx >= len(lowerCamelCase_ ):
continue
# Ignore beginning and last line: they don't contain anything.
lowercase__ = '''\n'''.join(block_lines[line_idx:-1] )
lowercase__ = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
lowercase__ = split_code_in_indented_blocks(lowerCamelCase_ , indent_level=lowerCamelCase_ )
# We have two categories of import key: list or _import_structure[key].append/extend
lowercase__ = _re_direct_key if '''_import_structure = {''' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
lowercase__ = [(pattern.search(lowerCamelCase_ ).groups()[0] if pattern.search(lowerCamelCase_ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowercase__ = [(i, key) for i, key in enumerate(lowerCamelCase_ ) if key is not None]
lowercase__ = [x[0] for x in sorted(lowerCamelCase_ , key=lambda lowerCamelCase_ : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
lowercase__ = 0
lowercase__ = []
for i in range(len(lowerCamelCase_ ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
lowercase__ = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(lowerCamelCase_ )
count += 1
# And we put our main block back together with its first and last line.
lowercase__ = '''\n'''.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(lowerCamelCase_ ):
if check_only:
return True
else:
print(F"""Overwriting {file}.""" )
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(lowerCamelCase_ ) )
def a ( lowerCamelCase_=True ):
'''simple docstring'''
lowercase__ = []
for root, _, files in os.walk(lowerCamelCase_ ):
if "__init__.py" in files:
lowercase__ = sort_imports(os.path.join(lowerCamelCase_ , '''__init__.py''' ) , check_only=lowerCamelCase_ )
if result:
lowercase__ = [os.path.join(lowerCamelCase_ , '''__init__.py''' )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(F"""Would overwrite {len(lowerCamelCase_ )} files, run `make style`.""" )
if __name__ == "__main__":
A__ : str = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
A__ : int = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 671 | 1 |
def a ( lowerCamelCase_ ):
'''simple docstring'''
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise TypeError('''only integers accepted as input''' )
else:
lowercase__ = str(abs(lowerCamelCase_ ) )
lowercase__ = [list(lowerCamelCase_ ) for char in range(len(lowerCamelCase_ ) )]
for index in range(len(lowerCamelCase_ ) ):
num_transpositions[index].pop(lowerCamelCase_ )
return max(
int(''''''.join(list(lowerCamelCase_ ) ) ) for transposition in num_transpositions )
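# Worked example of the intended behaviour: for 371, removing one digit leaves
# the candidates 71, 31 and 37, so the maximum is 71.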
if __name__ == "__main__":
__import__('doctest').testmod()
| 671 |
from math import sqrt
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' must been an int and positive"
lowercase__ = True
    # 0 and 1 are not primes.
if number <= 1:
lowercase__ = False
for divisor in range(2 , int(round(sqrt(lowerCamelCase_ ) ) ) + 1 ):
        # if 'number' is divisible by 'divisor', set 'status'
        # to False and break out of the loop.
if number % divisor == 0:
lowercase__ = False
break
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'status' must been from type bool"
return status
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
lowercase__ = list(range(2 , n + 1 ) )
    lowercase__ = [] # this list will be returned.
    # actual sieve of Eratosthenes
for i in range(len(lowerCamelCase_ ) ):
for j in range(i + 1 , len(lowerCamelCase_ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
lowercase__ = 0
# filters actual prime numbers.
lowercase__ = [x for x in begin_list if x != 0]
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type list"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n > 2), "'N' must been an int and > 2"
lowercase__ = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(lowerCamelCase_ ):
ans.append(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type list"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and number >= 0, "'number' must been an int and >= 0"
    lowercase__ = [] # this list will be returned by the function.
# potential prime number factors.
lowercase__ = 2
lowercase__ = number
if number == 0 or number == 1:
ans.append(lowerCamelCase_ )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(lowerCamelCase_ ):
while quotient != 1:
if is_prime(lowerCamelCase_ ) and (quotient % factor == 0):
ans.append(lowerCamelCase_ )
quotient /= factor
else:
factor += 1
else:
ans.append(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type list"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
    ), "'number' must been an int and >= 0"
lowercase__ = 0
# prime factorization of 'number'
lowercase__ = prime_factorization(lowerCamelCase_ )
lowercase__ = max(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type int"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
    ), "'number' must been an int and >= 0"
lowercase__ = 0
# prime factorization of 'number'
lowercase__ = prime_factorization(lowerCamelCase_ )
lowercase__ = min(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type int"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'number' must been an int"
    assert isinstance(number % 2 == 0 , lowerCamelCase_ ), "compare must been from type bool"
return number % 2 == 0
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'number' must been an int"
    assert isinstance(number % 2 != 0 , lowerCamelCase_ ), "compare must been from type bool"
return number % 2 != 0
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (number > 2) and is_even(lowerCamelCase_ )
), "'number' must been an int, even and > 2"
    lowercase__ = [] # this list will be returned
# creates a list of prime numbers between 2 up to 'number'
lowercase__ = get_prime_numbers(lowerCamelCase_ )
lowercase__ = len(lowerCamelCase_ )
# run variable for while-loops.
lowercase__ = 0
lowercase__ = None
    # exit variable, to break out of the loops
lowercase__ = True
while i < len_pn and loop:
lowercase__ = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
lowercase__ = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (len(lowerCamelCase_ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
    ), "'ans' must contain two primes, and the sum of its elements must equal 'number'"
return ans
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
lowercase__ = 0
while numbera != 0:
lowercase__ = numbera % numbera
lowercase__ = numbera
lowercase__ = rest
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
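# Euclid's algorithm by repeated remainders, e.g. gcd(54, 24):
#   54 % 24 = 6, then 24 % 6 = 0, so the gcd is 6.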
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
    lowercase__ = 1 # actual answer that will be returned.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
lowercase__ = prime_factorization(lowerCamelCase_ )
lowercase__ = prime_factorization(lowerCamelCase_ )
elif numbera == 1 or numbera == 1:
lowercase__ = []
lowercase__ = []
lowercase__ = max(lowerCamelCase_ , lowerCamelCase_ )
lowercase__ = 0
lowercase__ = 0
    lowercase__ = [] # captured numbers in both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
for _ in range(max(lowerCamelCase_ , lowerCamelCase_ ) ):
ans *= n
else:
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
for _ in range(lowerCamelCase_ ):
ans *= n
done.append(lowerCamelCase_ )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
for _ in range(lowerCamelCase_ ):
ans *= n
done.append(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'number' must been a positive int"
lowercase__ = 0
lowercase__ = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
        # if ans is not prime,
        # advance to the next prime number.
while not is_prime(lowerCamelCase_ ):
ans += 1
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and is_prime(
lowerCamelCase_ ), "'ans' must been a prime number and from type int"
return ans
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
is_prime(lowerCamelCase_ ) and is_prime(lowerCamelCase_ ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
lowercase__ = p_number_a + 1 # jump to the next number
    lowercase__ = [] # this list will be returned.
# if number is not prime then
# fetch the next prime number.
while not is_prime(lowerCamelCase_ ):
number += 1
while number < p_number_a:
ans.append(lowerCamelCase_ )
number += 1
# fetch the next prime number.
while not is_prime(lowerCamelCase_ ):
number += 1
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and ans[0] != p_number_a
and ans[len(lowerCamelCase_ ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
    # 'ans' must not contain 'pNumber1' or 'pNumber2'!
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 1), "'n' must been int and >= 1"
lowercase__ = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(lowerCamelCase_ )
# precondition
    assert ans[0] == 1 and ans[len(lowerCamelCase_ ) - 1] == n, "Error in function getDivisors(...)"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number > 1
), "'number' must been an int and >= 1"
lowercase__ = get_divisors(lowerCamelCase_ )
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (divisors[0] == 1)
and (divisors[len(lowerCamelCase_ ) - 1] == number)
    ), "Error in help-function getDivisors(...)"
    # sum all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
lowercase__ = gcd(abs(lowerCamelCase_ ) , abs(lowerCamelCase_ ) )
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'n' must been a int and >= 0"
    lowercase__ = 1 # this will be returned.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'n' must been an int and >= 0"
lowercase__ = 0
lowercase__ = 1
    lowercase__ = 1 # this will be returned
for _ in range(n - 1 ):
lowercase__ = ans
ans += fiba
lowercase__ = tmp
return ans
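# Iterative Fibonacci (with the original variable roles restored): for n = 5
# the accumulator goes 1 -> 2 -> 3 -> 5 -> 8 and the function returns 8.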
| 671 | 1 |
from __future__ import annotations
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
if partitions <= 0:
raise ValueError('''partitions must be a positive number!''' )
if partitions > number_of_bytes:
raise ValueError('''partitions can not > number_of_bytes!''' )
lowercase__ = number_of_bytes // partitions
lowercase__ = []
for i in range(lowerCamelCase_ ):
lowercase__ = i * bytes_per_partition + 1
lowercase__ = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(F"""{start_bytes}-{end_bytes}""" )
return allocation_list
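# e.g. allocating 100 bytes over 4 partitions yields
#   ["1-25", "26-50", "51-75", "76-100"]
# (the last partition absorbs any remainder from the integer division).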
if __name__ == "__main__":
import doctest
doctest.testmod()
| 671 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = args.log_outputs
lowercase__ = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )
# load metric
lowercase__ = load_metric('''wer''' )
lowercase__ = load_metric('''cer''' )
# compute metrics
lowercase__ = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
lowercase__ = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
# print & log results
lowercase__ = F"""WER: {wer_result}\nCER: {cer_result}"""
print(lowerCamelCase_ )
with open(F"""{dataset_id}_eval_results.txt""" , '''w''' ) as f:
f.write(lowerCamelCase_ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
lowercase__ = F"""log_{dataset_id}_predictions.txt"""
lowercase__ = F"""log_{dataset_id}_targets.txt"""
with open(lowerCamelCase_ , '''w''' ) as p, open(lowerCamelCase_ , '''w''' ) as t:
# mapping function to write output
def write_to_file(lowerCamelCase_ , lowerCamelCase_ ):
p.write(F"""{i}""" + '''\n''' )
p.write(batch['''prediction'''] + '''\n''' )
t.write(F"""{i}""" + '''\n''' )
t.write(batch['''target'''] + '''\n''' )
result.map(lowerCamelCase_ , with_indices=lowerCamelCase_ )
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
lowercase__ = re.sub(lowerCamelCase_ , '''''' , text.lower() )
    # In addition, we can normalize the target text, e.g. removing newline characters etc...
# note that order is important here!
lowercase__ = ['''\n\n''', '''\n''', ''' ''', ''' ''']
for t in token_sequences_to_ignore:
lowercase__ = ''' '''.join(text.split(lowerCamelCase_ ) )
return text
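# e.g. normalize_text("Hello, world") -> "hello world"
# (punctuation stripped, text lower-cased, and newline / multi-space
# sequences collapsed to single spaces).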
def a ( lowerCamelCase_ ):
'''simple docstring'''
# load dataset
lowercase__ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=lowerCamelCase_ )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
lowercase__ = AutoFeatureExtractor.from_pretrained(args.model_id )
lowercase__ = feature_extractor.sampling_rate
# resample audio
lowercase__ = dataset.cast_column('''audio''' , Audio(sampling_rate=lowerCamelCase_ ) )
# load eval pipeline
if args.device is None:
lowercase__ = 0 if torch.cuda.is_available() else -1
lowercase__ = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(lowerCamelCase_ ):
lowercase__ = asr(
batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
lowercase__ = prediction['''text''']
lowercase__ = normalize_text(batch['''sentence'''] )
return batch
# run inference on all examples
lowercase__ = dataset.map(lowerCamelCase_ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(lowerCamelCase_ , lowerCamelCase_ )
if __name__ == "__main__":
A__ : int = argparse.ArgumentParser()
parser.add_argument(
'--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
)
parser.add_argument(
'--dataset',
type=str,
required=True,
help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
)
parser.add_argument(
'--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
)
parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
parser.add_argument(
'--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.'
)
parser.add_argument(
'--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.'
)
parser.add_argument(
'--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
)
parser.add_argument(
'--device',
type=int,
default=None,
help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
)
A__ : Union[str, Any] = parser.parse_args()
main(args)
| 671 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__ : Any = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Dict = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
A__ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 671 |
from functools import reduce
A__ : Union[str, Any] = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def a ( lowerCamelCase_ = N ):
'''simple docstring'''
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda lowerCamelCase_ , lowerCamelCase_ : str(int(lowerCamelCase_ ) * int(lowerCamelCase_ ) ) , n[i : i + 13] ) )
for i in range(len(lowerCamelCase_ ) - 12 ) )
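# Project Euler 8: the greatest product of 13 adjacent digits in the
# 1000-digit number above. The published answer is 23514624000.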
if __name__ == "__main__":
print(F"{solution() = }")
| 671 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
A__ : Union[str, Any] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt')
A__ : List[str] = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
A__ : List[str] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def a ( lowerCamelCase_ ):
'''simple docstring'''
with open(lowerCamelCase_ , '''rb''' ) as f:
lowercase__ = Image.open(lowerCamelCase_ )
return im.convert('''RGB''' )
@dataclass
class DataTrainingArguments:
    """simple docstring"""
    dataset_name: Optional[str] = field(
        default=None ,metadata={
            """help""": """Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."""
        } ,)
    dataset_config_name: Optional[str] = field(
        default=None ,metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
    train_dir: Optional[str] = field(default=None ,metadata={"""help""": """A folder containing the training data."""} )
    validation_dir: Optional[str] = field(default=None ,metadata={"""help""": """A folder containing the validation data."""} )
    train_val_split: Optional[float] = field(
        default=0.15 ,metadata={"""help""": """Percent to split off of train for validation."""} )
    max_train_samples: Optional[int] = field(
        default=None ,metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of training examples to this """
                """value if set."""
            )
        } ,)
    max_eval_samples: Optional[int] = field(
        default=None ,metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of evaluation examples to this """
                """value if set."""
            )
        } ,)
    def __post_init__( self : Any ):
        '''simple docstring'''
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                '''You must specify either a dataset name from the hub or a train and/or validation directory.''' )
@dataclass
class ModelArguments:
    """simple docstring"""
    model_name_or_path: str = field(
        default="""google/vit-base-patch16-224-in21k""" ,metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ,)
    model_type: Optional[str] = field(
        default=None ,metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(MODEL_TYPES )} ,)
    config_name: Optional[str] = field(
        default=None ,metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    cache_dir: Optional[str] = field(
        default=None ,metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
    model_revision: str = field(
        default="""main""" ,metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} ,)
    image_processor_name: str = field(default=None ,metadata={"""help""": """Name or path of preprocessor config."""} )
    use_auth_token: bool = field(
        default=False ,metadata={
            """help""": (
                """Will use the token generated when running `huggingface-cli login` (necessary to use this script """
                """with private models)."""
            )
        } ,)
    ignore_mismatched_sizes: bool = field(
        default=False ,metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} ,)
def collate_fn( examples ):
    '''simple docstring'''
    pixel_values = torch.stack([example['''pixel_values'''] for example in examples] )
    labels = torch.tensor([example['''labels'''] for example in examples] )
    return {"pixel_values": pixel_values, "labels": labels}
def main( ):
'''simple docstring'''
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('''run_image_classification''' , model_args , data_args )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task='''image-classification''' , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files['''train'''] = os.path.join(data_args.train_dir , '''**''' )
        if data_args.validation_dir is not None:
            data_files['''validation'''] = os.path.join(data_args.validation_dir , '''**''' )
        dataset = load_dataset(
            '''imagefolder''' , data_files=data_files , cache_dir=model_args.cache_dir , task='''image-classification''' , )
# If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if '''validation''' in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
        split = dataset['''train'''].train_test_split(data_args.train_val_split )
        dataset['''train'''] = split['''train''']
        dataset['''validation'''] = split['''test''']
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset['''train'''].features['''labels'''].names
    label2id , id2label = {}, {}
    for i, label in enumerate(labels ):
        label2id[label] = str(i )
        id2label[str(i )] = label
# Load the accuracy metric from the datasets package
    metric = evaluate.load('''accuracy''' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p ):
        return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(labels ) , label2id=label2id , id2label=id2label , finetuning_task='''image-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
        size = image_processor.size['''shortest_edge''']
    else:
        size = (image_processor.size['''height'''], image_processor.size['''width'''])
    normalize = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
    _train_transforms = Compose(
        [
            RandomResizedCrop(size ),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ] )
    _val_transforms = Compose(
        [
            Resize(size ),
            CenterCrop(size ),
            ToTensor(),
            normalize,
        ] )
    def train_transforms(example_batch ):
        example_batch['''pixel_values'''] = [
            _train_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']
        ]
        return example_batch
    def val_transforms(example_batch ):
        example_batch['''pixel_values'''] = [_val_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']]
        return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError('''--do_train requires a train dataset''' )
if data_args.max_train_samples is not None:
            dataset['''train'''] = (
                dataset['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError('''--do_eval requires a validation dataset''' )
if data_args.max_eval_samples is not None:
            dataset['''validation'''] = (
                dataset['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms )
# Initalize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=dataset['''train'''] if training_args.do_train else None , eval_dataset=dataset['''validation'''] if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=image_processor , data_collator=collate_fn , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('''eval''' , metrics )
        trainer.save_metrics('''eval''' , metrics )
# Write model card and (optionally) push to hub
    kwargs = {
        '''finetuned_from''': model_args.model_name_or_path,
        '''tasks''': '''image-classification''',
        '''dataset''': data_args.dataset_name,
        '''tags''': ['''image-classification''', '''vision'''],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
| 671 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput ( BaseOutput ):
    """simple docstring"""
    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor
class ScoreSdeVeScheduler ( SchedulerMixin , ConfigMixin ):
    """simple docstring"""
    order = 1
    @register_to_config
    def __init__( self : Union[str, Any], num_train_timesteps : int = 2_000, snr : float = 0.15, sigma_min : float = 0.01, sigma_max : float = 1348.0, sampling_eps : float = 1E-5, correct_steps : int = 1, ):
        '''simple docstring'''
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.timesteps = None
        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps )
    def scale_model_input( self : List[str], sample : torch.FloatTensor, timestep : Optional[int] = None ):
        '''simple docstring'''
        return sample
    def set_timesteps( self : Dict, num_inference_steps : int, sampling_eps : float = None, device : Union[str, torch.device] = None ):
        '''simple docstring'''
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device )
    def set_sigmas( self : str, num_inference_steps : int, sigma_min : float = None, sigma_max : float = None, sampling_eps : float = None ):
        '''simple docstring'''
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps )
        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min ), math.log(sigma_max ), num_inference_steps ) )
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
    def get_adjacent_sigma( self : Optional[int], timesteps : torch.Tensor, t : torch.Tensor ):
        '''simple docstring'''
        return torch.where(
            timesteps == 0, torch.zeros_like(t.to(timesteps.device ) ), self.discrete_sigmas[timesteps - 1].to(timesteps.device ), )
    def step_pred( self : Tuple, model_output : torch.FloatTensor, timestep : int, sample : torch.FloatTensor, generator : Optional[torch.Generator] = None, return_dict : bool = True, ):
        '''simple docstring'''
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps ) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device )
        sigma = self.discrete_sigmas[timesteps].to(sample.device )
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep ).to(sample.device )
        drift = torch.zeros_like(sample )
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape ) < len(sample.shape ):
            diffusion = diffusion.unsqueeze(-1 )
        drift = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean )
    def step_correct( self : int, model_output : torch.FloatTensor, sample : torch.FloatTensor, generator : Optional[torch.Generator] = None, return_dict : bool = True, ):
        '''simple docstring'''
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator ).to(sample.device )
        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1 ), dim=-1 ).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1 ), dim=-1 ).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0] ).to(sample.device )
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape ) < len(sample.shape ):
            step_size = step_size.unsqueeze(-1 )
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )
    def add_noise( self : List[str], original_samples : torch.FloatTensor, noise : torch.FloatTensor, timesteps : torch.FloatTensor, ):
        '''simple docstring'''
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device )
        sigmas = self.discrete_sigmas.to(original_samples.device )[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples ) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples
    def __len__( self : Union[str, Any] ):
        '''simple docstring'''
        return self.config.num_train_timesteps
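# Minimal predictor-corrector usage sketch (the tensor shape and the random
# score-model outputs below are illustrative stand-ins, not part of the
# original file):
if __name__ == "__main__":
    scheduler = ScoreSdeVeScheduler()
    scheduler.set_timesteps(10 )
    scheduler.set_sigmas(10 )
    sample = torch.randn(1, 3, 8, 8 ) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_output = torch.randn_like(sample )  # stand-in for a trained score model
        sample = scheduler.step_correct(model_output, sample ).prev_sample
        sample = scheduler.step_pred(model_output, t, sample ).prev_sample
    print(sample.shape )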
| 671 | 1 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest ( SchedulerCommonTest ):
    """simple docstring"""
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("""num_inference_steps""", 25),)
    def get_scheduler_config( self : List[Any], **kwargs : str ):
        '''simple docstring'''
        config = {
'''num_train_timesteps''': 1_000,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
'''sample_max_value''': 1.0,
'''algorithm_type''': '''dpmsolver++''',
'''solver_type''': '''midpoint''',
'''lambda_min_clipped''': -float('''inf''' ),
'''variance_type''': None,
}
        config.update(**kwargs )
return config
    def check_over_configs( self : str, time_step : int=0, **config : int ):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''', None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output , new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1 ):
                output = scheduler.step(residual, t, output, **kwargs ).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
pass
    def check_over_forward( self : Optional[Any], time_step : Optional[int]=0, **forward_kwargs : Dict ):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''', None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output = scheduler.step(residual, time_step, sample, **kwargs ).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def full_loop( self : Optional[Any], scheduler : List[str]=None, **config : Optional[Any] ):
        '''simple docstring'''
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample, t )
            sample = scheduler.step(residual, t, sample ).prev_sample
return sample
    def test_full_uneven_loop( self : str ):
        '''simple docstring'''
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:] ):
            residual = model(sample, t )
            sample = scheduler.step(residual, t, sample ).prev_sample
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.2574 ) < 1E-3
    def test_timesteps( self : Tuple ):
        '''simple docstring'''
        for timesteps in [25, 50, 100, 999, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_switch( self : str ):
        '''simple docstring'''
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2791 ) < 1E-3
        scheduler = DEISMultistepScheduler.from_config(scheduler.config )
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config )
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config )
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2791 ) < 1E-3
    def test_thresholding( self : Optional[Any] ):
        '''simple docstring'''
        self.check_over_configs(thresholding=False )
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, algorithm_type='''dpmsolver++''', solver_order=order, solver_type=solver_type, )
    def test_prediction_type( self : Dict ):
        '''simple docstring'''
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_solver_order_and_type( self : Tuple ):
        '''simple docstring'''
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type, )
                        sample = self.full_loop(
                            solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type, )
                        assert not torch.isnan(sample ).any(), "Samples have nan numbers"
    def test_lower_order_final( self : Optional[Any] ):
        '''simple docstring'''
        self.check_over_configs(lower_order_final=True )
        self.check_over_configs(lower_order_final=False )
    def test_lambda_min_clipped( self : List[str] ):
'''simple docstring'''
self.check_over_configs(lambda_min_clipped=-float('''inf''' ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
    def test_variance_type( self : Any ):
        '''simple docstring'''
        self.check_over_configs(variance_type=None )
self.check_over_configs(variance_type='''learned_range''' )
    def test_inference_steps( self : Dict ):
        '''simple docstring'''
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0 )
    def test_full_loop_no_noise( self : Optional[int] ):
        '''simple docstring'''
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
    def test_full_loop_with_karras( self : List[Any] ):
        '''simple docstring'''
        sample = self.full_loop(use_karras_sigmas=True )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.2248 ) < 1E-3
    def test_full_loop_with_v_prediction( self : Any ):
        '''simple docstring'''
        sample = self.full_loop(prediction_type='''v_prediction''' )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.1453 ) < 1E-3
    def test_full_loop_with_karras_and_v_prediction( self : Optional[int] ):
        '''simple docstring'''
        sample = self.full_loop(prediction_type='''v_prediction''', use_karras_sigmas=True )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.0649 ) < 1E-3
    def test_fp16_support( self : Optional[int] ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0 )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample, t )
            sample = scheduler.step(residual, t, sample ).prev_sample
        assert sample.dtype == torch.float16
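# Standalone usage sketch of the scheduler under test, independent of the
# unittest harness (the tensor shape and random model outputs are illustrative
# stand-ins only):
if __name__ == "__main__":
    demo_scheduler = DPMSolverSinglestepScheduler()
    demo_scheduler.set_timesteps(10 )
    demo_sample = torch.randn(1, 3, 8, 8 )
    for demo_t in demo_scheduler.timesteps:
        demo_sample = demo_scheduler.step(torch.randn_like(demo_sample ), demo_t, demo_sample ).prev_sample
    print(demo_sample.shape )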
| 671 |
from collections import defaultdict
from math import gcd
def solution( limit = 150_0000 ):
    '''simple docstring'''
    frequencies = defaultdict(int )
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1 , euclid_m , 2 ):
            if gcd(euclid_m , euclid_n ) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter , limit + 1 , primitive_perimeter ):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(F"{solution() = }")
| 671 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_deit'] = ['DeiTFeatureExtractor']
    _import_structure['image_processing_deit'] = ['DeiTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_deit'] = [
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_deit'] = [
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 671 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 5_12,
'bert-large-uncased': 5_12,
'bert-base-cased': 5_12,
'bert-large-cased': 5_12,
'bert-base-multilingual-uncased': 5_12,
'bert-base-multilingual-cased': 5_12,
'bert-base-chinese': 5_12,
'bert-base-german-cased': 5_12,
'bert-large-uncased-whole-word-masking': 5_12,
'bert-large-cased-whole-word-masking': 5_12,
'bert-large-uncased-whole-word-masking-finetuned-squad': 5_12,
'bert-large-cased-whole-word-masking-finetuned-squad': 5_12,
'bert-base-cased-finetuned-mrpc': 5_12,
'bert-base-german-dbmdz-cased': 5_12,
'bert-base-german-dbmdz-uncased': 5_12,
'TurkuNLP/bert-base-finnish-cased-v1': 5_12,
'TurkuNLP/bert-base-finnish-uncased-v1': 5_12,
'wietsedv/bert-base-dutch-cased': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast ( PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
    def __init__( self : Any, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        '''simple docstring'''
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''', do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''', strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''', tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self : Any, token_ids_0, token_ids_1=None ):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self : List[Any], token_ids_0 : List[int], token_ids_1 : Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self : Any, save_directory : str, filename_prefix : Optional[str] = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix )
        return tuple(files )
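# Illustrative segment-id layout mirroring create_token_type_ids_from_sequences
# (the token ids below are arbitrary placeholders, not real vocabulary ids):
if __name__ == "__main__":
    demo_ids_0, demo_ids_1 = [7, 8, 9], [4, 5]
    # [CLS] A [SEP] -> 0s, then B [SEP] -> 1s
    print([0] * (len(demo_ids_0 ) + 2 ) + [1] * (len(demo_ids_1 ) + 1 ))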
| 671 | 1 |
import math
def insertion_sort( array , start = 0 , end = 0 ):
    '''simple docstring'''
    end = end or len(array )
    for i in range(start , end ):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array
def heapify( array , index , heap_size ):  # Max Heap
    '''simple docstring'''
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index] , array[largest] = array[largest], array[index]
        heapify(array , largest , heap_size )
def heap_sort( array ):
    '''simple docstring'''
    n = len(array )
    for i in range(n // 2 , -1 , -1 ):
        heapify(array , i , n )
    for i in range(n - 1 , 0 , -1 ):
        array[0] , array[i] = array[i], array[0]
        heapify(array , 0 , i )
    return array
def median_of_3( array , first_index , middle_index , last_index ):
    '''simple docstring'''
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]
def partition( array , low , high , pivot ):
    '''simple docstring'''
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i] , array[j] = array[j], array[i]
        i += 1
def sort( array ):
    '''simple docstring'''
    if len(array ) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array ) ) )
    size_threshold = 16
    return intro_sort(array , 0 , len(array ) , size_threshold , max_depth )
def intro_sort( array , start , end , size_threshold , max_depth ):
    '''simple docstring'''
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array )
        max_depth -= 1
        pivot = median_of_3(array , start , start + ((end - start) // 2) + 1 , end - 1 )
        p = partition(array , start , end , pivot )
        intro_sort(array , p , end , size_threshold , max_depth )
        end = p
    return insertion_sort(array , start , end )
if __name__ == "__main__":
import doctest
doctest.testmod()
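    # Quick illustrative check (the sample list is arbitrary): the hybrid sort
    # should agree with Python's built-in sorted().
    demo_data = [4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12]
    assert sort(demo_data[:] ) == sorted(demo_data )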
    user_input = input('Enter numbers separated by a comma : ').strip()
    unsorted = [float(item) for item in user_input.split(',')]
print(sort(unsorted))
| 671 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_unispeech'] = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
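# For illustration, a minimal self-contained sketch of the lazy-attribute pattern
# behind `_LazyModule` (heavily simplified; the real transformers class also
# handles TYPE_CHECKING re-exports, pickling and error reporting):
import importlib
import types

class _TinyLazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name )
        self._import_structure = import_structure
    def __getattr__(self, attr):
        # Import the submodule that declares `attr` only on first access.
        for submodule, names in self._import_structure.items():
            if attr in names:
                return getattr(importlib.import_module('.' + submodule, self.__name__ ), attr )
        raise AttributeError(attr )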
| 671 | 1 |
from jiwer import compute_measures
import datasets
_CITATION = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n    references: List of references for each speech input.\n    predictions: List of transcriptions to score.\n    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n    (float): the word error rate\n\nExamples:\n\n    >>> predictions = ["this is the prediction", "there is an other sample"]\n    >>> references = ["this is the reference", "there is another one"]\n    >>> wer = datasets.load_metric("wer")\n    >>> wer_score = wer.compute(predictions=predictions, references=references)\n    >>> print(wer_score)\n    0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
"""simple docstring"""
    def _info( self : Optional[Any] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'''predictions''': datasets.Value('''string''', id='''sequence''' ),
'''references''': datasets.Value('''string''', id='''sequence''' ),
} ), codebase_urls=['''https://github.com/jitsi/jiwer/'''], reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
], )
    def _compute( self : Optional[Any], predictions : List[str]=None, references : List[Any]=None, concatenate_texts : Optional[Any]=False ):
        '''simple docstring'''
        if concatenate_texts:
            return compute_measures(references, predictions )["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references ):
                measures = compute_measures(reference, prediction )
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
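# Minimal end-to-end sketch mirroring the docstring example, computed directly
# with jiwer rather than through datasets.load_metric (the strings below are
# illustrative):
if __name__ == "__main__":
    demo_predictions = ["this is the prediction", "there is an other sample"]
    demo_references = ["this is the reference", "there is another one"]
    demo_incorrect = demo_total = 0
    for demo_p, demo_r in zip(demo_predictions, demo_references ):
        demo_m = compute_measures(demo_r, demo_p )
        demo_incorrect += demo_m["substitutions"] + demo_m["deletions"] + demo_m["insertions"]
        demo_total += demo_m["substitutions"] + demo_m["deletions"] + demo_m["hits"]
    print(demo_incorrect / demo_total )  # expected 0.5, as in the docstring example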
| 671 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_00_00
RESULTS_BASEPATH , RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def map( dataset , **lowerCamelCase_ ):
    '''simple docstring'''
    lowercase__ = dataset.map(**lowerCamelCase_ )
@get_duration
def filter( dataset , **lowerCamelCase_ ):
    '''simple docstring'''
    lowercase__ = dataset.filter(**lowerCamelCase_ )
def benchmark_map_filter( ):
    '''simple docstring'''
    times = {'''num examples''': SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir , '''dataset.arrow''' ) , features , num_examples=SPEED_TEST_N_EXAMPLES )
        tokenizer = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=True )
        def tokenize(examples ):
            return tokenizer(examples['''text'''] )
        times['''map identity'''] = map(dataset )
        times['''map identity batched'''] = map(dataset , batched=True )
        times['''map no-op batched'''] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type='''numpy''' ):
            times['''map no-op batched numpy'''] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type='''pandas''' ):
            times['''map no-op batched pandas'''] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type='''torch''' , columns='''numbers''' ):
            times['''map no-op batched pytorch'''] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type='''tensorflow''' , columns='''numbers''' ):
            times['''map no-op batched tensorflow'''] = map(dataset , function=lambda x : None , batched=True )
        times['''map fast-tokenizer batched'''] = map(dataset , function=tokenize , batched=True )
        times['''filter'''] = filter(dataset )
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
    with open(RESULTS_FILE_PATH , '''wb''' ) as f:
        f.write(json.dumps(times ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
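# For reference, a minimal sketch of what a timing decorator like the imported
# `get_duration` could look like (this is an assumption about its shape, not the
# actual implementation in the local utils module):
import functools
import time

def _example_get_duration(func):
    @functools.wraps(func )
    def wrapper(*args, **kwargs ):
        started = time.time()
        func(*args, **kwargs )
        return time.time() - started
    return wrapper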
| 671 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'],
'tokenization_mvp': ['MvpTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mvp_fast'] = ['MvpTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mvp'] = [
'MVP_PRETRAINED_MODEL_ARCHIVE_LIST',
'MvpForCausalLM',
'MvpForConditionalGeneration',
'MvpForQuestionAnswering',
'MvpForSequenceClassification',
'MvpModel',
'MvpPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 671 |
class RadixNode :
    """simple docstring"""
    def __init__( self : Optional[int], prefix : str = "", is_leaf : bool = False ):
        '''simple docstring'''
        # Mapping from the first character of the prefix of the node
        self.nodes = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix
    def match( self : Any, word : str ):
        '''simple docstring'''
        x = 0
        for q, w in zip(self.prefix, word ):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]
    def insert_many( self : Optional[int], words : list[str] ):
        '''simple docstring'''
        for word in words:
            self.insert(word )
    def insert( self : int, word : str ):
        '''simple docstring'''
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True )
        else:
            incoming_node = self.nodes[word[0]]
            matching_string , remaining_prefix , remaining_word = incoming_node.match(
                word )
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word )
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False )
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word )
    def find( self : int, word : str ):
        '''simple docstring'''
        incoming_node = self.nodes.get(word[0], None )
        if not incoming_node:
            return False
        else:
            matching_string , remaining_prefix , remaining_word = incoming_node.match(
                word )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word )
    def delete( self : Any, word : str ):
        '''simple docstring'''
        incoming_node = self.nodes.get(word[0], None )
        if not incoming_node:
            return False
        else:
            matching_string , remaining_prefix , remaining_word = incoming_node.match(
                word )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word )
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes ) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes ) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values() )[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes ) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values() )[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True
    def print_tree( self : Union[str, Any], height : int = 0 ):
        '''simple docstring'''
        if self.prefix != "":
            print('''-''' * height, self.prefix, ''' (leaf)''' if self.is_leaf else '''''' )
        for value in self.nodes.values():
            value.print_tree(height + 1 )
def test_trie( ):
    '''simple docstring'''
    words = '''banana bananas bandana band apple all beast'''.split()
    root = RadixNode()
    root.insert_many(words )
    assert all(root.find(word ) for word in words )
    assert not root.find('''bandanas''' )
    assert not root.find('''apps''' )
    root.delete('''all''' )
    assert not root.find('''all''' )
    root.delete('''banana''' )
    assert not root.find('''banana''' )
    assert root.find('''bananas''' )
    return True
def pytests( ):
    '''simple docstring'''
    assert test_trie()
def main( ):
    '''simple docstring'''
    root = RadixNode()
    words = '''banana bananas bandanas bandana band apple all beast'''.split()
    root.insert_many(words )
    print('''Words:''' , words )
    print('''Tree:''' )
    root.print_tree()
if __name__ == "__main__":
main()
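    # Extra minimal usage sketch (the word list below is arbitrary): build a
    # tree, then confirm a stored word is found while a bare prefix is not.
    demo = RadixNode()
    demo.insert_many('''test tester team'''.split() )
    assert demo.find('''tester''' ) and not demo.find('''tes''' )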
| 671 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt',
},
'tokenizer_file': {
'unc-nlp/lxmert-base-uncased': (
'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'unc-nlp/lxmert-base-uncased': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'unc-nlp/lxmert-base-uncased': {'do_lower_case': True},
}
class LxmertTokenizerFast ( PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs by adding the [CLS] and [SEP] special tokens."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Create token type IDs: 0 over "[CLS] A [SEP]", 1 over "B [SEP]"."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
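# A minimal usage sketch (hedged: assumes the "unc-nlp/lxmert-base-uncased"
# checkpoint is reachable; identifiers below mirror the class defined above):
#
#   tokenizer = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
#   ids_a = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("a cat"))
#   ids_b = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("what animal?"))
#   tokenizer.build_inputs_with_special_tokens(ids_a, ids_b)
#   # -> [CLS] ids_a [SEP] ids_b [SEP]
#   tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b)
#   # -> zeros over "[CLS] ids_a [SEP]", ones over "ids_b [SEP]"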
| 671 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        """Create a random PIL image for the processor tests."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decode_strs, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_processor_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(
            list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"]
        )
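# Note on the batch-decode test above (an inference from the shapes, not from the
# original file): the three logits streams correspond to MGP-STR's three decoding
# heads — 38 symbols for the character head, 50257 (the GPT-2 BPE vocab size) for
# the BPE head, and 30522 (the BERT WordPiece vocab size) for the WordPiece head:
#
#   results = processor.batch_decode(
#       [torch.randn(1, 27, 38), torch.randn(1, 27, 50257), torch.randn(1, 27, 30522)]
#   )
#   print(results["generated_text"])   # best decode across the three heads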
| 671 | 1 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12

        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
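# Worked example of the patch-size parsing above (traced by hand):
#
#   name = "xclip-large-patch14-16-frames"
#   i = name.find("patch")       # 12
#   int(name[i + 5 : i + 7])     # -> 14, the two characters right after "patch"
#
# so "xclip-base-patch32" yields patch_size=32, and the large 16-frame model
# additionally switches the image size to 336.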
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")

    return name
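# A quick illustration of rename_key (traced by hand from the rules above):
#
#   rename_key("visual.transformer.resblocks.0.ln_1.weight")
#   # -> "vision_model.encoder.layers.0.layer_norm1.weight"
#   rename_key("transformer.resblocks.3.mlp.c_fc.bias")
#   # -> "text_model.encoder.layers.3.mlp.fc1.bias"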
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[-dim:]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
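# The in_proj splitting above follows the usual fused-QKV layout (a sketch):
# a fused weight has shape (3 * dim, dim), stacked as [query; key; value], so
#
#   q_w, k_w, v_w = val[:dim, :], val[dim : dim * 2, :], val[-dim:, :]
#
# and the matching bias of shape (3 * dim,) splits along the same boundaries.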
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original X-CLIP weights into the HF XCLIP structure."""
    model_to_url = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
    checkpoint_url = model_to_url[model_name]

    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='xclip-base-patch32',
type=str,
help='Name of the model.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
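# Example invocation (a sketch; the flags are the ones defined by argparse above,
# and the script filename here is illustrative — use whatever this file is saved as):
#
#   python convert_x_clip_original_pytorch_to_hf.py \
#       --model_name xclip-base-patch32 \
#       --pytorch_dump_folder_path ./xclip-base-patch32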
| 671 |
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Recursive fast exponentiation by squaring: base**exponent % modulo_value."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value
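# Sanity check (an illustrative sketch): _modexpt agrees with Python's built-in
# three-argument pow for positive exponents.
#
#   assert _modexpt(7, 13, 1000) == pow(7, 13, 1000)
#   assert _modexpt(1777, 1855, 10**8) == pow(1777, 1855, 10**8)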
def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Return the last `digits` digits of the hyperexponentiation base↑↑height
    (Project Euler problem 188), evaluated from the top of the tower down with
    modular arithmetic so only `digits` digits are ever carried."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
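# Worked example (traced by hand): for a small tower, solution(base=2, height=3, digits=2)
# computes 2**(2**2) mod 100:
#
#   result = 2
#   result = _modexpt(2, 2, 100)   # -> 4
#   result = _modexpt(2, 4, 100)   # -> 16
#
# and indeed 2**(2**2) = 2**4 = 16.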
| 671 | 1 |