Dataset schema (column · type · min · max):

    code                       string (length)    86     54.5k
    code_codestyle             int64              0      371
    style_context              string (length)    87     49.2k
    style_context_codestyle    int64              0      349
    label                      int64              0      1
import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class __magic_name__ (__lowercase ): lowerCamelCase__ = '''''' lowerCamelCase__ = ( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) lowerCamelCase__ = None # compression type in fsspec. ex: "gzip" lowerCamelCase__ = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz def __init__( self , _a = "" , _a = None , _a = None , **_a ) -> str: super().__init__(self , **_a ) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode lowerCAmelCase_ = fsspec.open( _a , mode="rb" , protocol=_a , compression=self.compression , client_kwargs={ "requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459 "trust_env": True, # Enable reading proxy env variables. **(target_options or {}).pop("client_kwargs" , {} ), # To avoid issues if it was already passed. } , **(target_options or {}) , ) lowerCAmelCase_ = os.path.basename(self.file.path.split("::" )[0] ) lowerCAmelCase_ = ( self.compressed_name[: self.compressed_name.rindex("." )] if "." in self.compressed_name else self.compressed_name ) lowerCAmelCase_ = None @classmethod def __a ( cls , _a ) -> Optional[Any]: # compressed file paths are always relative to the archive root return super()._strip_protocol(_a ).lstrip("/" ) def __a ( self ) -> Tuple: if self.dir_cache is None: lowerCAmelCase_ = {**self.file.fs.info(self.file.path ), "name": self.uncompressed_name} lowerCAmelCase_ = {f["name"]: f} def __a ( self , _a ) -> List[str]: return self.file.open().read() def __a ( self , _a , _a = "rb" , _a=None , _a=True , _a=None , **_a , ) -> str: lowerCAmelCase_ = self._strip_protocol(_a ) if mode != "rb": raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'" ) return self.file.open() class __magic_name__ (__lowercase ): lowerCamelCase__ = '''bz2''' lowerCamelCase__ = '''bz2''' lowerCamelCase__ = '''.bz2''' class __magic_name__ (__lowercase ): lowerCamelCase__ = '''gzip''' lowerCamelCase__ = '''gzip''' lowerCamelCase__ = '''.gz''' class __magic_name__ (__lowercase ): lowerCamelCase__ = '''lz4''' lowerCamelCase__ = '''lz4''' lowerCamelCase__ = '''.lz4''' class __magic_name__ (__lowercase ): lowerCamelCase__ = '''xz''' lowerCamelCase__ = '''xz''' lowerCamelCase__ = '''.xz''' class __magic_name__ (__lowercase ): lowerCamelCase__ = '''zstd''' lowerCamelCase__ = '''zstd''' lowerCamelCase__ = '''.zst''' def __init__( self , _a , _a = "rb" , _a = None , _a = None , _a = DEFAULT_BLOCK_SIZE , **_a , ) -> Optional[Any]: super().__init__( fo=_a , mode=_a , target_protocol=_a , target_options=_a , block_size=_a , **_a , ) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 lowerCAmelCase_ = self.file.__enter__ class __magic_name__ : def __init__( self , _a ) -> int: lowerCAmelCase_ = file_ def __enter__( self ) -> Dict: self._file.__enter__() return self def __exit__( self , *_a , **_a ) -> Dict: self._file.__exit__(*_a , **_a ) def __iter__( self ) -> Optional[Any]: return iter(self._file ) def __a ( self 
) -> Optional[int]: return next(self._file ) def __getattr__( self , _a ) -> Union[str, Any]: return getattr(self._file , _a ) def fixed_enter(*_a , **_a ): return WrappedFile(_enter(*_a , **_a ) ) lowerCAmelCase_ = fixed_enter
22
import argparse import io import requests import torch from omegaconf import OmegaConf from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, create_vae_diffusers_config, renew_vae_attention_paths, renew_vae_resnet_paths, ) def A(__a: Tuple , __a: Union[str, Any] ): lowerCAmelCase_ = checkpoint lowerCAmelCase_ = {} lowerCAmelCase_ = vae_state_dict["encoder.conv_in.weight"] lowerCAmelCase_ = vae_state_dict["encoder.conv_in.bias"] lowerCAmelCase_ = vae_state_dict["encoder.conv_out.weight"] lowerCAmelCase_ = vae_state_dict["encoder.conv_out.bias"] lowerCAmelCase_ = vae_state_dict["encoder.norm_out.weight"] lowerCAmelCase_ = vae_state_dict["encoder.norm_out.bias"] lowerCAmelCase_ = vae_state_dict["decoder.conv_in.weight"] lowerCAmelCase_ = vae_state_dict["decoder.conv_in.bias"] lowerCAmelCase_ = vae_state_dict["decoder.conv_out.weight"] lowerCAmelCase_ = vae_state_dict["decoder.conv_out.bias"] lowerCAmelCase_ = vae_state_dict["decoder.norm_out.weight"] lowerCAmelCase_ = vae_state_dict["decoder.norm_out.bias"] lowerCAmelCase_ = vae_state_dict["quant_conv.weight"] lowerCAmelCase_ = vae_state_dict["quant_conv.bias"] lowerCAmelCase_ = vae_state_dict["post_quant_conv.weight"] lowerCAmelCase_ = vae_state_dict["post_quant_conv.bias"] # Retrieves the keys for the encoder down blocks only lowerCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} ) lowerCAmelCase_ = { layer_id: [key for key in vae_state_dict if F"down.{layer_id}" in key] for layer_id in range(__a ) } # Retrieves the keys for the decoder up blocks only lowerCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} ) lowerCAmelCase_ = { layer_id: [key for key in vae_state_dict if F"up.{layer_id}" in key] for layer_id in range(__a ) } for i in range(__a ): lowerCAmelCase_ = [key for key in down_blocks[i] if F"down.{i}" in key and F"down.{i}.downsample" not in key] if F"encoder.down.{i}.downsample.conv.weight" in vae_state_dict: lowerCAmelCase_ = vae_state_dict.pop( F"encoder.down.{i}.downsample.conv.weight" ) lowerCAmelCase_ = vae_state_dict.pop( F"encoder.down.{i}.downsample.conv.bias" ) lowerCAmelCase_ = renew_vae_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"down.{i}.block", "new": F"down_blocks.{i}.resnets"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) lowerCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.block" in key] lowerCAmelCase_ = 2 for i in range(1 , num_mid_res_blocks + 1 ): lowerCAmelCase_ = [key for key in mid_resnets if F"encoder.mid.block_{i}" in key] lowerCAmelCase_ = renew_vae_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) lowerCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.attn" in key] lowerCAmelCase_ = renew_vae_attention_paths(__a ) lowerCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) conv_attn_to_linear(__a ) for i in range(__a ): lowerCAmelCase_ = num_up_blocks - 1 - i lowerCAmelCase_ = [ key for key in up_blocks[block_id] if F"up.{block_id}" in key and F"up.{block_id}.upsample" not in key ] if F"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict: lowerCAmelCase_ = vae_state_dict[ 
F"decoder.up.{block_id}.upsample.conv.weight" ] lowerCAmelCase_ = vae_state_dict[ F"decoder.up.{block_id}.upsample.conv.bias" ] lowerCAmelCase_ = renew_vae_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"up.{block_id}.block", "new": F"up_blocks.{i}.resnets"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) lowerCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.block" in key] lowerCAmelCase_ = 2 for i in range(1 , num_mid_res_blocks + 1 ): lowerCAmelCase_ = [key for key in mid_resnets if F"decoder.mid.block_{i}" in key] lowerCAmelCase_ = renew_vae_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) lowerCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.attn" in key] lowerCAmelCase_ = renew_vae_attention_paths(__a ) lowerCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) conv_attn_to_linear(__a ) return new_checkpoint def A(__a: str , __a: str , ): # Only support V1 lowerCAmelCase_ = requests.get( " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" ) lowerCAmelCase_ = io.BytesIO(r.content ) lowerCAmelCase_ = OmegaConf.load(__a ) lowerCAmelCase_ = 512 lowerCAmelCase_ = "cuda" if torch.cuda.is_available() else "cpu" if checkpoint_path.endswith("safetensors" ): from safetensors import safe_open lowerCAmelCase_ = {} with safe_open(__a , framework="pt" , device="cpu" ) as f: for key in f.keys(): lowerCAmelCase_ = f.get_tensor(__a ) else: lowerCAmelCase_ = torch.load(__a , map_location=__a )["state_dict"] # Convert the VAE model. lowerCAmelCase_ = create_vae_diffusers_config(__a , image_size=__a ) lowerCAmelCase_ = custom_convert_ldm_vae_checkpoint(__a , __a ) lowerCAmelCase_ = AutoencoderKL(**__a ) vae.load_state_dict(__a ) vae.save_pretrained(__a ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''') parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''') lowerCamelCase__ = parser.parse_args() vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
22
1
import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class __magic_name__ (__lowercase , __lowercase ): @register_to_config def __init__( self , *, _a = 4 , _a = 768 , _a , _a , ) -> Optional[int]: super().__init__() lowerCAmelCase_ = nn.Parameter(torch.zeros(_a ) ) # parameters for additional clip time embeddings lowerCAmelCase_ = nn.Linear(_a , _a ) lowerCAmelCase_ = nn.Linear(_a , _a ) # parameters for encoder hidden states lowerCAmelCase_ = clip_extra_context_tokens lowerCAmelCase_ = nn.Linear( _a , self.clip_extra_context_tokens * cross_attention_dim ) lowerCAmelCase_ = nn.Linear(_a , _a ) lowerCAmelCase_ = nn.LayerNorm(_a ) def __a ( self , *, _a , _a , _a , _a ) -> Any: if do_classifier_free_guidance: # Add the classifier free guidance embeddings to the image embeddings lowerCAmelCase_ = image_embeddings.shape[0] lowerCAmelCase_ = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 ) lowerCAmelCase_ = classifier_free_guidance_embeddings.expand( _a , -1 ) lowerCAmelCase_ = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 ) # The image embeddings batch size and the text embeddings batch size are equal assert image_embeddings.shape[0] == prompt_embeds.shape[0] lowerCAmelCase_ = prompt_embeds.shape[0] # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and # adding CLIP embeddings to the existing timestep embedding, ... lowerCAmelCase_ = self.embedding_proj(_a ) lowerCAmelCase_ = self.clip_image_embeddings_project_to_time_embeddings(_a ) lowerCAmelCase_ = time_projected_image_embeddings + time_projected_prompt_embeds # ... and by projecting CLIP embeddings into four # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder" lowerCAmelCase_ = self.clip_extra_context_tokens_proj(_a ) lowerCAmelCase_ = clip_extra_context_tokens.reshape(_a , -1 , self.clip_extra_context_tokens ) lowerCAmelCase_ = clip_extra_context_tokens.permute(0 , 2 , 1 ) lowerCAmelCase_ = self.encoder_hidden_states_proj(_a ) lowerCAmelCase_ = self.text_encoder_hidden_states_norm(_a ) lowerCAmelCase_ = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 ) return text_encoder_hidden_states, additive_clip_time_embeddings
22
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    # Every row and every column must be sorted in decreasing order.
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # num must be negative and the previous entry non-negative.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1

    # No negative numbers, so return the last index of the array + 1 (its length).
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
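A quick sanity check of the three counters on the first small test grid above (an illustrative driver, not part of the original cell; the expected count of 8 negatives follows by inspection):

    small = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
    assert count_negatives_binary_search(small) == 8
    assert count_negatives_brute_force(small) == 8
    assert count_negatives_brute_force_with_break(small) == 8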
22
1
import math from typing import Optional import numpy as np from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { '''facebook/encodec_24khz''': '''https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json''', '''facebook/encodec_48khz''': '''https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json''', } class __magic_name__ (__lowercase ): lowerCamelCase__ = '''encodec''' def __init__( self , _a=[1.5, 3.0, 6.0, 1_2.0, 2_4.0] , _a=24000 , _a=1 , _a=False , _a=None , _a=None , _a=128 , _a=32 , _a=1 , _a=[8, 5, 4, 2] , _a="weight_norm" , _a=7 , _a=7 , _a=3 , _a=2 , _a=True , _a="reflect" , _a=2 , _a=2 , _a=1.0 , _a=1024 , _a=None , _a=True , **_a , ) -> Tuple: lowerCAmelCase_ = target_bandwidths lowerCAmelCase_ = sampling_rate lowerCAmelCase_ = audio_channels lowerCAmelCase_ = normalize lowerCAmelCase_ = chunk_length_s lowerCAmelCase_ = overlap lowerCAmelCase_ = hidden_size lowerCAmelCase_ = num_filters lowerCAmelCase_ = num_residual_layers lowerCAmelCase_ = upsampling_ratios lowerCAmelCase_ = norm_type lowerCAmelCase_ = kernel_size lowerCAmelCase_ = last_kernel_size lowerCAmelCase_ = residual_kernel_size lowerCAmelCase_ = dilation_growth_rate lowerCAmelCase_ = use_causal_conv lowerCAmelCase_ = pad_mode lowerCAmelCase_ = compress lowerCAmelCase_ = num_lstm_layers lowerCAmelCase_ = trim_right_ratio lowerCAmelCase_ = codebook_size lowerCAmelCase_ = codebook_dim if codebook_dim is not None else hidden_size lowerCAmelCase_ = use_conv_shortcut if self.norm_type not in ["weight_norm", "time_group_norm"]: raise ValueError( f"self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}" ) super().__init__(**_a ) @property def __a ( self ) -> Optional[int]: if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def __a ( self ) -> Optional[int]: if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 , int((1.0 - self.overlap) * self.chunk_length ) ) @property def __a ( self ) -> int: lowerCAmelCase_ = np.prod(self.upsampling_ratios ) return math.ceil(self.sampling_rate / hop_length ) @property def __a ( self ) -> int: return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
22
import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging lowerCamelCase__ = logging.get_logger(__name__) def A(__a: Dict ): lowerCAmelCase_ = r"\w+[.]\d+" lowerCAmelCase_ = re.findall(__a , __a ) for pat in pats: lowerCAmelCase_ = key.replace(__a , "_".join(pat.split("." ) ) ) return key def A(__a: str , __a: Tuple , __a: List[Any] ): lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",) if ( any("norm" in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: lowerCAmelCase_ = pt_tuple_key[:-1] + ("embedding",) return renamed_pt_tuple_key, pt_tensor # conv layer lowerCAmelCase_ = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: lowerCAmelCase_ = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowerCAmelCase_ = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight": lowerCAmelCase_ = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowerCAmelCase_ = pt_tuple_key[:-1] + ("weight",) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias lowerCAmelCase_ = pt_tuple_key[:-1] + ("bias",) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def A(__a: Dict , __a: Any , __a: List[Any]=42 ): # Step 1: Convert pytorch tensor to numpy lowerCAmelCase_ = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params lowerCAmelCase_ = flax_model.init_weights(PRNGKey(__a ) ) lowerCAmelCase_ = flatten_dict(__a ) lowerCAmelCase_ = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCAmelCase_ = rename_key(__a ) lowerCAmelCase_ = tuple(renamed_pt_key.split("." ) ) # Correctly rename weight parameters lowerCAmelCase_ , lowerCAmelCase_ = rename_key_and_reshape_tensor(__a , __a , __a ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape " F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." ) # also add unexpected weight so that warning is thrown lowerCAmelCase_ = jnp.asarray(__a ) return unflatten_dict(__a )
22
1
import math


def fx(x: float, a: float) -> float:
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    # Repeatedly square until the start point exceeds a, giving a value above sqrt(a).
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(
    a: float, max_iter: int = 9999, tolerance: float = 0.00_0000_0000_0001
) -> float:
    # Newton's method on f(x) = x^2 - a.
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value

    return value


if __name__ == "__main__":
    from doctest import testmod

    testmod()
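A hypothetical convergence check, not in the original cell (the name square_root_iterative is from the reconstruction above):

    assert abs(square_root_iterative(4.0) - 2.0) < 1e-9
    assert abs(square_root_iterative(3.0) - math.sqrt(3.0)) < 1e-9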
22
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
22
1
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable

_import_structure = {
    "configuration_gpt_neox_japanese": [
        "GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GPTNeoXJapaneseConfig",
    ],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neox_japanese import (
        GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GPTNeoXJapaneseConfig,
    )
    from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox_japanese import (
            GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXJapaneseForCausalLM,
            GPTNeoXJapaneseLayer,
            GPTNeoXJapaneseModel,
            GPTNeoXJapanesePreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
22
import math


def perfect_square(num: int) -> bool:
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    # Avoids floating point entirely by searching for an integer mid with mid**2 == n.
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
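Two illustrative calls (assumed names from the reconstruction above, not part of the original cell):

    assert perfect_square_binary_search(25) is True
    assert perfect_square_binary_search(26) is False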
22
1
from collections.abc import Sequence


def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    # Kadane's algorithm; optionally treats the empty subarray (sum 0) as valid.
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()

    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
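The flag only changes the answer for all-negative inputs, as this hypothetical check (not part of the original cell) shows: the classic variant returns the largest single element, the empty-subarray variant returns 0.

    assert max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6
    assert max_subarray_sum([-3, -1, -2]) == -1
    assert max_subarray_sum([-3, -1, -2], allow_empty_subarrays=True) == 0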
22
import sys

from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core

# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
    pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
    pkgs_to_check_at_runtime.append("importlib_metadata")

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed

        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
22
1
from heapq import heappop, heappush

import numpy as np


def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float, list[tuple[int, int]]]:
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
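A minimal sketch of how the function might be called (assumed, not from the original cell; the name dijkstra is from the reconstruction above). Cells equal to 1 are walkable, 0 are walls, and each step costs 1:

    import numpy as np
    grid = np.array([[1, 1, 1], [0, 1, 0], [1, 1, 1]])
    dist, path = dijkstra(grid, (0, 0), (2, 2), allow_diagonal=False)
    assert dist == 4 and path[0] == (0, 0) and path[-1] == (2, 2)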
22
import argparse import os from pathlib import Path import fairseq import torch from packaging import version from torch import nn from transformers import ( BartConfig, BartForConditionalGeneration, BartForSequenceClassification, BartModel, BartTokenizer, ) from transformers.utils import logging lowerCamelCase__ = ['''bart.large''', '''bart.large.mnli''', '''bart.large.cnn''', '''bart_xsum/model.pt'''] lowerCamelCase__ = {'''bart.large''': BartModel, '''bart.large.mnli''': BartForSequenceClassification} if version.parse(fairseq.__version__) < version.parse('''0.9.0'''): raise Exception('''requires fairseq >= 0.9.0''') logging.set_verbosity_info() lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = ''' Hello world! cécé herlolip''' lowerCamelCase__ = [ ('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''), ('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''), ('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''), ('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''), ] def A(__a: Any ): lowerCAmelCase_ = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "_float_tensor", ] for k in ignore_keys: state_dict.pop(__a , __a ) def A(__a: Optional[int] , __a: List[Any] , __a: Union[str, Any] ): lowerCAmelCase_ = dct.pop(__a ) lowerCAmelCase_ = val def A(__a: Tuple ): lowerCAmelCase_ = torch.load(__a , map_location="cpu" ) lowerCAmelCase_ = torch.hub.load("pytorch/fairseq" , "bart.large.cnn" ).eval() hub_interface.model.load_state_dict(sd["model"] ) return hub_interface def A(__a: List[str] ): lowerCAmelCase_ , lowerCAmelCase_ = emb.weight.shape lowerCAmelCase_ = nn.Linear(__a , __a , bias=__a ) lowerCAmelCase_ = emb.weight.data return lin_layer @torch.no_grad() def A(__a: Tuple , __a: Union[str, Any] , __a: str=None ): if not os.path.exists(__a ): lowerCAmelCase_ = torch.hub.load("pytorch/fairseq" , __a ).eval() else: lowerCAmelCase_ = load_xsum_checkpoint(__a ) bart.model.upgrade_state_dict(bart.model.state_dict() ) if hf_checkpoint_name is None: lowerCAmelCase_ = checkpoint_path.replace("." 
, "-" ) lowerCAmelCase_ = BartConfig.from_pretrained(__a ) lowerCAmelCase_ = bart.encode(__a ).unsqueeze(0 ) lowerCAmelCase_ = BartTokenizer.from_pretrained(__a ).encode(__a , return_tensors="pt" ).unsqueeze(0 ) if not torch.eq(__a , __a ).all(): raise ValueError( F"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}" ) if checkpoint_path == "bart.large.mnli": lowerCAmelCase_ = bart.state_dict() remove_ignore_keys_(__a ) lowerCAmelCase_ = state_dict["model.decoder.embed_tokens.weight"] for src, dest in mnli_rename_keys: rename_key(__a , __a , __a ) lowerCAmelCase_ = BartForSequenceClassification(__a ).eval() model.load_state_dict(__a ) lowerCAmelCase_ = bart.predict("mnli" , __a , return_logits=__a ) lowerCAmelCase_ = model(__a )[0] # logits else: # no classification heads to worry about lowerCAmelCase_ = bart.model.state_dict() remove_ignore_keys_(__a ) lowerCAmelCase_ = state_dict["decoder.embed_tokens.weight"] lowerCAmelCase_ = bart.extract_features(__a ) if hf_checkpoint_name == "facebook/bart-large": lowerCAmelCase_ = BartModel(__a ).eval() model.load_state_dict(__a ) lowerCAmelCase_ = model(__a ).model[0] else: lowerCAmelCase_ = BartForConditionalGeneration(__a ).eval() # an existing summarization ckpt model.model.load_state_dict(__a ) if hasattr(__a , "lm_head" ): lowerCAmelCase_ = make_linear_from_emb(model.model.shared ) lowerCAmelCase_ = model.model(__a )[0] # Check results if fairseq_output.shape != new_model_outputs.shape: raise ValueError( F"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}" ) if (fairseq_output != new_model_outputs).any().item(): raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`" ) Path(__a ).mkdir(exist_ok=__a ) model.save_pretrained(__a ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum''' ) lowerCamelCase__ = parser.parse_args() convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
22
1
import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def A(__a: Dict ): return 1.0 / (1.0 + np.exp(-_outputs )) def A(__a: Union[str, Any] ): lowerCAmelCase_ = np.max(_outputs , axis=-1 , keepdims=__a ) lowerCAmelCase_ = np.exp(_outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=__a ) class __magic_name__ (__lowercase ): lowerCamelCase__ = '''sigmoid''' lowerCamelCase__ = '''softmax''' lowerCamelCase__ = '''none''' @add_end_docstrings( __lowercase , R''' return_all_scores (`bool`, *optional*, defaults to `False`): Whether to return all prediction scores or just the one of the predicted class. function_to_apply (`str`, *optional*, defaults to `"default"`): The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model has several labels, will apply the softmax function on the output. - `"sigmoid"`: Applies the sigmoid function on the output. - `"softmax"`: Applies the softmax function on the output. - `"none"`: Does not apply any function on the output. ''' , ) class __magic_name__ (__lowercase ): lowerCamelCase__ = False lowerCamelCase__ = ClassificationFunction.NONE def __init__( self , **_a ) -> List[str]: super().__init__(**_a ) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == "tf" else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ) def __a ( self , _a=None , _a=None , _a="" , **_a ) -> Dict: # Using "" as default argument because we're going to use `top_k=None` in user code to declare # "No top_k" lowerCAmelCase_ = tokenizer_kwargs lowerCAmelCase_ = {} if hasattr(self.model.config , "return_all_scores" ) and return_all_scores is None: lowerCAmelCase_ = self.model.config.return_all_scores if isinstance(_a , _a ) or top_k is None: lowerCAmelCase_ = top_k lowerCAmelCase_ = False elif return_all_scores is not None: warnings.warn( "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of" " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`." , _a , ) if return_all_scores: lowerCAmelCase_ = None else: lowerCAmelCase_ = 1 if isinstance(_a , _a ): lowerCAmelCase_ = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: lowerCAmelCase_ = function_to_apply return preprocess_params, {}, postprocess_params def __call__( self , *_a , **_a ) -> Any: lowerCAmelCase_ = super().__call__(*_a , **_a ) # TODO try and retrieve it in a nicer way from _sanitize_parameters. 
lowerCAmelCase_ = "top_k" not in kwargs if isinstance(args[0] , _a ) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def __a ( self , _a , **_a ) -> Dict[str, GenericTensor]: lowerCAmelCase_ = self.framework if isinstance(_a , _a ): return self.tokenizer(**_a , return_tensors=_a , **_a ) elif isinstance(_a , _a ) and len(_a ) == 1 and isinstance(inputs[0] , _a ) and len(inputs[0] ) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=_a , **_a ) elif isinstance(_a , _a ): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a" " dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair." ) return self.tokenizer(_a , return_tensors=_a , **_a ) def __a ( self , _a ) -> int: return self.model(**_a ) def __a ( self , _a , _a=None , _a=1 , _a=True ) -> Tuple: # `_legacy` is used to determine if we're running the naked pipeline and in backward # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running # the more natural result containing the list. # Default value before `set_parameters` if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: lowerCAmelCase_ = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: lowerCAmelCase_ = ClassificationFunction.SOFTMAX elif hasattr(self.model.config , "function_to_apply" ) and function_to_apply is None: lowerCAmelCase_ = self.model.config.function_to_apply else: lowerCAmelCase_ = ClassificationFunction.NONE lowerCAmelCase_ = model_outputs["logits"][0] lowerCAmelCase_ = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: lowerCAmelCase_ = sigmoid(_a ) elif function_to_apply == ClassificationFunction.SOFTMAX: lowerCAmelCase_ = softmax(_a ) elif function_to_apply == ClassificationFunction.NONE: lowerCAmelCase_ = outputs else: raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}" ) if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} lowerCAmelCase_ = [ {"label": self.model.config.idalabel[i], "score": score.item()} for i, score in enumerate(_a ) ] if not _legacy: dict_scores.sort(key=lambda _a : x["score"] , reverse=_a ) if top_k is not None: lowerCAmelCase_ = dict_scores[:top_k] return dict_scores
22
import os import unittest from transformers import MobileBertTokenizer, MobileBertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class __magic_name__ (__lowercase , unittest.TestCase ): lowerCamelCase__ = MobileBertTokenizer lowerCamelCase__ = MobileBertTokenizerFast lowerCamelCase__ = True lowerCamelCase__ = True lowerCamelCase__ = filter_non_english lowerCamelCase__ = '''google/mobilebert-uncased''' def __a ( self ) -> Optional[Any]: super().setUp() lowerCAmelCase_ = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) lowerCAmelCase_ = [ (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped for tokenizer_def in self.tokenizers_list ] def __a ( self , _a ) -> Any: lowerCAmelCase_ = "UNwant\u00E9d,running" lowerCAmelCase_ = "unwanted, running" return input_text, output_text def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = self.tokenizer_class(self.vocab_file ) lowerCAmelCase_ = tokenizer.tokenize("UNwant\u00E9d,running" ) self.assertListEqual(_a , ["un", "##want", "##ed", ",", "runn", "##ing"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [9, 6, 7, 12, 10, 11] ) def __a ( self ) -> Tuple: if not self.test_rust_tokenizer: return lowerCAmelCase_ = self.get_tokenizer() lowerCAmelCase_ = self.get_rust_tokenizer() lowerCAmelCase_ = "UNwant\u00E9d,running" lowerCAmelCase_ = tokenizer.tokenize(_a ) lowerCAmelCase_ = rust_tokenizer.tokenize(_a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = rust_tokenizer.encode(_a , add_special_tokens=_a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = self.get_rust_tokenizer() lowerCAmelCase_ = tokenizer.encode(_a ) lowerCAmelCase_ = rust_tokenizer.encode(_a ) self.assertListEqual(_a , _a ) # With lower casing lowerCAmelCase_ = self.get_tokenizer(do_lower_case=_a ) lowerCAmelCase_ = self.get_rust_tokenizer(do_lower_case=_a ) lowerCAmelCase_ = "UNwant\u00E9d,running" lowerCAmelCase_ = tokenizer.tokenize(_a ) lowerCAmelCase_ = rust_tokenizer.tokenize(_a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = rust_tokenizer.encode(_a , add_special_tokens=_a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = self.get_rust_tokenizer() lowerCAmelCase_ = tokenizer.encode(_a ) lowerCAmelCase_ = rust_tokenizer.encode(_a ) self.assertListEqual(_a , _a ) def __a ( self ) -> Any: lowerCAmelCase_ = BasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] ) def __a ( self ) -> Dict: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? 
" ) , ["hello", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def __a ( self ) -> List[Any]: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] ) def __a ( self ) -> str: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def __a ( self ) -> str: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def __a ( self ) -> str: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] ) def __a ( self ) -> List[str]: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] ) def __a ( self ) -> Any: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , never_split=["[UNK]"] ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] ) def __a ( self ) -> Any: lowerCAmelCase_ = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] lowerCAmelCase_ = {} for i, token in enumerate(_a ): lowerCAmelCase_ = i lowerCAmelCase_ = WordpieceTokenizer(vocab=_a , unk_token="[UNK]" ) self.assertListEqual(tokenizer.tokenize("" ) , [] ) self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] ) self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] ) def __a ( self ) -> Optional[int]: self.assertTrue(_is_whitespace(" " ) ) self.assertTrue(_is_whitespace("\t" ) ) self.assertTrue(_is_whitespace("\r" ) ) self.assertTrue(_is_whitespace("\n" ) ) self.assertTrue(_is_whitespace("\u00A0" ) ) self.assertFalse(_is_whitespace("A" ) ) self.assertFalse(_is_whitespace("-" ) ) def __a ( self ) -> List[str]: self.assertTrue(_is_control("\u0005" ) ) self.assertFalse(_is_control("A" ) ) self.assertFalse(_is_control(" " ) ) self.assertFalse(_is_control("\t" ) ) self.assertFalse(_is_control("\r" ) ) def __a ( self ) -> Dict: self.assertTrue(_is_punctuation("-" ) ) self.assertTrue(_is_punctuation("$" ) ) self.assertTrue(_is_punctuation("`" ) ) self.assertTrue(_is_punctuation("." 
) ) self.assertFalse(_is_punctuation("A" ) ) self.assertFalse(_is_punctuation(" " ) ) def __a ( self ) -> Any: lowerCAmelCase_ = self.get_tokenizer() lowerCAmelCase_ = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(_a ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) self.assertListEqual( [rust_tokenizer.tokenize(_a ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) @slow def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = self.tokenizer_class.from_pretrained("google/mobilebert-uncased" ) lowerCAmelCase_ = tokenizer.encode("sequence builders" , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer.encode("multi-sequence build" , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_a ) lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_a , _a ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def __a ( self ) -> Union[str, Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence." lowerCAmelCase_ = tokenizer_r.encode_plus( _a , return_attention_mask=_a , return_token_type_ids=_a , return_offsets_mapping=_a , add_special_tokens=_a , ) lowerCAmelCase_ = tokenizer_r.do_lower_case if hasattr(_a , "do_lower_case" ) else False lowerCAmelCase_ = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "A"), ((1, 2), ","), ((3, 5), "na"), ((5, 6), "##ï"), ((6, 8), "##ve"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "Allen"), ((21, 23), "##NL"), ((23, 24), "##P"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "a"), ((1, 2), ","), ((3, 8), "naive"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "allen"), ((21, 23), "##nl"), ((23, 24), "##p"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) ) self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] ) def __a ( self ) -> Optional[int]: lowerCAmelCase_ = ["的", "人", "有"] lowerCAmelCase_ = "".join(_a ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): lowerCAmelCase_ = True lowerCAmelCase_ = self.tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = tokenizer_p.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer_r.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer_r.convert_ids_to_tokens(_a ) lowerCAmelCase_ = tokenizer_p.convert_ids_to_tokens(_a ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(_a , _a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = False lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = self.tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = tokenizer_r.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer_p.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer_r.convert_ids_to_tokens(_a ) lowerCAmelCase_ = 
tokenizer_p.convert_ids_to_tokens(_a ) # it is expected that only the first Chinese character is not preceded by "##". lowerCAmelCase_ = [ f"##{token}" if idx != 0 else token for idx, token in enumerate(_a ) ] self.assertListEqual(_a , _a ) self.assertListEqual(_a , _a )
22
1
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase__ = logging.get_logger(__name__) def A(__a: List[str] , __a: Tuple , __a: Tuple , __a: int ): lowerCAmelCase_ = original_name.split("." )[0] lowerCAmelCase_ = key.split("." ) lowerCAmelCase_ = int(key_list[key_list.index(__a ) - 2] ) lowerCAmelCase_ = int(key_list[key_list.index(__a ) - 1] ) lowerCAmelCase_ = orig_block_num - offset lowerCAmelCase_ = key.replace(F"{orig_block_num}.{layer_num}.{original_name}" , F"block.{new_block_num}.{layer_num}.{new_name}" ) return key def A(__a: Optional[Any] ): lowerCAmelCase_ = OrderedDict() lowerCAmelCase_ , lowerCAmelCase_ = 0, 0 for key, value in state_dict.items(): if key.startswith("network" ): lowerCAmelCase_ = key.replace("network" , "poolformer.encoder" ) if "proj" in key: # Works for the first embedding as well as the internal embedding layers if key.endswith("bias" ) and "patch_embed" not in key: patch_emb_offset += 1 lowerCAmelCase_ = key[: key.find("proj" )] lowerCAmelCase_ = key.replace(__a , F"patch_embeddings.{total_embed_found}." ) lowerCAmelCase_ = key.replace("proj" , "projection" ) if key.endswith("bias" ): total_embed_found += 1 if "patch_embeddings" in key: lowerCAmelCase_ = "poolformer.encoder." + key if "mlp.fc1" in key: lowerCAmelCase_ = replace_key_with_offset(__a , __a , "mlp.fc1" , "output.conv1" ) if "mlp.fc2" in key: lowerCAmelCase_ = replace_key_with_offset(__a , __a , "mlp.fc2" , "output.conv2" ) if "norm1" in key: lowerCAmelCase_ = replace_key_with_offset(__a , __a , "norm1" , "before_norm" ) if "norm2" in key: lowerCAmelCase_ = replace_key_with_offset(__a , __a , "norm2" , "after_norm" ) if "layer_scale_1" in key: lowerCAmelCase_ = replace_key_with_offset(__a , __a , "layer_scale_1" , "layer_scale_1" ) if "layer_scale_2" in key: lowerCAmelCase_ = replace_key_with_offset(__a , __a , "layer_scale_2" , "layer_scale_2" ) if "head" in key: lowerCAmelCase_ = key.replace("head" , "classifier" ) lowerCAmelCase_ = value return new_state_dict def A(): lowerCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg" lowerCAmelCase_ = Image.open(requests.get(__a , stream=__a ).raw ) return image @torch.no_grad() def A(__a: Dict , __a: List[Any] , __a: Optional[Any] ): lowerCAmelCase_ = PoolFormerConfig() # set attributes based on model_name lowerCAmelCase_ = "huggingface/label-files" lowerCAmelCase_ = model_name[-3:] lowerCAmelCase_ = 1000 lowerCAmelCase_ = "imagenet-1k-id2label.json" lowerCAmelCase_ = (1, 1000) # set config attributes lowerCAmelCase_ = json.load(open(hf_hub_download(__a , __a , repo_type="dataset" ) , "r" ) ) lowerCAmelCase_ = {int(__a ): v for k, v in idalabel.items()} lowerCAmelCase_ = idalabel lowerCAmelCase_ = {v: k for k, v in idalabel.items()} if size == "s12": lowerCAmelCase_ = [2, 2, 6, 2] lowerCAmelCase_ = [64, 128, 320, 512] lowerCAmelCase_ = 4.0 lowerCAmelCase_ = 0.9 elif size == "s24": lowerCAmelCase_ = [4, 4, 12, 4] lowerCAmelCase_ = [64, 128, 320, 512] lowerCAmelCase_ = 4.0 lowerCAmelCase_ = 0.9 elif size == "s36": lowerCAmelCase_ = [6, 6, 18, 6] lowerCAmelCase_ = [64, 128, 320, 512] lowerCAmelCase_ = 4.0 lowerCAmelCase_ = 1E-6 lowerCAmelCase_ = 0.9 elif size == "m36": lowerCAmelCase_ = [6, 6, 18, 6] lowerCAmelCase_ = [96, 192, 
384, 768] lowerCAmelCase_ = 4.0 lowerCAmelCase_ = 1E-6 lowerCAmelCase_ = 0.95 elif size == "m48": lowerCAmelCase_ = [8, 8, 24, 8] lowerCAmelCase_ = [96, 192, 384, 768] lowerCAmelCase_ = 4.0 lowerCAmelCase_ = 1E-6 lowerCAmelCase_ = 0.95 else: raise ValueError(F"Size {size} not supported" ) # load image processor lowerCAmelCase_ = PoolFormerImageProcessor(crop_pct=__a ) # Prepare image lowerCAmelCase_ = prepare_img() lowerCAmelCase_ = image_processor(images=__a , return_tensors="pt" ).pixel_values logger.info(F"Converting model {model_name}..." ) # load original state dict lowerCAmelCase_ = torch.load(__a , map_location=torch.device("cpu" ) ) # rename keys lowerCAmelCase_ = rename_keys(__a ) # create HuggingFace model and load state dict lowerCAmelCase_ = PoolFormerForImageClassification(__a ) model.load_state_dict(__a ) model.eval() # Define image processor lowerCAmelCase_ = PoolFormerImageProcessor(crop_pct=__a ) lowerCAmelCase_ = image_processor(images=prepare_img() , return_tensors="pt" ).pixel_values # forward pass lowerCAmelCase_ = model(__a ) lowerCAmelCase_ = outputs.logits # define expected logit slices for different models if size == "s12": lowerCAmelCase_ = torch.tensor([-0.3045, -0.6758, -0.4869] ) elif size == "s24": lowerCAmelCase_ = torch.tensor([0.4402, -0.1374, -0.8045] ) elif size == "s36": lowerCAmelCase_ = torch.tensor([-0.6080, -0.5133, -0.5898] ) elif size == "m36": lowerCAmelCase_ = torch.tensor([0.3952, 0.2263, -1.2668] ) elif size == "m48": lowerCAmelCase_ = torch.tensor([0.1167, -0.0656, -0.3423] ) else: raise ValueError(F"Size {size} not supported" ) # verify logits assert logits.shape == expected_shape assert torch.allclose(logits[0, :3] , __a , atol=1E-2 ) # finally, save model and image processor logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." ) Path(__a ).mkdir(exist_ok=__a ) model.save_pretrained(__a ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(__a ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''poolformer_s12''', type=str, help='''Name of the model you\'d like to convert.''', ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) lowerCamelCase__ = parser.parse_args() convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
22
import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
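A hypothetical check, not in the original cell: the first few primes from the generator, and a small instance of the summation.

    from itertools import islice
    assert list(islice(prime_generator(), 5)) == [2, 3, 5, 7, 11]
    assert solution(10) == 17  # 2 + 3 + 5 + 7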
22
1
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __magic_name__ (__lowercase ): lowerCamelCase__ = ['''image_processor''', '''tokenizer'''] lowerCamelCase__ = '''ViTImageProcessor''' lowerCamelCase__ = ('''CLIPTokenizer''', '''CLIPTokenizerFast''') def __init__( self , _a=None , _a=None , **_a ) -> Tuple: lowerCAmelCase_ = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , _a , ) lowerCAmelCase_ = kwargs.pop("feature_extractor" ) lowerCAmelCase_ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(_a , _a ) def __call__( self , _a=None , _a=None , _a=None , _a=None , **_a ) -> Dict: if text is None and visual_prompt is None and images is None: raise ValueError("You have to specify either text, visual prompt or images." ) if text is not None and visual_prompt is not None: raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." ) if text is not None: lowerCAmelCase_ = self.tokenizer(_a , return_tensors=_a , **_a ) if visual_prompt is not None: lowerCAmelCase_ = self.image_processor(_a , return_tensors=_a , **_a ) if images is not None: lowerCAmelCase_ = self.image_processor(_a , return_tensors=_a , **_a ) if visual_prompt is not None and images is not None: lowerCAmelCase_ = { "pixel_values": image_features.pixel_values, "conditional_pixel_values": prompt_features.pixel_values, } return encoding elif text is not None and images is not None: lowerCAmelCase_ = image_features.pixel_values return encoding elif text is not None: return encoding elif visual_prompt is not None: lowerCAmelCase_ = { "conditional_pixel_values": prompt_features.pixel_values, } return encoding else: return BatchEncoding(data=dict(**_a ) , tensor_type=_a ) def __a ( self , *_a , **_a ) -> List[str]: return self.tokenizer.batch_decode(*_a , **_a ) def __a ( self , *_a , **_a ) -> Optional[int]: return self.tokenizer.decode(*_a , **_a ) @property def __a ( self ) -> List[str]: warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , _a , ) return self.image_processor_class @property def __a ( self ) -> Optional[Any]: warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _a , ) return self.image_processor
22
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { '''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''', '''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''', '''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''', '''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''', # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2 } class __magic_name__ (__lowercase ): lowerCamelCase__ = '''mobilenet_v2''' def __init__( self , _a=3 , _a=224 , _a=1.0 , _a=8 , _a=8 , _a=6 , _a=32 , _a=True , _a=True , _a="relu6" , _a=True , _a=0.8 , _a=0.0_2 , _a=0.0_0_1 , _a=255 , **_a , ) -> Dict: super().__init__(**_a ) if depth_multiplier <= 0: raise ValueError("depth_multiplier must be greater than zero." ) lowerCAmelCase_ = num_channels lowerCAmelCase_ = image_size lowerCAmelCase_ = depth_multiplier lowerCAmelCase_ = depth_divisible_by lowerCAmelCase_ = min_depth lowerCAmelCase_ = expand_ratio lowerCAmelCase_ = output_stride lowerCAmelCase_ = first_layer_is_expansion lowerCAmelCase_ = finegrained_output lowerCAmelCase_ = hidden_act lowerCAmelCase_ = tf_padding lowerCAmelCase_ = classifier_dropout_prob lowerCAmelCase_ = initializer_range lowerCAmelCase_ = layer_norm_eps lowerCAmelCase_ = semantic_loss_ignore_index class __magic_name__ (__lowercase ): lowerCamelCase__ = version.parse('''1.11''' ) @property def __a ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict([("pixel_values", {0: "batch"})] ) @property def __a ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "image-classification": return OrderedDict([("logits", {0: "batch"})] ) else: return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] ) @property def __a ( self ) -> float: return 1E-4
import torch from diffusers import KDPMaDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class __magic_name__ (__lowercase ): lowerCamelCase__ = (KDPMaDiscreteScheduler,) lowerCamelCase__ = 10 def __a ( self , **_a ) -> Union[str, Any]: lowerCAmelCase_ = { "num_train_timesteps": 1100, "beta_start": 0.0_0_0_1, "beta_end": 0.0_2, "beta_schedule": "linear", } config.update(**_a ) return config def __a ( self ) -> Optional[int]: for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=_a ) def __a ( self ) -> Union[str, Any]: for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ): self.check_over_configs(beta_start=_a , beta_end=_a ) def __a ( self ) -> int: for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_a ) def __a ( self ) -> Tuple: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_a ) def __a ( self ) -> int: lowerCAmelCase_ = self.scheduler_classes[0] lowerCAmelCase_ = self.get_scheduler_config(prediction_type="v_prediction" ) lowerCAmelCase_ = scheduler_class(**_a ) scheduler.set_timesteps(self.num_inference_steps ) lowerCAmelCase_ = self.dummy_model() lowerCAmelCase_ = self.dummy_sample_deter * scheduler.init_noise_sigma lowerCAmelCase_ = sample.to(_a ) for i, t in enumerate(scheduler.timesteps ): lowerCAmelCase_ = scheduler.scale_model_input(_a , _a ) lowerCAmelCase_ = model(_a , _a ) lowerCAmelCase_ = scheduler.step(_a , _a , _a ) lowerCAmelCase_ = output.prev_sample lowerCAmelCase_ = torch.sum(torch.abs(_a ) ) lowerCAmelCase_ = torch.mean(torch.abs(_a ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 4.6934E-07 ) < 1E-2 assert abs(result_mean.item() - 6.1112E-10 ) < 1E-3 else: # CUDA assert abs(result_sum.item() - 4.693428650170972E-07 ) < 1E-2 assert abs(result_mean.item() - 0.0_0_0_2 ) < 1E-3 def __a ( self ) -> Union[str, Any]: if torch_device == "mps": return lowerCAmelCase_ = self.scheduler_classes[0] lowerCAmelCase_ = self.get_scheduler_config() lowerCAmelCase_ = scheduler_class(**_a ) scheduler.set_timesteps(self.num_inference_steps ) lowerCAmelCase_ = self.dummy_model() lowerCAmelCase_ = self.dummy_sample_deter * scheduler.init_noise_sigma lowerCAmelCase_ = sample.to(_a ) for i, t in enumerate(scheduler.timesteps ): lowerCAmelCase_ = scheduler.scale_model_input(_a , _a ) lowerCAmelCase_ = model(_a , _a ) lowerCAmelCase_ = scheduler.step(_a , _a , _a ) lowerCAmelCase_ = output.prev_sample lowerCAmelCase_ = torch.sum(torch.abs(_a ) ) lowerCAmelCase_ = torch.mean(torch.abs(_a ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1E-2 assert abs(result_mean.item() - 0.0_2_6_6 ) < 1E-3 else: # CUDA assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1E-2 assert abs(result_mean.item() - 0.0_2_6_6 ) < 1E-3 def __a ( self ) -> int: if torch_device == "mps": return lowerCAmelCase_ = self.scheduler_classes[0] lowerCAmelCase_ = self.get_scheduler_config() lowerCAmelCase_ = scheduler_class(**_a ) scheduler.set_timesteps(self.num_inference_steps , device=_a ) lowerCAmelCase_ = self.dummy_model() lowerCAmelCase_ = self.dummy_sample_deter.to(_a ) * scheduler.init_noise_sigma for t in scheduler.timesteps: lowerCAmelCase_ = scheduler.scale_model_input(_a , _a ) lowerCAmelCase_ = model(_a , _a ) lowerCAmelCase_ = scheduler.step(_a , _a , _a ) lowerCAmelCase_ = output.prev_sample lowerCAmelCase_ = torch.sum(torch.abs(_a ) ) lowerCAmelCase_ = 
torch.mean(torch.abs(_a ) ) if str(_a ).startswith("cpu" ): # The following sum varies between 148 and 156 on mps. Why? assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1E-2 assert abs(result_mean.item() - 0.0_2_6_6 ) < 1E-3 else: # CUDA assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1E-2 assert abs(result_mean.item() - 0.0_2_6_6 ) < 1E-3
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set[str]:
    """Iterative depth-first search; returns the set of explored vertices."""
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
# flake8: noqa
# Lint as: python3
__all__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]

from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
def pancake_sort(arr):
    """Sort a list by repeatedly flipping the largest unsorted element to the
    front and then to the end of the unsorted region (pancake sort)."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the whole unsorted region
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
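# Illustrative check of pancake_sort (an addition, not part of the original
# script): on [3, 1, 2], the first pass flips the 3 to the front (a no-op, it
# is already there) and then to the back, giving [2, 1, 3]; the second pass
# yields [1, 2, 3].
assert pancake_sort([3, 1, 2]) == [1, 2, 3]
assert pancake_sort([]) == []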
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) lowerCamelCase__ = { '''configuration_encodec''': [ '''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''EncodecConfig''', ], '''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ '''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''', '''EncodecModel''', '''EncodecPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Count how many times `term` occurs in `document` (case-insensitive)."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing `term`, total number of documents),
    treating each line of `corpus` as one document."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """Compute idf = log10(n / df), optionally with add-one smoothing of df."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: float) -> float:
    return round(tf * idf, 3)
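# Usage sketch for the tf-idf helpers above (illustrative values, not from the
# original file). The corpus is newline-delimited documents; "cat" appears in
# 2 of the 3 lines, so idf = round(log10(3 / 2), 3) = 0.176.
corpus = "the cat sat\nthe dog ran\nthe cat ran"
tf = term_frequency("cat", "the cat sat on the cat")  # 2
df, n = document_frequency("cat", corpus)  # (2, 3)
idf = inverse_document_frequency(df, n)  # 0.176
print(tf_idf(tf, idf))  # 0.352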
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class __magic_name__ : def __init__( self , _a , _a=2 , _a=True , _a=False , _a=10 , _a=3 , _a=32 * 4 , _a=32 * 6 , _a=4 , _a=32 , ) -> List[str]: lowerCAmelCase_ = parent lowerCAmelCase_ = batch_size lowerCAmelCase_ = is_training lowerCAmelCase_ = use_auxiliary_loss lowerCAmelCase_ = num_queries lowerCAmelCase_ = num_channels lowerCAmelCase_ = min_size lowerCAmelCase_ = max_size lowerCAmelCase_ = num_labels lowerCAmelCase_ = mask_feature_size def __a ( self ) -> List[Any]: lowerCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( _a ) lowerCAmelCase_ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_a ) lowerCAmelCase_ = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_a ) > 0.5 ).float() lowerCAmelCase_ = (torch.rand((self.batch_size, self.num_labels) , device=_a ) > 0.5).long() lowerCAmelCase_ = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def __a ( self ) -> int: return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self.prepare_config_and_inputs() lowerCAmelCase_ = {"pixel_values": pixel_values, "pixel_mask": pixel_mask} return config, inputs_dict def __a ( self , _a , _a ) -> Optional[int]: lowerCAmelCase_ = output.encoder_hidden_states lowerCAmelCase_ = output.pixel_decoder_hidden_states lowerCAmelCase_ = output.transformer_decoder_hidden_states self.parent.assertTrue(len(_a ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(_a ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(_a ) , config.decoder_config.decoder_layers ) def __a ( self , _a , _a , _a , _a=False ) -> Optional[int]: with torch.no_grad(): lowerCAmelCase_ = MaskFormerModel(config=_a ) model.to(_a ) model.eval() lowerCAmelCase_ = model(pixel_values=_a , pixel_mask=_a ) lowerCAmelCase_ = model(_a , output_hidden_states=_a ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) 
self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(_a , _a ) def __a ( self , _a , _a , _a , _a , _a ) -> Tuple: lowerCAmelCase_ = MaskFormerForInstanceSegmentation(config=_a ) model.to(_a ) model.eval() def comm_check_on_output(_a ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): lowerCAmelCase_ = model(pixel_values=_a , pixel_mask=_a ) lowerCAmelCase_ = model(_a ) comm_check_on_output(_a ) lowerCAmelCase_ = model( pixel_values=_a , pixel_mask=_a , mask_labels=_a , class_labels=_a ) comm_check_on_output(_a ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class __magic_name__ (__lowercase , __lowercase , unittest.TestCase ): lowerCamelCase__ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () lowerCamelCase__ = ( {'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False def __a ( self ) -> Tuple: lowerCAmelCase_ = MaskFormerModelTester(self ) lowerCAmelCase_ = ConfigTester(self , config_class=_a , has_text_modality=_a ) def __a ( self ) -> str: self.config_tester.run_common_tests() def __a ( self ) -> Any: lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(_a , **_a , output_hidden_states=_a ) def __a ( self ) -> Dict: lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*_a ) @unittest.skip(reason="MaskFormer does not use inputs_embeds" ) def __a ( self ) -> Dict: pass @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" ) def __a ( self ) -> str: pass @unittest.skip(reason="MaskFormer is not a generative model" ) def __a ( self ) -> Dict: pass @unittest.skip(reason="MaskFormer does not use token embeddings" ) def __a ( self ) -> Tuple: pass @require_torch_multi_gpu @unittest.skip( reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def __a ( self ) -> List[str]: pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." 
) def __a ( self ) -> Dict: pass def __a ( self ) -> int: lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase_ = model_class(_a ) lowerCAmelCase_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase_ = [*signature.parameters.keys()] lowerCAmelCase_ = ["pixel_values"] self.assertListEqual(arg_names[:1] , _a ) @slow def __a ( self ) -> List[str]: for model_name in ["facebook/maskformer-swin-small-coco"]: lowerCAmelCase_ = MaskFormerModel.from_pretrained(_a ) self.assertIsNotNone(_a ) def __a ( self ) -> List[str]: lowerCAmelCase_ = (self.model_tester.min_size,) * 2 lowerCAmelCase_ = { "pixel_values": torch.randn((2, 3, *size) , device=_a ), "mask_labels": torch.randn((2, 10, *size) , device=_a ), "class_labels": torch.zeros(2 , 10 , device=_a ).long(), } lowerCAmelCase_ = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(_a ) lowerCAmelCase_ = model(**_a ) self.assertTrue(outputs.loss is not None ) def __a ( self ) -> Dict: lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(_a , **_a , output_hidden_states=_a ) def __a ( self ) -> Any: lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase_ = model_class(_a ).to(_a ) lowerCAmelCase_ = model(**_a , output_attentions=_a ) self.assertTrue(outputs.attentions is not None ) def __a ( self ) -> Any: if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss lowerCAmelCase_ = self.all_model_classes[1] lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs() lowerCAmelCase_ = model_class(_a ) model.to(_a ) model.train() lowerCAmelCase_ = model(_a , mask_labels=_a , class_labels=_a ).loss loss.backward() def __a ( self ) -> Tuple: # only MaskFormerForInstanceSegmentation has the loss lowerCAmelCase_ = self.all_model_classes[1] lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs() lowerCAmelCase_ = True lowerCAmelCase_ = True lowerCAmelCase_ = model_class(_a ) model.to(_a ) model.train() lowerCAmelCase_ = model(_a , mask_labels=_a , class_labels=_a ) lowerCAmelCase_ = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() lowerCAmelCase_ = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't lowerCAmelCase_ = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() lowerCAmelCase_ = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=_a ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) lowerCamelCase__ = 1e-4 def A(): lowerCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_vision @slow class __magic_name__ (unittest.TestCase ): @cached_property def __a ( self ) -> Tuple: return ( MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" ) if is_vision_available() else 
None ) def __a ( self ) -> int: lowerCAmelCase_ = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(_a ) lowerCAmelCase_ = self.default_image_processor lowerCAmelCase_ = prepare_img() lowerCAmelCase_ = image_processor(_a , return_tensors="pt" ).to(_a ) lowerCAmelCase_ = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(_a , (1, 3, 800, 1088) ) with torch.no_grad(): lowerCAmelCase_ = model(**_a ) lowerCAmelCase_ = torch.tensor( [[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(_a ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , _a , atol=_a ) ) lowerCAmelCase_ = torch.tensor( [[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(_a ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _a , atol=_a ) ) lowerCAmelCase_ = torch.tensor( [[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(_a ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _a , atol=_a ) ) def __a ( self ) -> List[str]: lowerCAmelCase_ = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" ) .to(_a ) .eval() ) lowerCAmelCase_ = self.default_image_processor lowerCAmelCase_ = prepare_img() lowerCAmelCase_ = image_processor(_a , return_tensors="pt" ).to(_a ) lowerCAmelCase_ = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(_a , (1, 3, 800, 1088) ) with torch.no_grad(): lowerCAmelCase_ = model(**_a ) # masks_queries_logits lowerCAmelCase_ = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) lowerCAmelCase_ = [ [-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3], [-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5], [-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2], ] lowerCAmelCase_ = torch.tensor(_a ).to(_a ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _a , atol=_a ) ) # class_queries_logits lowerCAmelCase_ = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) lowerCAmelCase_ = torch.tensor( [ [1.6512E00, -5.2572E00, -3.3519E00], [3.6169E-02, -5.9025E00, -2.9313E00], [1.0766E-04, -7.7630E00, -5.1263E00], ] ).to(_a ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _a , atol=_a ) ) def __a ( self ) -> str: lowerCAmelCase_ = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" ) .to(_a ) .eval() ) lowerCAmelCase_ = self.default_image_processor lowerCAmelCase_ = prepare_img() lowerCAmelCase_ = image_processor(_a , return_tensors="pt" ).to(_a ) lowerCAmelCase_ = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(_a , (1, 3, 800, 1088) ) with torch.no_grad(): lowerCAmelCase_ = model(**_a ) # masks_queries_logits lowerCAmelCase_ = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape 
, (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) lowerCAmelCase_ = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]] lowerCAmelCase_ = torch.tensor(_a ).to(_a ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _a , atol=_a ) ) # class_queries_logits lowerCAmelCase_ = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) lowerCAmelCase_ = torch.tensor( [[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(_a ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _a , atol=_a ) ) def __a ( self ) -> int: lowerCAmelCase_ = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" ) .to(_a ) .eval() ) lowerCAmelCase_ = self.default_image_processor lowerCAmelCase_ = image_processor( [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="pt" , ) lowerCAmelCase_ = inputs["pixel_values"].to(_a ) lowerCAmelCase_ = [el.to(_a ) for el in inputs["mask_labels"]] lowerCAmelCase_ = [el.to(_a ) for el in inputs["class_labels"]] with torch.no_grad(): lowerCAmelCase_ = model(**_a ) self.assertTrue(outputs.loss is not None )
import warnings from ...utils import is_sklearn_available, requires_backends if is_sklearn_available(): from scipy.stats import pearsonr, spearmanr from sklearn.metrics import fa_score, matthews_corrcoef lowerCamelCase__ = ( '''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate ''' '''library. You can have a look at this example script for pointers: ''' '''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' ) def A(__a: str , __a: List[Any] ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) return (preds == labels).mean() def A(__a: Any , __a: Any ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) lowerCAmelCase_ = simple_accuracy(__a , __a ) lowerCAmelCase_ = fa_score(y_true=__a , y_pred=__a ) return { "acc": acc, "f1": fa, "acc_and_f1": (acc + fa) / 2, } def A(__a: List[str] , __a: Optional[int] ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) lowerCAmelCase_ = pearsonr(__a , __a )[0] lowerCAmelCase_ = spearmanr(__a , __a )[0] return { "pearson": pearson_corr, "spearmanr": spearman_corr, "corr": (pearson_corr + spearman_corr) / 2, } def A(__a: Union[str, Any] , __a: Any , __a: str ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) assert len(__a ) == len(__a ), F"Predictions and labels have mismatched lengths {len(__a )} and {len(__a )}" if task_name == "cola": return {"mcc": matthews_corrcoef(__a , __a )} elif task_name == "sst-2": return {"acc": simple_accuracy(__a , __a )} elif task_name == "mrpc": return acc_and_fa(__a , __a ) elif task_name == "sts-b": return pearson_and_spearman(__a , __a ) elif task_name == "qqp": return acc_and_fa(__a , __a ) elif task_name == "mnli": return {"mnli/acc": simple_accuracy(__a , __a )} elif task_name == "mnli-mm": return {"mnli-mm/acc": simple_accuracy(__a , __a )} elif task_name == "qnli": return {"acc": simple_accuracy(__a , __a )} elif task_name == "rte": return {"acc": simple_accuracy(__a , __a )} elif task_name == "wnli": return {"acc": simple_accuracy(__a , __a )} elif task_name == "hans": return {"acc": simple_accuracy(__a , __a )} else: raise KeyError(__a ) def A(__a: int , __a: Optional[Any] , __a: Optional[Any] ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) if len(__a ) != len(__a ): raise ValueError(F"Predictions and labels have mismatched lengths {len(__a )} and {len(__a )}" ) if task_name == "xnli": return {"acc": simple_accuracy(__a , __a )} else: raise KeyError(__a )
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase__ = {'''configuration_fnet''': ['''FNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FNetConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ['''FNetTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ['''FNetTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ '''FNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FNetForMaskedLM''', '''FNetForMultipleChoice''', '''FNetForNextSentencePrediction''', '''FNetForPreTraining''', '''FNetForQuestionAnswering''', '''FNetForSequenceClassification''', '''FNetForTokenClassification''', '''FNetLayer''', '''FNetModel''', '''FNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __magic_name__ (__lowercase , __lowercase , unittest.TestCase ): lowerCamelCase__ = StableDiffusionDiffEditPipeline lowerCamelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''} lowerCamelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''} lowerCamelCase__ = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess lowerCamelCase__ = frozenset([] ) def __a ( self ) -> Optional[Any]: torch.manual_seed(0 ) lowerCAmelCase_ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_a , ) lowerCAmelCase_ = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=_a , set_alpha_to_one=_a , ) lowerCAmelCase_ = DDIMInverseScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=_a , set_alpha_to_zero=_a , ) torch.manual_seed(0 ) lowerCAmelCase_ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) lowerCAmelCase_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , ) lowerCAmelCase_ = CLIPTextModel(_a ) lowerCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) lowerCAmelCase_ = { "unet": unet, "scheduler": scheduler, "inverse_scheduler": inverse_scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def __a ( self , _a , _a=0 ) -> Union[str, Any]: lowerCAmelCase_ = floats_tensor((1, 16, 16) , rng=random.Random(_a ) ).to(_a ) lowerCAmelCase_ = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(_a ) ).to(_a ) if str(_a ).startswith("mps" ): lowerCAmelCase_ = torch.manual_seed(_a ) else: lowerCAmelCase_ = torch.Generator(device=_a ).manual_seed(_a ) lowerCAmelCase_ = { "prompt": "a dog and a newt", "mask_image": mask, "image_latents": latents, "generator": generator, "num_inference_steps": 2, "inpaint_strength": 1.0, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def __a ( self , _a , _a=0 ) -> Any: lowerCAmelCase_ = floats_tensor((1, 3, 32, 32) , 
rng=random.Random(_a ) ).to(_a ) lowerCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCAmelCase_ = Image.fromarray(np.uinta(_a ) ).convert("RGB" ) if str(_a ).startswith("mps" ): lowerCAmelCase_ = torch.manual_seed(_a ) else: lowerCAmelCase_ = torch.Generator(device=_a ).manual_seed(_a ) lowerCAmelCase_ = { "image": image, "source_prompt": "a cat and a frog", "target_prompt": "a dog and a newt", "generator": generator, "num_inference_steps": 2, "num_maps_per_mask": 2, "mask_encode_strength": 1.0, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def __a ( self , _a , _a=0 ) -> Any: lowerCAmelCase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_a ) ).to(_a ) lowerCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCAmelCase_ = Image.fromarray(np.uinta(_a ) ).convert("RGB" ) if str(_a ).startswith("mps" ): lowerCAmelCase_ = torch.manual_seed(_a ) else: lowerCAmelCase_ = torch.Generator(device=_a ).manual_seed(_a ) lowerCAmelCase_ = { "image": image, "prompt": "a cat and a frog", "generator": generator, "num_inference_steps": 2, "inpaint_strength": 1.0, "guidance_scale": 6.0, "decode_latents": True, "output_type": "numpy", } return inputs def __a ( self ) -> str: if not hasattr(self.pipeline_class , "_optional_components" ): return lowerCAmelCase_ = self.get_dummy_components() lowerCAmelCase_ = self.pipeline_class(**_a ) pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(_a , _a , _a ) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} ) lowerCAmelCase_ = self.get_dummy_inputs(_a ) lowerCAmelCase_ = pipe(**_a )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(_a ) lowerCAmelCase_ = self.pipeline_class.from_pretrained(_a ) pipe_loaded.to(_a ) pipe_loaded.set_progress_bar_config(disable=_a ) for optional_component in pipe._optional_components: self.assertTrue( getattr(_a , _a ) is None , f"`{optional_component}` did not stay set to None after loading." 
, ) lowerCAmelCase_ = self.get_dummy_inputs(_a ) lowerCAmelCase_ = pipe_loaded(**_a )[0] lowerCAmelCase_ = np.abs(output - output_loaded ).max() self.assertLess(_a , 1E-4 ) def __a ( self ) -> Any: lowerCAmelCase_ = "cpu" lowerCAmelCase_ = self.get_dummy_components() lowerCAmelCase_ = self.pipeline_class(**_a ) pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) lowerCAmelCase_ = self.get_dummy_mask_inputs(_a ) lowerCAmelCase_ = pipe.generate_mask(**_a ) lowerCAmelCase_ = mask[0, -3:, -3:] self.assertEqual(mask.shape , (1, 16, 16) ) lowerCAmelCase_ = np.array([0] * 9 ) lowerCAmelCase_ = np.abs(mask_slice.flatten() - expected_slice ).max() self.assertLessEqual(_a , 1E-3 ) self.assertEqual(mask[0, -3, -4] , 0 ) def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = "cpu" lowerCAmelCase_ = self.get_dummy_components() lowerCAmelCase_ = self.pipeline_class(**_a ) pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) lowerCAmelCase_ = self.get_dummy_inversion_inputs(_a ) lowerCAmelCase_ = pipe.invert(**_a ).images lowerCAmelCase_ = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) lowerCAmelCase_ = np.array( [0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , ) lowerCAmelCase_ = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_a , 1E-3 ) def __a ( self ) -> List[str]: super().test_inference_batch_single_identical(expected_max_diff=5E-3 ) def __a ( self ) -> Optional[int]: lowerCAmelCase_ = "cpu" lowerCAmelCase_ = self.get_dummy_components() lowerCAmelCase_ = {"beta_start": 0.0_0_0_8_5, "beta_end": 0.0_1_2, "beta_schedule": "scaled_linear"} lowerCAmelCase_ = DPMSolverMultistepScheduler(**_a ) lowerCAmelCase_ = DPMSolverMultistepInverseScheduler(**_a ) lowerCAmelCase_ = self.pipeline_class(**_a ) pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) lowerCAmelCase_ = self.get_dummy_inversion_inputs(_a ) lowerCAmelCase_ = pipe.invert(**_a ).images lowerCAmelCase_ = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) lowerCAmelCase_ = np.array( [0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , ) lowerCAmelCase_ = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_a , 1E-3 ) @require_torch_gpu @slow class __magic_name__ (unittest.TestCase ): def __a ( self ) -> Dict: super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def __a ( cls ) -> Optional[int]: lowerCAmelCase_ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png" ) lowerCAmelCase_ = raw_image.convert("RGB" ).resize((768, 768) ) lowerCAmelCase_ = raw_image def __a ( self ) -> int: lowerCAmelCase_ = torch.manual_seed(0 ) lowerCAmelCase_ = StableDiffusionDiffEditPipeline.from_pretrained( "stabilityai/stable-diffusion-2-1" , safety_checker=_a , torch_dtype=torch.floataa ) lowerCAmelCase_ = DDIMScheduler.from_config(pipe.scheduler.config ) lowerCAmelCase_ = DDIMInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=_a ) lowerCAmelCase_ = "a bowl of fruit" lowerCAmelCase_ = "a bowl of pears" lowerCAmelCase_ = pipe.generate_mask( image=self.raw_image , source_prompt=_a , target_prompt=_a , generator=_a , ) lowerCAmelCase_ = pipe.invert( prompt=_a , image=self.raw_image , inpaint_strength=0.7 , generator=_a ).latents lowerCAmelCase_ = pipe( prompt=_a , mask_image=_a , image_latents=_a , generator=_a , 
negative_prompt=_a , inpaint_strength=0.7 , output_type="numpy" , ).images[0] lowerCAmelCase_ = ( np.array( load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/diffedit/pears.png" ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5E-1 def __a ( self ) -> Optional[int]: lowerCAmelCase_ = torch.manual_seed(0 ) lowerCAmelCase_ = StableDiffusionDiffEditPipeline.from_pretrained( "stabilityai/stable-diffusion-2-1" , safety_checker=_a , torch_dtype=torch.floataa ) lowerCAmelCase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) lowerCAmelCase_ = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=_a ) lowerCAmelCase_ = "a bowl of fruit" lowerCAmelCase_ = "a bowl of pears" lowerCAmelCase_ = pipe.generate_mask( image=self.raw_image , source_prompt=_a , target_prompt=_a , generator=_a , ) lowerCAmelCase_ = pipe.invert( prompt=_a , image=self.raw_image , inpaint_strength=0.7 , generator=_a , num_inference_steps=25 , ).latents lowerCAmelCase_ = pipe( prompt=_a , mask_image=_a , image_latents=_a , generator=_a , negative_prompt=_a , inpaint_strength=0.7 , num_inference_steps=25 , output_type="numpy" , ).images[0] lowerCAmelCase_ = ( np.array( load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/diffedit/pears.png" ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5E-1
import datasets lowerCamelCase__ = '''\ @InProceedings{conneau2018xnli, author = "Conneau, Alexis and Rinott, Ruty and Lample, Guillaume and Williams, Adina and Bowman, Samuel R. and Schwenk, Holger and Stoyanov, Veselin", title = "XNLI: Evaluating Cross-lingual Sentence Representations", booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", year = "2018", publisher = "Association for Computational Linguistics", location = "Brussels, Belgium", } ''' lowerCamelCase__ = '''\ XNLI is a subset of a few thousand examples from MNLI which has been translated into a 14 different languages (some low-ish resource). As with MNLI, the goal is to predict textual entailment (does sentence A imply/contradict/neither sentence B) and is a classification task (given two sentences, predict one of three labels). ''' lowerCamelCase__ = ''' Computes XNLI score which is just simple accuracy. Args: predictions: Predicted labels. references: Ground truth labels. Returns: \'accuracy\': accuracy Examples: >>> predictions = [0, 1] >>> references = [0, 1] >>> xnli_metric = datasets.load_metric("xnli") >>> results = xnli_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0} ''' def A(__a: Dict , __a: Union[str, Any] ): return (preds == labels).mean() @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __magic_name__ (datasets.Metric ): def __a ( self ) -> Tuple: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ), "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ), } ) , codebase_urls=[] , reference_urls=[] , format="numpy" , ) def __a ( self , _a , _a ) -> List[str]: return {"accuracy": simple_accuracy(_a , _a )}
import pytest

from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict


@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the
    # "dataset_name" field even if it's deprecated. This way old versions of `datasets` can still reload
    # dataset_infos.json files.
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset lowerCamelCase__ = '''bert-base-cased''' lowerCamelCase__ = '''google/pegasus-xsum''' lowerCamelCase__ = [''' Sam ate lunch today.''', '''Sams lunch ingredients.'''] lowerCamelCase__ = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee'''] lowerCamelCase__ = '''patrickvonplaten/t5-tiny-random''' lowerCamelCase__ = '''sshleifer/bart-tiny-random''' lowerCamelCase__ = '''sshleifer/tiny-mbart''' lowerCamelCase__ = '''sshleifer/tiny-marian-en-de''' def A(__a: Path , __a: list ): lowerCAmelCase_ = "\n".join(__a ) Path(__a ).open("w" ).writelines(__a ) def A(__a: str ): for split in ["train", "val", "test"]: _dump_articles(os.path.join(__a , F"{split}.source" ) , __a ) _dump_articles(os.path.join(__a , F"{split}.target" ) , __a ) return tmp_dir class __magic_name__ (__lowercase ): @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) @slow def __a ( self , _a ) -> Dict: lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a ) lowerCAmelCase_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in ARTICLES ) lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in SUMMARIES ) lowerCAmelCase_ = 4 lowerCAmelCase_ = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated lowerCAmelCase_ , lowerCAmelCase_ = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error. lowerCAmelCase_ = SeqaSeqDataset( _a , data_dir=_a , type_path="train" , max_source_length=_a , max_target_length=_a , src_lang=_a , tgt_lang=_a , ) lowerCAmelCase_ = DataLoader(_a , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert isinstance(_a , _a ) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. 
assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place lowerCAmelCase_ = shift_tokens_right(batch["labels"] , tokenizer.pad_token_id ) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED] ) def __a ( self , _a ) -> str: lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a ) lowerCAmelCase_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in ARTICLES ) lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in SUMMARIES ) lowerCAmelCase_ = 4 lowerCAmelCase_ = LegacySeqaSeqDataset( _a , data_dir=_a , type_path="train" , max_source_length=20 , max_target_length=_a , ) lowerCAmelCase_ = DataLoader(_a , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 20 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" ) lowerCAmelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) lowerCAmelCase_ = tmp_dir.joinpath("train.source" ).open().readlines() lowerCAmelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) pack_data_dir(_a , _a , 128 , _a ) lowerCAmelCase_ = {x.name for x in tmp_dir.iterdir()} lowerCAmelCase_ = {x.name for x in save_dir.iterdir()} lowerCAmelCase_ = save_dir.joinpath("train.source" ).open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(_a ) < len(_a ) assert len(_a ) == 1 assert len(packed_examples[0] ) == sum(len(_a ) for x in orig_examples ) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="This test requires fairseq" ) def __a ( self ) -> Any: if not FAIRSEQ_AVAILABLE: return lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset(max_len=64 ) lowerCAmelCase_ = 64 lowerCAmelCase_ = ds.make_dynamic_sampler(_a , required_batch_size_multiple=_a ) lowerCAmelCase_ = [len(_a ) for x in batch_sampler] assert len(set(_a ) ) > 1 # it's not dynamic batch size if every batch is the same length assert sum(_a ) == len(_a ) # no dropped or added examples lowerCAmelCase_ = DataLoader(_a , batch_sampler=_a , collate_fn=ds.collate_fn , num_workers=2 ) lowerCAmelCase_ = [] lowerCAmelCase_ = [] for batch in data_loader: lowerCAmelCase_ = batch["input_ids"].shape lowerCAmelCase_ = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple lowerCAmelCase_ = np.product(batch["input_ids"].shape ) num_src_per_batch.append(_a ) if num_src_tokens > (max_tokens * 1.1): failures.append(_a ) assert num_src_per_batch[0] == max(_a ) if failures: raise 
AssertionError(f"too many tokens in {len(_a )} batches" ) def __a ( self ) -> List[str]: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset(max_len=512 ) lowerCAmelCase_ = 2 lowerCAmelCase_ = ds.make_sortish_sampler(_a , shuffle=_a ) lowerCAmelCase_ = DataLoader(_a , batch_size=_a , collate_fn=ds.collate_fn , num_workers=2 ) lowerCAmelCase_ = DataLoader(_a , batch_size=_a , collate_fn=ds.collate_fn , num_workers=2 , sampler=_a ) lowerCAmelCase_ = tokenizer.pad_token_id def count_pad_tokens(_a , _a="input_ids" ): return [batch[k].eq(_a ).sum().item() for batch in data_loader] assert sum(count_pad_tokens(_a , k="labels" ) ) < sum(count_pad_tokens(_a , k="labels" ) ) assert sum(count_pad_tokens(_a ) ) < sum(count_pad_tokens(_a ) ) assert len(_a ) == len(_a ) def __a ( self , _a=1000 , _a=128 ) -> str: if os.getenv("USE_REAL_DATA" , _a ): lowerCAmelCase_ = "examples/seq2seq/wmt_en_ro" lowerCAmelCase_ = max_len * 2 * 64 if not Path(_a ).joinpath("train.len" ).exists(): save_len_file(_a , _a ) else: lowerCAmelCase_ = "examples/seq2seq/test_data/wmt_en_ro" lowerCAmelCase_ = max_len * 4 save_len_file(_a , _a ) lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a ) lowerCAmelCase_ = SeqaSeqDataset( _a , data_dir=_a , type_path="train" , max_source_length=_a , max_target_length=_a , n_obs=_a , ) return ds, max_tokens, tokenizer def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset() lowerCAmelCase_ = set(DistributedSortishSampler(_a , 256 , num_replicas=2 , rank=0 , add_extra_examples=_a ) ) lowerCAmelCase_ = set(DistributedSortishSampler(_a , 256 , num_replicas=2 , rank=1 , add_extra_examples=_a ) ) assert idsa.intersection(_a ) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) def __a ( self , _a ) -> List[str]: lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a , use_fast=_a ) if tok_name == MBART_TINY: lowerCAmelCase_ = SeqaSeqDataset( _a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , src_lang="EN" , tgt_lang="FR" , ) lowerCAmelCase_ = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: lowerCAmelCase_ = SeqaSeqDataset( _a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , ) lowerCAmelCase_ = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(_a ) == 1 if tok_name == BART_TINY else len(_a ) == 0
from abc import ABC, abstractmethod
from typing import Optional, Union

from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike


class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
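# Minimal sketch (not part of the library) of how a concrete reader would plug
# into the abstract interface above; `InMemoryExampleReader` is hypothetical,
# while `Dataset.from_dict` is real `datasets` API.
class InMemoryExampleReader(AbstractDatasetReader):
    def read(self) -> Dataset:
        # A real reader (e.g. for CSV or JSON) would build the dataset from
        # self.path_or_paths, honoring self.features, self.cache_dir, etc.
        return Dataset.from_dict({"text": ["hello", "world"]})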
def find_min(arr):
    """Partition `arr` into two subsets so that the difference of their sums is
    minimized, and return that minimum difference (subset-sum DP)."""
    n = len(arr)
    s = sum(arr)
    # dp[i][j] is True iff some subset of the first i elements sums to exactly j
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    for i in range(n + 1):
        dp[i][0] = True  # the empty subset always sums to 0
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]  # skip element i
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]  # or take it
    # The best achievable subset sum j <= s/2 gives difference s - 2*j
    for j in range(s // 2, -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
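# Illustrative check of find_min (added for clarity; not in the original
# file): [1, 6, 11, 5] splits best as {1, 6, 5} vs {11}, with sums 12 and 11,
# so the minimum difference is 1.
assert find_min([1, 6, 11, 5]) == 1
assert find_min([2, 2]) == 0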
def get_set_bits_count(number: int) -> int:
    """Count the set bits (1s) in the binary representation of a non-negative integer.

    >>> get_set_bits_count(25)
    3
    >>> get_set_bits_count(0)
    0
    """
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # Brian Kernighan's trick: `number &= number - 1` clears the lowest set
        # bit, so instead of looping over all 32 bit positions the loop runs
        # only once per `1` bit.
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
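# Cross-check sketch (an addition, not in the original file): Python's built-in
# bin() gives the same answer, which makes a handy property test.
for value in (0, 1, 25, 37, 2**20 - 1):
    assert get_set_bits_count(value) == bin(value).count("1")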
# Usage: # ./gen-card-facebook-wmt19.py import os from pathlib import Path def A(__a: Any , __a: Union[str, Any] , __a: List[str] ): lowerCAmelCase_ = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, oder?", } # BLUE scores as follows: # "pair": [fairseq, transformers] lowerCAmelCase_ = { "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"], "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"], "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"], "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"], } lowerCAmelCase_ = F"{src_lang}-{tgt_lang}" lowerCAmelCase_ = F"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. 
For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n" os.makedirs(__a , exist_ok=__a ) lowerCAmelCase_ = os.path.join(__a , "README.md" ) print(F"Generating {path}" ) with open(__a , "w" , encoding="utf-8" ) as f: f.write(__a ) # make sure we are under the root of the project lowerCamelCase__ = Path(__file__).resolve().parent.parent.parent lowerCamelCase__ = repo_dir / '''model_cards''' for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = model_name.split('''-''') lowerCamelCase__ = model_cards_dir / '''facebook''' / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
22
1
def solution():
    """Count the Sundays that fell on the first of the month from 1 Jan 1901 to 31 Dec 2000."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6  # 6 January 1901 was the first Sunday of the century
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7  # jump to the next Sunday
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            # leap year: February has 29 days
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    print(solution())
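A cross-check of the date arithmetic above using only the standard library (a sketch, not part of the original script): `datetime.date.weekday()` returns 6 for Sunday, so counting month-firsts directly should agree with `solution()`.

from datetime import date

sundays = sum(
    1
    for year in range(1901, 2001)
    for month in range(1, 13)
    if date(year, month, 1).weekday() == 6  # Monday is 0, Sunday is 6
)
print(sundays)  # 171, matching solution()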
22
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split a string into sentences, one per line."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
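A usage sketch for the sentence splitter (assumes `nltk` is installed and the `punkt` model was downloaded above; the sample text is made up):

text = "Hello there. <n>How are you today? I hope all is well."
print(add_newline_to_end_of_each_sentence(text))
# Hello there.
# How are you today?
# I hope all is well.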
22
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase__ = { '''configuration_time_series_transformer''': [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimeSeriesTransformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TimeSeriesTransformerForPrediction''', '''TimeSeriesTransformerModel''', '''TimeSeriesTransformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimeSeriesTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, TimeSeriesTransformerPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
22
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) lowerCamelCase__ = { '''configuration_encodec''': [ '''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''EncodecConfig''', ], '''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ '''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''', '''EncodecModel''', '''EncodecPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
22
1
import random import unittest import torch from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class __magic_name__ (__lowercase , __lowercase , unittest.TestCase ): lowerCamelCase__ = IFInpaintingSuperResolutionPipeline lowerCamelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''} lowerCamelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} ) lowerCamelCase__ = PipelineTesterMixin.required_optional_params - {'''latents'''} def __a ( self ) -> List[str]: return self._get_superresolution_dummy_components() def __a ( self , _a , _a=0 ) -> Any: if str(_a ).startswith("mps" ): lowerCAmelCase_ = torch.manual_seed(_a ) else: lowerCAmelCase_ = torch.Generator(device=_a ).manual_seed(_a ) lowerCAmelCase_ = floats_tensor((1, 3, 16, 16) , rng=random.Random(_a ) ).to(_a ) lowerCAmelCase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_a ) ).to(_a ) lowerCAmelCase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_a ) ).to(_a ) lowerCAmelCase_ = { "prompt": "A painting of a squirrel eating a burger", "image": image, "original_image": original_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def __a ( self ) -> Any: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def __a ( self ) -> List[Any]: self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" ) def __a ( self ) -> int: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def __a ( self ) -> Optional[int]: self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __a ( self ) -> List[str]: self._test_save_load_local() def __a ( self ) -> List[Any]: self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
22
import logging from transformers import PretrainedConfig lowerCamelCase__ = logging.getLogger(__name__) lowerCamelCase__ = { '''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''', } class __magic_name__ (__lowercase ): lowerCamelCase__ = '''bertabs''' def __init__( self , _a=30522 , _a=512 , _a=6 , _a=512 , _a=8 , _a=512 , _a=0.2 , _a=6 , _a=768 , _a=8 , _a=2048 , _a=0.2 , **_a , ) -> List[Any]: super().__init__(**_a ) lowerCAmelCase_ = vocab_size lowerCAmelCase_ = max_pos lowerCAmelCase_ = enc_layers lowerCAmelCase_ = enc_hidden_size lowerCAmelCase_ = enc_heads lowerCAmelCase_ = enc_ff_size lowerCAmelCase_ = enc_dropout lowerCAmelCase_ = dec_layers lowerCAmelCase_ = dec_hidden_size lowerCAmelCase_ = dec_heads lowerCAmelCase_ = dec_ff_size lowerCAmelCase_ = dec_dropout
22
1
import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset lowerCamelCase__ = '''bert-base-cased''' lowerCamelCase__ = '''google/pegasus-xsum''' lowerCamelCase__ = [''' Sam ate lunch today.''', '''Sams lunch ingredients.'''] lowerCamelCase__ = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee'''] lowerCamelCase__ = '''patrickvonplaten/t5-tiny-random''' lowerCamelCase__ = '''sshleifer/bart-tiny-random''' lowerCamelCase__ = '''sshleifer/tiny-mbart''' lowerCamelCase__ = '''sshleifer/tiny-marian-en-de''' def A(__a: Path , __a: list ): lowerCAmelCase_ = "\n".join(__a ) Path(__a ).open("w" ).writelines(__a ) def A(__a: str ): for split in ["train", "val", "test"]: _dump_articles(os.path.join(__a , F"{split}.source" ) , __a ) _dump_articles(os.path.join(__a , F"{split}.target" ) , __a ) return tmp_dir class __magic_name__ (__lowercase ): @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) @slow def __a ( self , _a ) -> Dict: lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a ) lowerCAmelCase_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in ARTICLES ) lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in SUMMARIES ) lowerCAmelCase_ = 4 lowerCAmelCase_ = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated lowerCAmelCase_ , lowerCAmelCase_ = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error. lowerCAmelCase_ = SeqaSeqDataset( _a , data_dir=_a , type_path="train" , max_source_length=_a , max_target_length=_a , src_lang=_a , tgt_lang=_a , ) lowerCAmelCase_ = DataLoader(_a , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert isinstance(_a , _a ) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. 
assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place lowerCAmelCase_ = shift_tokens_right(batch["labels"] , tokenizer.pad_token_id ) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED] ) def __a ( self , _a ) -> str: lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a ) lowerCAmelCase_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in ARTICLES ) lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in SUMMARIES ) lowerCAmelCase_ = 4 lowerCAmelCase_ = LegacySeqaSeqDataset( _a , data_dir=_a , type_path="train" , max_source_length=20 , max_target_length=_a , ) lowerCAmelCase_ = DataLoader(_a , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 20 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" ) lowerCAmelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) lowerCAmelCase_ = tmp_dir.joinpath("train.source" ).open().readlines() lowerCAmelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) pack_data_dir(_a , _a , 128 , _a ) lowerCAmelCase_ = {x.name for x in tmp_dir.iterdir()} lowerCAmelCase_ = {x.name for x in save_dir.iterdir()} lowerCAmelCase_ = save_dir.joinpath("train.source" ).open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(_a ) < len(_a ) assert len(_a ) == 1 assert len(packed_examples[0] ) == sum(len(_a ) for x in orig_examples ) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="This test requires fairseq" ) def __a ( self ) -> Any: if not FAIRSEQ_AVAILABLE: return lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset(max_len=64 ) lowerCAmelCase_ = 64 lowerCAmelCase_ = ds.make_dynamic_sampler(_a , required_batch_size_multiple=_a ) lowerCAmelCase_ = [len(_a ) for x in batch_sampler] assert len(set(_a ) ) > 1 # it's not dynamic batch size if every batch is the same length assert sum(_a ) == len(_a ) # no dropped or added examples lowerCAmelCase_ = DataLoader(_a , batch_sampler=_a , collate_fn=ds.collate_fn , num_workers=2 ) lowerCAmelCase_ = [] lowerCAmelCase_ = [] for batch in data_loader: lowerCAmelCase_ = batch["input_ids"].shape lowerCAmelCase_ = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple lowerCAmelCase_ = np.product(batch["input_ids"].shape ) num_src_per_batch.append(_a ) if num_src_tokens > (max_tokens * 1.1): failures.append(_a ) assert num_src_per_batch[0] == max(_a ) if failures: raise 
AssertionError(f"too many tokens in {len(_a )} batches" ) def __a ( self ) -> List[str]: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset(max_len=512 ) lowerCAmelCase_ = 2 lowerCAmelCase_ = ds.make_sortish_sampler(_a , shuffle=_a ) lowerCAmelCase_ = DataLoader(_a , batch_size=_a , collate_fn=ds.collate_fn , num_workers=2 ) lowerCAmelCase_ = DataLoader(_a , batch_size=_a , collate_fn=ds.collate_fn , num_workers=2 , sampler=_a ) lowerCAmelCase_ = tokenizer.pad_token_id def count_pad_tokens(_a , _a="input_ids" ): return [batch[k].eq(_a ).sum().item() for batch in data_loader] assert sum(count_pad_tokens(_a , k="labels" ) ) < sum(count_pad_tokens(_a , k="labels" ) ) assert sum(count_pad_tokens(_a ) ) < sum(count_pad_tokens(_a ) ) assert len(_a ) == len(_a ) def __a ( self , _a=1000 , _a=128 ) -> str: if os.getenv("USE_REAL_DATA" , _a ): lowerCAmelCase_ = "examples/seq2seq/wmt_en_ro" lowerCAmelCase_ = max_len * 2 * 64 if not Path(_a ).joinpath("train.len" ).exists(): save_len_file(_a , _a ) else: lowerCAmelCase_ = "examples/seq2seq/test_data/wmt_en_ro" lowerCAmelCase_ = max_len * 4 save_len_file(_a , _a ) lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a ) lowerCAmelCase_ = SeqaSeqDataset( _a , data_dir=_a , type_path="train" , max_source_length=_a , max_target_length=_a , n_obs=_a , ) return ds, max_tokens, tokenizer def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset() lowerCAmelCase_ = set(DistributedSortishSampler(_a , 256 , num_replicas=2 , rank=0 , add_extra_examples=_a ) ) lowerCAmelCase_ = set(DistributedSortishSampler(_a , 256 , num_replicas=2 , rank=1 , add_extra_examples=_a ) ) assert idsa.intersection(_a ) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) def __a ( self , _a ) -> List[str]: lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a , use_fast=_a ) if tok_name == MBART_TINY: lowerCAmelCase_ = SeqaSeqDataset( _a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , src_lang="EN" , tgt_lang="FR" , ) lowerCAmelCase_ = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: lowerCAmelCase_ = SeqaSeqDataset( _a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , ) lowerCAmelCase_ = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(_a ) == 1 if tok_name == BART_TINY else len(_a ) == 0
22
import argparse import io import requests import torch from omegaconf import OmegaConf from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, create_vae_diffusers_config, renew_vae_attention_paths, renew_vae_resnet_paths, ) def A(__a: Tuple , __a: Union[str, Any] ): lowerCAmelCase_ = checkpoint lowerCAmelCase_ = {} lowerCAmelCase_ = vae_state_dict["encoder.conv_in.weight"] lowerCAmelCase_ = vae_state_dict["encoder.conv_in.bias"] lowerCAmelCase_ = vae_state_dict["encoder.conv_out.weight"] lowerCAmelCase_ = vae_state_dict["encoder.conv_out.bias"] lowerCAmelCase_ = vae_state_dict["encoder.norm_out.weight"] lowerCAmelCase_ = vae_state_dict["encoder.norm_out.bias"] lowerCAmelCase_ = vae_state_dict["decoder.conv_in.weight"] lowerCAmelCase_ = vae_state_dict["decoder.conv_in.bias"] lowerCAmelCase_ = vae_state_dict["decoder.conv_out.weight"] lowerCAmelCase_ = vae_state_dict["decoder.conv_out.bias"] lowerCAmelCase_ = vae_state_dict["decoder.norm_out.weight"] lowerCAmelCase_ = vae_state_dict["decoder.norm_out.bias"] lowerCAmelCase_ = vae_state_dict["quant_conv.weight"] lowerCAmelCase_ = vae_state_dict["quant_conv.bias"] lowerCAmelCase_ = vae_state_dict["post_quant_conv.weight"] lowerCAmelCase_ = vae_state_dict["post_quant_conv.bias"] # Retrieves the keys for the encoder down blocks only lowerCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} ) lowerCAmelCase_ = { layer_id: [key for key in vae_state_dict if F"down.{layer_id}" in key] for layer_id in range(__a ) } # Retrieves the keys for the decoder up blocks only lowerCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} ) lowerCAmelCase_ = { layer_id: [key for key in vae_state_dict if F"up.{layer_id}" in key] for layer_id in range(__a ) } for i in range(__a ): lowerCAmelCase_ = [key for key in down_blocks[i] if F"down.{i}" in key and F"down.{i}.downsample" not in key] if F"encoder.down.{i}.downsample.conv.weight" in vae_state_dict: lowerCAmelCase_ = vae_state_dict.pop( F"encoder.down.{i}.downsample.conv.weight" ) lowerCAmelCase_ = vae_state_dict.pop( F"encoder.down.{i}.downsample.conv.bias" ) lowerCAmelCase_ = renew_vae_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"down.{i}.block", "new": F"down_blocks.{i}.resnets"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) lowerCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.block" in key] lowerCAmelCase_ = 2 for i in range(1 , num_mid_res_blocks + 1 ): lowerCAmelCase_ = [key for key in mid_resnets if F"encoder.mid.block_{i}" in key] lowerCAmelCase_ = renew_vae_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) lowerCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.attn" in key] lowerCAmelCase_ = renew_vae_attention_paths(__a ) lowerCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) conv_attn_to_linear(__a ) for i in range(__a ): lowerCAmelCase_ = num_up_blocks - 1 - i lowerCAmelCase_ = [ key for key in up_blocks[block_id] if F"up.{block_id}" in key and F"up.{block_id}.upsample" not in key ] if F"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict: lowerCAmelCase_ = vae_state_dict[ 
F"decoder.up.{block_id}.upsample.conv.weight" ] lowerCAmelCase_ = vae_state_dict[ F"decoder.up.{block_id}.upsample.conv.bias" ] lowerCAmelCase_ = renew_vae_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"up.{block_id}.block", "new": F"up_blocks.{i}.resnets"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) lowerCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.block" in key] lowerCAmelCase_ = 2 for i in range(1 , num_mid_res_blocks + 1 ): lowerCAmelCase_ = [key for key in mid_resnets if F"decoder.mid.block_{i}" in key] lowerCAmelCase_ = renew_vae_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) lowerCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.attn" in key] lowerCAmelCase_ = renew_vae_attention_paths(__a ) lowerCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) conv_attn_to_linear(__a ) return new_checkpoint def A(__a: str , __a: str , ): # Only support V1 lowerCAmelCase_ = requests.get( " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" ) lowerCAmelCase_ = io.BytesIO(r.content ) lowerCAmelCase_ = OmegaConf.load(__a ) lowerCAmelCase_ = 512 lowerCAmelCase_ = "cuda" if torch.cuda.is_available() else "cpu" if checkpoint_path.endswith("safetensors" ): from safetensors import safe_open lowerCAmelCase_ = {} with safe_open(__a , framework="pt" , device="cpu" ) as f: for key in f.keys(): lowerCAmelCase_ = f.get_tensor(__a ) else: lowerCAmelCase_ = torch.load(__a , map_location=__a )["state_dict"] # Convert the VAE model. lowerCAmelCase_ = create_vae_diffusers_config(__a , image_size=__a ) lowerCAmelCase_ = custom_convert_ldm_vae_checkpoint(__a , __a ) lowerCAmelCase_ = AutoencoderKL(**__a ) vae.load_state_dict(__a ) vae.save_pretrained(__a ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''') parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''') lowerCamelCase__ = parser.parse_args() vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
22
1
import subprocess import sys from transformers import BertConfig, BertModel, BertTokenizer, pipeline from transformers.testing_utils import TestCasePlus, require_torch class __magic_name__ (__lowercase ): @require_torch def __a ( self ) -> Dict: # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before # `transformers` is loaded, and it's too late for inside pytest - so we are changing it # while running an external program # python one-liner segments # this must be loaded before socket.socket is monkey-patched lowerCAmelCase_ = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n " lowerCAmelCase_ = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n " lowerCAmelCase_ = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n " # Force fetching the files so that we can use the cache lowerCAmelCase_ = "hf-internal-testing/tiny-random-bert" BertConfig.from_pretrained(_a ) BertModel.from_pretrained(_a ) BertTokenizer.from_pretrained(_a ) pipeline(task="fill-mask" , model=_a ) # baseline - just load from_pretrained with normal network lowerCAmelCase_ = [sys.executable, "-c", "\n".join([load, run, mock] )] # should succeed lowerCAmelCase_ = self.get_env() # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files lowerCAmelCase_ = "1" lowerCAmelCase_ = subprocess.run(_a , env=_a , check=_a , capture_output=_a ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() ) @require_torch def __a ( self ) -> List[str]: # python one-liner segments # this must be loaded before socket.socket is monkey-patched lowerCAmelCase_ = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n " lowerCAmelCase_ = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n " lowerCAmelCase_ = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n " # Force fetching the files so that we can use the cache lowerCAmelCase_ = "hf-internal-testing/tiny-random-bert" BertConfig.from_pretrained(_a ) BertModel.from_pretrained(_a ) BertTokenizer.from_pretrained(_a ) pipeline(task="fill-mask" , model=_a ) # baseline - just load from_pretrained with normal network lowerCAmelCase_ = [sys.executable, "-c", "\n".join([load, run, mock] )] # should succeed lowerCAmelCase_ = self.get_env() lowerCAmelCase_ = subprocess.run(_a , env=_a , check=_a , capture_output=_a ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() ) @require_torch def __a ( self ) -> Optional[Any]: # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before # `transformers` is loaded, and it's too late for inside pytest - so we are changing it # while running an external program # python one-liner segments # this must be loaded before socket.socket is monkey-patched lowerCAmelCase_ = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n " lowerCAmelCase_ = "\nmname = 
\"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n " lowerCAmelCase_ = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n " # baseline - just load from_pretrained with normal network lowerCAmelCase_ = [sys.executable, "-c", "\n".join([load, run] )] # should succeed lowerCAmelCase_ = self.get_env() lowerCAmelCase_ = subprocess.run(_a , env=_a , check=_a , capture_output=_a ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() ) # next emulate no network lowerCAmelCase_ = [sys.executable, "-c", "\n".join([load, mock, run] )] # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this. # env["TRANSFORMERS_OFFLINE"] = "0" # result = subprocess.run(cmd, env=env, check=False, capture_output=True) # self.assertEqual(result.returncode, 1, result.stderr) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files lowerCAmelCase_ = "1" lowerCAmelCase_ = subprocess.run(_a , env=_a , check=_a , capture_output=_a ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() ) @require_torch def __a ( self ) -> Optional[Any]: lowerCAmelCase_ = "\nfrom transformers import pipeline\n " lowerCAmelCase_ = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n " lowerCAmelCase_ = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n " lowerCAmelCase_ = self.get_env() lowerCAmelCase_ = "1" lowerCAmelCase_ = [sys.executable, "-c", "\n".join([load, mock, run] )] lowerCAmelCase_ = subprocess.run(_a , env=_a , check=_a , capture_output=_a ) self.assertEqual(result.returncode , 1 , result.stderr ) self.assertIn( "You cannot infer task automatically within `pipeline` when using offline mode" , result.stderr.decode().replace("\n" , "" ) , ) @require_torch def __a ( self ) -> Optional[Any]: lowerCAmelCase_ = "\nfrom transformers import AutoModel\n " lowerCAmelCase_ = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n " # baseline - just load from_pretrained with normal network lowerCAmelCase_ = [sys.executable, "-c", "\n".join([load, run] )] # should succeed lowerCAmelCase_ = self.get_env() lowerCAmelCase_ = subprocess.run(_a , env=_a , check=_a , capture_output=_a ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() ) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files lowerCAmelCase_ = "1" lowerCAmelCase_ = subprocess.run(_a , env=_a , check=_a , capture_output=_a ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() )
22
def generate_large_matrix() -> list[list[int]]:
    """Build a 1000x1000 grid whose rows and columns are sorted in decreasing order."""
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Assert that every row and every column of the grid is sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Binary-search for the index of the first negative number in a decreasing array."""
    left = 0
    right = len(array) - 1
    # edge cases such as no values or all numbers are negative
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # num must be negative and the preceding entry must be non-negative
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # no negative numbers, so return the last index of the array + 1, i.e. its length
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negatives by narrowing the per-row search bound as rows get smaller."""
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count negatives by scanning every value."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Count negatives by scanning each row but stopping at its first negative value."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
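Hand-verified spot-checks against the small fixtures above (a sketch; the expected counts were tallied manually):

assert count_negatives_binary_search([[3, 2], [1, 0]]) == 0
assert count_negatives_binary_search(
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
) == 8
assert count_negatives_brute_force([[7, 7, 6], [-1, -2, -3]]) == 3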
22
1
import inspect import unittest from transformers import MobileViTVaConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel from transformers.models.mobilevitva.modeling_mobilevitva import ( MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, make_divisible, ) if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class __magic_name__ (__lowercase ): def __a ( self ) -> Optional[Any]: lowerCAmelCase_ = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(_a , "width_multiplier" ) ) class __magic_name__ : def __init__( self , _a , _a=13 , _a=64 , _a=2 , _a=3 , _a="swish" , _a=3 , _a=32 , _a=0.1 , _a=0.0_2 , _a=True , _a=True , _a=10 , _a=None , _a=0.2_5 , _a=0.0 , _a=0.0 , ) -> Optional[int]: lowerCAmelCase_ = parent lowerCAmelCase_ = batch_size lowerCAmelCase_ = image_size lowerCAmelCase_ = patch_size lowerCAmelCase_ = num_channels lowerCAmelCase_ = make_divisible(512 * width_multiplier , divisor=8 ) lowerCAmelCase_ = hidden_act lowerCAmelCase_ = conv_kernel_size lowerCAmelCase_ = output_stride lowerCAmelCase_ = classifier_dropout_prob lowerCAmelCase_ = use_labels lowerCAmelCase_ = is_training lowerCAmelCase_ = num_labels lowerCAmelCase_ = initializer_range lowerCAmelCase_ = scope lowerCAmelCase_ = width_multiplier lowerCAmelCase_ = ffn_dropout lowerCAmelCase_ = attn_dropout def __a ( self ) -> Optional[Any]: lowerCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase_ = None lowerCAmelCase_ = None if self.use_labels: lowerCAmelCase_ = ids_tensor([self.batch_size] , self.num_labels ) lowerCAmelCase_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) lowerCAmelCase_ = self.get_config() return config, pixel_values, labels, pixel_labels def __a ( self ) -> Any: return MobileViTVaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , ) def __a ( self , _a , _a , _a , _a ) -> List[str]: lowerCAmelCase_ = MobileViTVaModel(config=_a ) model.to(_a ) model.eval() lowerCAmelCase_ = model(_a ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def __a ( self , _a , _a , _a , _a ) -> Dict: lowerCAmelCase_ = self.num_labels lowerCAmelCase_ = MobileViTVaForImageClassification(_a ) model.to(_a ) model.eval() lowerCAmelCase_ = model(_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self , _a , _a , _a , _a ) -> Tuple: lowerCAmelCase_ = self.num_labels lowerCAmelCase_ = MobileViTVaForSemanticSegmentation(_a ) model.to(_a ) model.eval() 
lowerCAmelCase_ = model(_a ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) lowerCAmelCase_ = model(_a , labels=_a ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def __a ( self ) -> Dict: lowerCAmelCase_ = self.prepare_config_and_inputs() lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = config_and_inputs lowerCAmelCase_ = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class __magic_name__ (__lowercase , __lowercase , unittest.TestCase ): lowerCamelCase__ = ( (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation) if is_torch_available() else () ) lowerCamelCase__ = ( { '''feature-extraction''': MobileViTVaModel, '''image-classification''': MobileViTVaForImageClassification, '''image-segmentation''': MobileViTVaForSemanticSegmentation, } if is_torch_available() else {} ) lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False def __a ( self ) -> Dict: lowerCAmelCase_ = MobileViTVaModelTester(self ) lowerCAmelCase_ = MobileViTVaConfigTester(self , config_class=_a , has_text_modality=_a ) def __a ( self ) -> Optional[Any]: self.config_tester.run_common_tests() @unittest.skip(reason="MobileViTV2 does not use inputs_embeds" ) def __a ( self ) -> Dict: pass @unittest.skip(reason="MobileViTV2 does not support input and output embeddings" ) def __a ( self ) -> List[str]: pass @unittest.skip(reason="MobileViTV2 does not output attentions" ) def __a ( self ) -> List[str]: pass @require_torch_multi_gpu @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run." ) def __a ( self ) -> Optional[int]: pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def __a ( self ) -> Dict: pass def __a ( self ) -> Optional[int]: lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase_ = model_class(_a ) lowerCAmelCase_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase_ = [*signature.parameters.keys()] lowerCAmelCase_ = ["pixel_values"] self.assertListEqual(arg_names[:1] , _a ) def __a ( self ) -> str: lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_a ) def __a ( self ) -> Optional[int]: def check_hidden_states_output(_a , _a , _a ): lowerCAmelCase_ = model_class(_a ) model.to(_a ) model.eval() with torch.no_grad(): lowerCAmelCase_ = model(**self._prepare_for_class(_a , _a ) ) lowerCAmelCase_ = outputs.hidden_states lowerCAmelCase_ = 5 self.assertEqual(len(_a ) , _a ) # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. 
lowerCAmelCase_ = 2 for i in range(len(_a ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase_ = True check_hidden_states_output(_a , _a , _a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCAmelCase_ = True check_hidden_states_output(_a , _a , _a ) def __a ( self ) -> Optional[Any]: lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_a ) def __a ( self ) -> Any: lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*_a ) @slow def __a ( self ) -> Dict: for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase_ = MobileViTVaModel.from_pretrained(_a ) self.assertIsNotNone(_a ) def A(): lowerCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class __magic_name__ (unittest.TestCase ): @cached_property def __a ( self ) -> List[str]: return ( MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" ) if is_vision_available() else None ) @slow def __a ( self ) -> Dict: lowerCAmelCase_ = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" ).to( _a ) lowerCAmelCase_ = self.default_image_processor lowerCAmelCase_ = prepare_img() lowerCAmelCase_ = image_processor(images=_a , return_tensors="pt" ).to(_a ) # forward pass with torch.no_grad(): lowerCAmelCase_ = model(**_a ) # verify the logits lowerCAmelCase_ = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _a ) lowerCAmelCase_ = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] ).to(_a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1E-4 ) ) @slow def __a ( self ) -> str: lowerCAmelCase_ = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" ) lowerCAmelCase_ = model.to(_a ) lowerCAmelCase_ = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" ) lowerCAmelCase_ = prepare_img() lowerCAmelCase_ = image_processor(images=_a , return_tensors="pt" ).to(_a ) # forward pass with torch.no_grad(): lowerCAmelCase_ = model(**_a ) lowerCAmelCase_ = outputs.logits # verify the logits lowerCAmelCase_ = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , _a ) lowerCAmelCase_ = torch.tensor( [ [[7.0_8_6_3, 7.1_5_2_5, 6.8_2_0_1], [6.6_9_3_1, 6.8_7_7_0, 6.8_9_3_3], [6.2_9_7_8, 7.0_3_6_6, 6.9_6_3_6]], [[-3.7_1_3_4, -3.6_7_1_2, -3.6_6_7_5], [-3.5_8_2_5, -3.3_5_4_9, -3.4_7_7_7], [-3.3_4_3_5, -3.3_9_7_9, -3.2_8_5_7]], [[-2.9_3_2_9, -2.8_0_0_3, -2.7_3_6_9], [-3.0_5_6_4, -2.4_7_8_0, -2.0_2_0_7], [-2.6_8_8_9, -1.9_2_9_8, -1.7_6_4_0]], ] , device=_a , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _a , atol=1E-4 ) ) @slow def __a ( self ) -> Any: lowerCAmelCase_ = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" ) lowerCAmelCase_ = model.to(_a ) lowerCAmelCase_ = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" ) lowerCAmelCase_ = prepare_img() lowerCAmelCase_ = image_processor(images=_a , return_tensors="pt" 
).to(_a ) # forward pass with torch.no_grad(): lowerCAmelCase_ = model(**_a ) lowerCAmelCase_ = outputs.logits.detach().cpu() lowerCAmelCase_ = image_processor.post_process_semantic_segmentation(outputs=_a , target_sizes=[(50, 60)] ) lowerCAmelCase_ = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , _a ) lowerCAmelCase_ = image_processor.post_process_semantic_segmentation(outputs=_a ) lowerCAmelCase_ = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , _a )
22
import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging lowerCamelCase__ = logging.get_logger(__name__) def A(__a: Dict ): lowerCAmelCase_ = r"\w+[.]\d+" lowerCAmelCase_ = re.findall(__a , __a ) for pat in pats: lowerCAmelCase_ = key.replace(__a , "_".join(pat.split("." ) ) ) return key def A(__a: str , __a: Tuple , __a: List[Any] ): lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",) if ( any("norm" in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: lowerCAmelCase_ = pt_tuple_key[:-1] + ("embedding",) return renamed_pt_tuple_key, pt_tensor # conv layer lowerCAmelCase_ = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: lowerCAmelCase_ = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowerCAmelCase_ = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight": lowerCAmelCase_ = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowerCAmelCase_ = pt_tuple_key[:-1] + ("weight",) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias lowerCAmelCase_ = pt_tuple_key[:-1] + ("bias",) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def A(__a: Dict , __a: Any , __a: List[Any]=42 ): # Step 1: Convert pytorch tensor to numpy lowerCAmelCase_ = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params lowerCAmelCase_ = flax_model.init_weights(PRNGKey(__a ) ) lowerCAmelCase_ = flatten_dict(__a ) lowerCAmelCase_ = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCAmelCase_ = rename_key(__a ) lowerCAmelCase_ = tuple(renamed_pt_key.split("." ) ) # Correctly rename weight parameters lowerCAmelCase_ , lowerCAmelCase_ = rename_key_and_reshape_tensor(__a , __a , __a ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape " F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." ) # also add unexpected weight so that warning is thrown lowerCAmelCase_ = jnp.asarray(__a ) return unflatten_dict(__a )
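A self-contained sketch of the key-renaming step in the snippet above (the descriptive function name is assumed here for readability; the regex logic mirrors the original):

import re

def rename_key(key: str) -> str:
    # rewrite each "name.<digits>" segment as "name_<digits>" to match Flax naming
    for pat in re.findall(r"\w+[.]\d+", key):
        key = key.replace(pat, "_".join(pat.split(".")))
    return key

assert rename_key("encoder.layers.0.self_attn.weight") == "encoder.layers_0.self_attn.weight"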
22
1
def reverse_long_words(sentence: str) -> str:
    """Reverse every word in the sentence that is longer than four characters."""
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
22
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase__ = { '''configuration_time_series_transformer''': [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimeSeriesTransformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TimeSeriesTransformerForPrediction''', '''TimeSeriesTransformerModel''', '''TimeSeriesTransformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimeSeriesTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, TimeSeriesTransformerPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
22
1
import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class __magic_name__ (unittest.TestCase ): def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.0_2 , _a=4 , ) -> Optional[int]: lowerCAmelCase_ = parent lowerCAmelCase_ = batch_size lowerCAmelCase_ = seq_length lowerCAmelCase_ = is_training lowerCAmelCase_ = use_attention_mask lowerCAmelCase_ = use_token_type_ids lowerCAmelCase_ = use_labels lowerCAmelCase_ = vocab_size lowerCAmelCase_ = hidden_size lowerCAmelCase_ = num_hidden_layers lowerCAmelCase_ = num_attention_heads lowerCAmelCase_ = intermediate_size lowerCAmelCase_ = hidden_act lowerCAmelCase_ = hidden_dropout_prob lowerCAmelCase_ = attention_probs_dropout_prob lowerCAmelCase_ = max_position_embeddings lowerCAmelCase_ = type_vocab_size lowerCAmelCase_ = type_sequence_label_size lowerCAmelCase_ = initializer_range lowerCAmelCase_ = num_choices def __a ( self ) -> Any: lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase_ = None if self.use_attention_mask: lowerCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase_ = None if self.use_token_type_ids: lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase_ = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def __a ( self ) -> Any: lowerCAmelCase_ = self.prepare_config_and_inputs() lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = config_and_inputs lowerCAmelCase_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict @require_flax class __magic_name__ (__lowercase , unittest.TestCase ): lowerCamelCase__ = True lowerCamelCase__ = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def __a ( self ) -> Optional[int]: lowerCAmelCase_ = FlaxRoFormerModelTester(self ) @slow def __a ( self ) -> Any: for model_class_name in self.all_model_classes: lowerCAmelCase_ = model_class_name.from_pretrained("junnyu/roformer_chinese_small" , from_pt=_a ) lowerCAmelCase_ = model(np.ones((1, 1) ) ) self.assertIsNotNone(_a ) @require_flax class __magic_name__ (unittest.TestCase ): @slow def __a ( self ) -> Optional[int]: lowerCAmelCase_ = 
FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" ) lowerCAmelCase_ = jnp.array([[0, 1, 2, 3, 4, 5]] ) lowerCAmelCase_ = model(_a )[0] lowerCAmelCase_ = 50000 lowerCAmelCase_ = (1, 6, vocab_size) self.assertEqual(output.shape , _a ) lowerCAmelCase_ = jnp.array( [[[-0.1_2_0_5, -1.0_2_6_5, 0.2_9_2_2], [-1.5_1_3_4, 0.1_9_7_4, 0.1_5_1_9], [-5.0_1_3_5, -3.9_0_0_3, -0.8_4_0_4]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , _a , atol=1E-4 ) )
22
import math


def perfect_square(num: int) -> bool:
    """Check whether num is a perfect square via math.sqrt (subject to float rounding)."""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check whether n is a perfect square by binary-searching the candidate roots."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        if mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
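For large inputs the float-based check above can be thrown off by rounding; a minimal exact-integer alternative (a sketch using `math.isqrt`, available since Python 3.8):

def perfect_square_exact(n: int) -> bool:
    # math.isqrt works in exact integer arithmetic, so there is no rounding error
    return n >= 0 and math.isqrt(n) ** 2 == n

assert perfect_square_exact(16) and not perfect_square_exact(15)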
22
1
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { '''google/pegasus-large''': '''https://huggingface.co/google/pegasus-large/resolve/main/config.json''', # See all PEGASUS models at https://huggingface.co/models?filter=pegasus } class __magic_name__ (__lowercase ): lowerCamelCase__ = '''pegasus''' lowerCamelCase__ = ['''past_key_values'''] lowerCamelCase__ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self , _a=50265 , _a=1024 , _a=12 , _a=4096 , _a=16 , _a=12 , _a=4096 , _a=16 , _a=0.0 , _a=0.0 , _a=True , _a=True , _a="gelu" , _a=1024 , _a=0.1 , _a=0.0 , _a=0.0 , _a=0.0_2 , _a=0 , _a=False , _a=0 , _a=1 , _a=1 , **_a , ) -> Any: lowerCAmelCase_ = vocab_size lowerCAmelCase_ = max_position_embeddings lowerCAmelCase_ = d_model lowerCAmelCase_ = encoder_ffn_dim lowerCAmelCase_ = encoder_layers lowerCAmelCase_ = encoder_attention_heads lowerCAmelCase_ = decoder_ffn_dim lowerCAmelCase_ = decoder_layers lowerCAmelCase_ = decoder_attention_heads lowerCAmelCase_ = dropout lowerCAmelCase_ = attention_dropout lowerCAmelCase_ = activation_dropout lowerCAmelCase_ = activation_function lowerCAmelCase_ = init_std lowerCAmelCase_ = encoder_layerdrop lowerCAmelCase_ = decoder_layerdrop lowerCAmelCase_ = use_cache lowerCAmelCase_ = encoder_layers lowerCAmelCase_ = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=_a , eos_token_id=_a , is_encoder_decoder=_a , decoder_start_token_id=_a , forced_eos_token_id=_a , **_a , ) @property def __a ( self ) -> int: return self.encoder_attention_heads @property def __a ( self ) -> int: return self.d_model
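A minimal usage sketch (assuming the upstream class name `PegasusConfig`; the override values are illustrative). The `attribute_map` above lets the generic attribute names read through to the Pegasus-specific ones:

config = PegasusConfig(d_model=512, encoder_attention_heads=8)
assert config.hidden_size == 512        # resolved via attribute_map to d_model
assert config.num_attention_heads == 8  # resolved to encoder_attention_heads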
22
import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers lowerCamelCase__ = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split() if sys.version_info < (3, 7): pkgs_to_check_at_runtime.append('''dataclasses''') if sys.version_info < (3, 8): pkgs_to_check_at_runtime.append('''importlib_metadata''') for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''') def A(__a: Dict , __a: List[str]=None ): require_version(deps[pkg] , __a )
22
1
import argparse
import json

from tqdm import tqdm


def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
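An illustrative shape for one record in the `--src_path` JSON consumed above (the field names come from the code; the values are made up):

example_record = {
    "question": "who wrote the declaration of independence",
    "positive_ctxs": [
        {"title": "Thomas Jefferson"},
        {"title": "United States Declaration of Independence"},
    ],
}
# -> evaluation_set gets the question on one line;
#    gold_data_path gets the positive-context titles joined by tabs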
22
import argparse
import os
from pathlib import Path

import fairseq
import torch
from packaging import version
from torch import nn

from transformers import (
    BartConfig,
    BartForConditionalGeneration,
    BartForSequenceClassification,
    BartModel,
    BartTokenizer,
)
from transformers.utils import logging


FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}

if version.parse(fairseq.__version__) < version.parse("0.9.0"):
    raise Exception("requires fairseq >= 0.9.0")


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
    ("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
    ("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
    ("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
    ("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    # load a fairseq model from the hub, or an xsum checkpoint from disk
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokensa = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokensa).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config", default=None, type=str, help="Which huggingface architecture to use: bart-large-xsum"
    )
    args = parser.parse_args()
    convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
import json
import os
from typing import Optional

import numpy as np

from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer


logger = logging.get_logger(__name__)


class BarkProcessor(ProcessorMixin):
    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings

    @classmethod
    def from_pretrained(
        cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs
    ):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path,
                speaker_embeddings_dict_path,
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if speaker_embeddings_path is None:
                logger.warning(
                    f"`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exist,"
                    " no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json"
                    " dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`."
                )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None

        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)

        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)

    def save_pretrained(
        self,
        save_directory,
        speaker_embeddings_dict_path="speaker_embeddings_path.json",
        speaker_embeddings_directory="speaker_embeddings",
        push_to_hub=False,
        **kwargs,
    ):
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, "v2"), exist_ok=True)

            embeddings_dict = {}
            embeddings_dict["repo_or_path"] = save_directory

            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key)

                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["repo_or_path"], speaker_embeddings_directory, f"{prompt_key}_{key}"
                            ),
                            voice_preset[key],
                            allow_pickle=False,
                        )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, f"{prompt_key}_{key}.npy")

                    embeddings_dict[prompt_key] = tmp_dict

            with open(os.path.join(save_directory, speaker_embeddings_dict_path), "w") as fp:
                json.dump(embeddings_dict, fp)

        super().save_pretrained(save_directory, push_to_hub, **kwargs)

    def _load_voice_preset(self, voice_preset=None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]

        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]."
                )

            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path", "/"),
                voice_preset_paths[key],
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if path is None:
                raise ValueError(
                    f"`{os.path.join(self.speaker_embeddings.get('repo_or_path', '/'), voice_preset_paths[key])}` does not exist,"
                    f" no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}"
                    " embeddings."
                )

            voice_preset_dict[key] = np.load(path)

        return voice_preset_dict

    def _validate_voice_preset_dict(self, voice_preset=None, **kwargs):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")

            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

    def __call__(
        self,
        text=None,
        voice_preset=None,
        return_tensors="pt",
        max_length=256,
        add_special_tokens=False,
        return_attention_mask=True,
        return_token_type_ids=False,
        **kwargs,
    ):
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)

            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"):
                    voice_preset = voice_preset + ".npz"

                voice_preset = np.load(voice_preset)

        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)

        encoded_text = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding="max_length",
            max_length=max_length,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            add_special_tokens=add_special_tokens,
            **kwargs,
        )

        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset

        return encoded_text
import os
import unittest

from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
    VOCAB_FILES_NAMES,
    BasicTokenizer,
    WordpieceTokenizer,
    _is_control,
    _is_punctuation,
    _is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english


@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    # walk inward from both ends of the list until the key is found
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
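# Illustrative usage sketch (added; not part of the original module; values are
# arbitrary). Because the search walks inward from both ends, it needs at most
# len(list_data) / 2 recursive calls:
#
#   >>> search([1, 2, 4, 8, 16], 8)
#   3
#   >>> search([1, 2, 4, 8, 16], 5)
#   -1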
import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2000000) -> int:
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
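# Illustrative sanity check (added; not part of the original module): summing
# the primes below 10 should give 2 + 3 + 5 + 7 = 17.
#
#   >>> solution(10)
#   17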
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional

import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm

import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy


logger = logging.getLogger(__name__)

MODEL_BIN_FILE = "pytorch_model.bin"


@dataclasses.dataclass
class STModelArguments:
    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )


@dataclasses.dataclass
class STDataArguments:
    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "The name of the task to train on."},
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )


@dataclasses.dataclass
class STTrainingArguments:
    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: str = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: str = dataclasses.field(
        default="no",
        metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
        },
    )
    early_stopping_patience: int = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: float = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: bool = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    do_filter_by_val_performance: bool = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    finetune_on_labeled_data: bool = dataclasses.field(
        default=False,
        metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    confidence_threshold: float = dataclasses.field(
        default=0.0,
        metadata={"help": "Confidence threshold for pseudo-labeled data filtering."},
    )
    max_selftrain_iterations: int = dataclasses.field(
        default=100,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    seed: Optional[int] = dataclasses.field(
        default=None,
        metadata={"help": "Random seed for initialization."},
    )


def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)


def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state)

    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)

    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)

    args = argparse.Namespace()
    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)

    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)

    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`."

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info("Creating the initial data directory for self-training...")
    data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format
    initial_data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
            os.makedirs(initial_data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)

    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir, "stage-1")
        arguments_dict = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            "eval_file": data_files["eval"],
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.",
                model_bin_file_path,
                iteration,
            )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration)

        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, "best-checkpoint")
            current_output_dir = os.path.join(current_data_dir, "stage-2")
            # Update arguments_dict
            arguments_dict["model_name_or_path"] = model_path
            arguments_dict["train_file"] = data_files["train"]
            arguments_dict["output_dir"] = current_output_dir

            model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.",
                    model_bin_file_path,
                    iteration,
                )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration)

        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint"))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json")
        test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json")
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, "r") as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv")
        assert os.path.exists(infer_output_file)

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
        infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"]

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f"eval_results_iter-{iteration}.json"))
            if os.path.exists(test_results_file):
                shutil.copy(eval_results_file, os.path.join(output_dir, f"test_results_iter-{iteration}.json"))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")

        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True

        progress_bar.update(1)

        if should_training_stop:
            break

    if best_iteration is not None:
        # Save the best iteration
        logger.info("Best iteration: %d", best_iteration)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{iteration}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
    else:
        # Assume that the last iteration is the best
        logger.info("Best iteration: %d", args.max_selftrain_iterations - 1)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
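# Illustrative invocation sketch (added; the module name and file names below
# are hypothetical). `selftrain` takes the base model, the labeled training
# file, the unlabeled file to pseudo-label, and an output directory:
#
#   from selftraining import selftrain
#   selftrain(
#       model_name_or_path="bert-base-uncased",
#       train_file="train.csv",
#       infer_file="infer.csv",
#       output_dir="self_train_output",
#   )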
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
    "google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
    "google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
    "google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}


class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
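# Illustrative usage sketch (added; not part of the original module): the config
# can be instantiated with defaults and individual fields overridden.
#
#   from transformers import MobileNetV2Config
#   config = MobileNetV2Config(depth_multiplier=0.75)
#   print(config.image_size)  # 224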
from __future__ import annotations

import numpy as np


def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    # Doolittle LU decomposition: lower has a unit diagonal, upper holds the pivots.
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper


if __name__ == "__main__":
    import doctest

    doctest.testmod()
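# Illustrative check (added; not part of the original module): for a square
# matrix A, the factors should satisfy lower @ upper == A.
#
#   >>> A = np.array([[4.0, 3.0], [6.0, 3.0]])
#   >>> L, U = lower_upper_decomposition(A)
#   >>> np.allclose(L @ U, A)
#   True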
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set[str]:
    explored, stack = set(start), [start]

    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_bertweet": ["BertweetTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_bertweet import BertweetTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def pancake_sort(arr: list) -> list:
    cur = len(arr)
    while cur > 1:
        # Find the maximum number in arr
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
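# Illustrative run (added; not part of the original module): each pass flips the
# current maximum to the front, then flips it into its final position.
#
#   >>> pancake_sort([3, 1, 2])
#   [1, 2, 3]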
from argparse import ArgumentParser

from . import BaseTransformersCLICommand


def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
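# Illustrative CLI sketch (added; assumes the standard `transformers-cli` entry
# point wires in this subcommand):
#
#   transformers-cli download bert-base-uncased --cache-dir ./models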
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    # strip punctuation and newlines, then tokenize on spaces
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)

    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    return round(tf * idf, 3)
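# Illustrative end-to-end use (added; not part of the original module): score the
# term "cat" against a small three-document corpus.
#
#   corpus = "the cat sat\nthe dog ran\na cat and a cat"
#   tf = term_frequency("cat", "a cat and a cat")  # 2
#   df, n = document_frequency("cat", corpus)      # (2, 3)
#   idf = inverse_document_frequency(df, n)        # round(log10(3 / 2), 3) = 0.176
#   tf_idf(tf, idf)                                # 0.352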
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}

# Exponent of the factor(meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}


def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
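# Illustrative conversions (added; not part of the original module):
#
#   >>> length_conversion(4, "meter", "kilometer")
#   0.004
#   >>> length_conversion(1, "kilometer", "megametre")
#   0.001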
import warnings

from ...utils import is_sklearn_available, requires_backends


if is_sklearn_available():
    from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
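# Illustrative usage sketch (added; requires scikit-learn and scipy, since the
# helpers above guard on `is_sklearn_available()`). The helpers expect numpy
# arrays so that `(preds == labels).mean()` vectorizes:
#
#   import numpy as np
#   preds = np.array([1, 0, 1, 1])
#   labels = np.array([1, 0, 0, 1])
#   glue_compute_metrics("sst-2", preds, labels)  # {'acc': 0.75}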
import unittest

from diffusers.models.unet_2d_blocks import *  # noqa F403
from diffusers.utils import torch_device

from .test_unet_blocks_common import UNetBlockTesterMixin


class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
        super().test_output(expected_slice)


class ResnetDownsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetDownsampleBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
        super().test_output(expected_slice)


class AttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
        super().test_output(expected_slice)


class CrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
        super().test_output(expected_slice)


class SimpleCrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
        super().test_output(expected_slice)


class SkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
        super().test_output(expected_slice)


class AttnSkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
        super().test_output(expected_slice)


class DownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
        super().test_output(expected_slice)


class AttnDownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
        super().test_output(expected_slice)


class UNetMidBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2D  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "temb_channels": 128,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
        super().test_output(expected_slice)


class UNetMidBlock2DCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DCrossAttn  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
        super().test_output(expected_slice)


class UNetMidBlock2DSimpleCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DSimpleCrossAttn  # noqa F405
    block_type = "mid"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
        super().test_output(expected_slice)


class UpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
        super().test_output(expected_slice)


class ResnetUpsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetUpsampleBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
        super().test_output(expected_slice)


class CrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
        super().test_output(expected_slice)


class SimpleCrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
        super().test_output(expected_slice)


class AttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
        super().test_output(expected_slice)


class SkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
        super().test_output(expected_slice)


class AttnSkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
        super().test_output(expected_slice)


class UpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
        super().test_output(expected_slice)


class AttnUpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
        super().test_output(expected_slice)
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class CLIPSegProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, visual_prompt=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
import json
import os
import unittest

from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]

        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
import datasets


_CITATION = """\
@InProceedings{conneau2018xnli,
  author = "Conneau, Alexis and Rinott, Ruty and Lample, Guillaume and Williams, Adina and Bowman, Samuel R. and Schwenk, Holger and Stoyanov, Veselin",
  title = "XNLI: Evaluating Cross-lingual Sentence Representations",
  booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing",
  year = "2018",
  publisher = "Association for Computational Linguistics",
  location = "Brussels, Belgium",
}
"""

_DESCRIPTION = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""

_KWARGS_DESCRIPTION = """
Computes XNLI score which is just simple accuracy.
Args:
    predictions: Predicted labels.
    references: Ground truth labels.
Returns:
    'accuracy': accuracy
Examples:

    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> xnli_metric = datasets.load_metric("xnli")
    >>> results = xnli_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}
"""


def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
22
1
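For reference, the XNLI metric above reduces to an elementwise comparison; a minimal standalone sketch of the same computation in plain numpy (no `datasets` dependency assumed):

import numpy as np

preds = np.array([0, 1, 2, 1])
labels = np.array([0, 1, 1, 1])
# mean of the boolean match vector is exactly classification accuracy
accuracy = (preds == labels).mean()
print({"accuracy": float(accuracy)})  # {'accuracy': 0.75}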
def A(num: int ): if isinstance(num , float ): raise TypeError("'float' object cannot be interpreted as an integer" ) if isinstance(num , str ): raise TypeError("'str' object cannot be interpreted as an integer" ) if num == 0: return "0b0" negative = False if num < 0: negative = True num = -num binary = [] while num > 0: binary.insert(0 , num % 2 ) num >>= 1 if negative: return "-0b" + "".join(str(e ) for e in binary ) return "0b" + "".join(str(e ) for e in binary ) if __name__ == "__main__": import doctest doctest.testmod()
22
import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset lowerCamelCase__ = '''bert-base-cased''' lowerCamelCase__ = '''google/pegasus-xsum''' lowerCamelCase__ = [''' Sam ate lunch today.''', '''Sams lunch ingredients.'''] lowerCamelCase__ = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee'''] lowerCamelCase__ = '''patrickvonplaten/t5-tiny-random''' lowerCamelCase__ = '''sshleifer/bart-tiny-random''' lowerCamelCase__ = '''sshleifer/tiny-mbart''' lowerCamelCase__ = '''sshleifer/tiny-marian-en-de''' def A(__a: Path , __a: list ): lowerCAmelCase_ = "\n".join(__a ) Path(__a ).open("w" ).writelines(__a ) def A(__a: str ): for split in ["train", "val", "test"]: _dump_articles(os.path.join(__a , F"{split}.source" ) , __a ) _dump_articles(os.path.join(__a , F"{split}.target" ) , __a ) return tmp_dir class __magic_name__ (__lowercase ): @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) @slow def __a ( self , _a ) -> Dict: lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a ) lowerCAmelCase_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in ARTICLES ) lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in SUMMARIES ) lowerCAmelCase_ = 4 lowerCAmelCase_ = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated lowerCAmelCase_ , lowerCAmelCase_ = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error. lowerCAmelCase_ = SeqaSeqDataset( _a , data_dir=_a , type_path="train" , max_source_length=_a , max_target_length=_a , src_lang=_a , tgt_lang=_a , ) lowerCAmelCase_ = DataLoader(_a , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert isinstance(_a , _a ) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. 
assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place lowerCAmelCase_ = shift_tokens_right(batch["labels"] , tokenizer.pad_token_id ) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED] ) def __a ( self , _a ) -> str: lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a ) lowerCAmelCase_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in ARTICLES ) lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in SUMMARIES ) lowerCAmelCase_ = 4 lowerCAmelCase_ = LegacySeqaSeqDataset( _a , data_dir=_a , type_path="train" , max_source_length=20 , max_target_length=_a , ) lowerCAmelCase_ = DataLoader(_a , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 20 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" ) lowerCAmelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) lowerCAmelCase_ = tmp_dir.joinpath("train.source" ).open().readlines() lowerCAmelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) pack_data_dir(_a , _a , 128 , _a ) lowerCAmelCase_ = {x.name for x in tmp_dir.iterdir()} lowerCAmelCase_ = {x.name for x in save_dir.iterdir()} lowerCAmelCase_ = save_dir.joinpath("train.source" ).open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(_a ) < len(_a ) assert len(_a ) == 1 assert len(packed_examples[0] ) == sum(len(_a ) for x in orig_examples ) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="This test requires fairseq" ) def __a ( self ) -> Any: if not FAIRSEQ_AVAILABLE: return lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset(max_len=64 ) lowerCAmelCase_ = 64 lowerCAmelCase_ = ds.make_dynamic_sampler(_a , required_batch_size_multiple=_a ) lowerCAmelCase_ = [len(_a ) for x in batch_sampler] assert len(set(_a ) ) > 1 # it's not dynamic batch size if every batch is the same length assert sum(_a ) == len(_a ) # no dropped or added examples lowerCAmelCase_ = DataLoader(_a , batch_sampler=_a , collate_fn=ds.collate_fn , num_workers=2 ) lowerCAmelCase_ = [] lowerCAmelCase_ = [] for batch in data_loader: lowerCAmelCase_ = batch["input_ids"].shape lowerCAmelCase_ = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple lowerCAmelCase_ = np.product(batch["input_ids"].shape ) num_src_per_batch.append(_a ) if num_src_tokens > (max_tokens * 1.1): failures.append(_a ) assert num_src_per_batch[0] == max(_a ) if failures: raise 
AssertionError(f"too many tokens in {len(_a )} batches" ) def __a ( self ) -> List[str]: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset(max_len=512 ) lowerCAmelCase_ = 2 lowerCAmelCase_ = ds.make_sortish_sampler(_a , shuffle=_a ) lowerCAmelCase_ = DataLoader(_a , batch_size=_a , collate_fn=ds.collate_fn , num_workers=2 ) lowerCAmelCase_ = DataLoader(_a , batch_size=_a , collate_fn=ds.collate_fn , num_workers=2 , sampler=_a ) lowerCAmelCase_ = tokenizer.pad_token_id def count_pad_tokens(_a , _a="input_ids" ): return [batch[k].eq(_a ).sum().item() for batch in data_loader] assert sum(count_pad_tokens(_a , k="labels" ) ) < sum(count_pad_tokens(_a , k="labels" ) ) assert sum(count_pad_tokens(_a ) ) < sum(count_pad_tokens(_a ) ) assert len(_a ) == len(_a ) def __a ( self , _a=1000 , _a=128 ) -> str: if os.getenv("USE_REAL_DATA" , _a ): lowerCAmelCase_ = "examples/seq2seq/wmt_en_ro" lowerCAmelCase_ = max_len * 2 * 64 if not Path(_a ).joinpath("train.len" ).exists(): save_len_file(_a , _a ) else: lowerCAmelCase_ = "examples/seq2seq/test_data/wmt_en_ro" lowerCAmelCase_ = max_len * 4 save_len_file(_a , _a ) lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a ) lowerCAmelCase_ = SeqaSeqDataset( _a , data_dir=_a , type_path="train" , max_source_length=_a , max_target_length=_a , n_obs=_a , ) return ds, max_tokens, tokenizer def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset() lowerCAmelCase_ = set(DistributedSortishSampler(_a , 256 , num_replicas=2 , rank=0 , add_extra_examples=_a ) ) lowerCAmelCase_ = set(DistributedSortishSampler(_a , 256 , num_replicas=2 , rank=1 , add_extra_examples=_a ) ) assert idsa.intersection(_a ) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) def __a ( self , _a ) -> List[str]: lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a , use_fast=_a ) if tok_name == MBART_TINY: lowerCAmelCase_ = SeqaSeqDataset( _a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , src_lang="EN" , tgt_lang="FR" , ) lowerCAmelCase_ = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: lowerCAmelCase_ = SeqaSeqDataset( _a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , ) lowerCAmelCase_ = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(_a ) == 1 if tok_name == BART_TINY else len(_a ) == 0
22
1
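A quick sanity check for the decimal-to-binary routine above: a minimal sketch that cross-checks the remainder method against Python's built-in bin() (illustrative only):

def to_binary(num: int) -> str:
    # repeated division by 2, collecting remainders most-significant first
    if num == 0:
        return "0b0"
    negative = num < 0
    num = abs(num)
    bits = []
    while num > 0:
        bits.insert(0, num % 2)
        num >>= 1
    return ("-0b" if negative else "0b") + "".join(str(b) for b in bits)

for n in (0, 1, 13, -42):
    assert to_binary(n) == bin(n), (n, to_binary(n), bin(n))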
import numpy as np import qiskit def A(key_len: int = 8 , seed: int | None = None ): rng = np.random.default_rng(seed=seed ) # Roughly 25% of the qubits will contribute to the key. # So we take more than we need. num_qubits = 6 * key_len # Measurement basis for Alice's qubits. alice_basis = rng.integers(2 , size=num_qubits ) # The set of states Alice will prepare. alice_state = rng.integers(2 , size=num_qubits ) # Measurement basis for Bob's qubits. bob_basis = rng.integers(2 , size=num_qubits ) # Quantum Circuit to simulate BB84 bbaa_circ = qiskit.QuantumCircuit(num_qubits , name="BB84" ) # Alice prepares her qubits according to rules above. for index, _ in enumerate(alice_basis ): if alice_state[index] == 1: bbaa_circ.x(index ) if alice_basis[index] == 1: bbaa_circ.h(index ) bbaa_circ.barrier() # Bob measures the received qubits according to rules above. for index, _ in enumerate(bob_basis ): if bob_basis[index] == 1: bbaa_circ.h(index ) bbaa_circ.barrier() bbaa_circ.measure_all() # Simulate the quantum circuit. sim = qiskit.Aer.get_backend("aer_simulator" ) # We only need to run one shot because the key is unique. # Multiple shots will produce the same key. job = qiskit.execute(bbaa_circ , sim , shots=1 , seed_simulator=seed ) # Returns the result of measurement. result = job.result().get_counts(bbaa_circ ).most_frequent() # Extracting the generated key from the simulation results. # Only keep measurement results where Alice and Bob chose the same basis. gen_key = "".join( [ result_bit for alice_basis_bit, bob_basis_bit, result_bit in zip( alice_basis , bob_basis , result ) if alice_basis_bit == bob_basis_bit ] ) # Get final key. Pad with 0 if too short, otherwise truncate. key = gen_key[:key_len] if len(gen_key ) >= key_len else gen_key.ljust(key_len , "0" ) return key if __name__ == "__main__": print(F'''The generated key is : {A(8, seed=0)}''') from doctest import testmod testmod()
22
def A(arr: list ): n = len(arr ) s = sum(arr ) dp = [[False for x in range(s + 1 )] for y in range(n + 1 )] for i in range(1 , n + 1 ): dp[i][0] = True for i in range(1 , s + 1 ): dp[0][i] = False for i in range(1 , n + 1 ): for j in range(1 , s + 1 ): dp[i][j] = dp[i][j - 1] if arr[i - 1] <= j: dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]] diff = s # fallback for the empty-input edge case for j in range(int(s / 2 ) , -1 , -1 ): if dp[n][j] is True: diff = s - 2 * j break return diff
22
1
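The DP above finds the minimum difference between the sums of the two halves of a partition; a brute-force cross-check for small inputs (exponential in len(arr), illustrative only):

from itertools import combinations

def min_diff_brute_force(arr: list) -> int:
    total = sum(arr)
    best = total
    # for a subset with sum x, the two halves differ by |total - 2 * x|
    for r in range(len(arr) + 1):
        for subset in combinations(arr, r):
            best = min(best, abs(total - 2 * sum(subset)))
    return best

print(min_diff_brute_force([1, 6, 11, 5]))  # 1, e.g. {1, 5, 6} vs {11}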
from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = TypeVar('''DatasetType''', Dataset, IterableDataset) def A(__a: List[DatasetType] , __a: Optional[List[float]] = None , __a: Optional[int] = None , __a: Optional[DatasetInfo] = None , __a: Optional[NamedSplit] = None , __a: Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ): from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError("Unable to interleave an empty list of datasets." ) for i, dataset in enumerate(__a ): if not isinstance(__a , (Dataset, IterableDataset) ): if isinstance(__a , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} " "is an empty dataset dictionary." ) raise ValueError( F"Dataset at position {i} has at least one split: {list(__a )}\n" F"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(__a ) )}']" ) raise ValueError( F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__a ).__name__}." ) if i == 0: lowerCAmelCase_ , lowerCAmelCase_ = ( (Dataset, IterableDataset) if isinstance(__a , __a ) else (IterableDataset, Dataset) ) elif not isinstance(__a , __a ): raise ValueError( F"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(F"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." ) if dataset_type is Dataset: return _interleave_map_style_datasets( __a , __a , __a , info=__a , split=__a , stopping_strategy=__a ) else: return _interleave_iterable_datasets( __a , __a , __a , info=__a , split=__a , stopping_strategy=__a ) def A(__a: List[DatasetType] , __a: Optional[DatasetInfo] = None , __a: Optional[NamedSplit] = None , __a: int = 0 , ): if not dsets: raise ValueError("Unable to concatenate an empty list of datasets." ) for i, dataset in enumerate(__a ): if not isinstance(__a , (Dataset, IterableDataset) ): if isinstance(__a , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} " "is an empty dataset dictionary." ) raise ValueError( F"Dataset at position {i} has at least one split: {list(__a )}\n" F"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(__a ) )}']" ) raise ValueError( F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__a ).__name__}." 
) if i == 0: lowerCAmelCase_ , lowerCAmelCase_ = ( (Dataset, IterableDataset) if isinstance(__a , __a ) else (IterableDataset, Dataset) ) elif not isinstance(__a , __a ): raise ValueError( F"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." ) if dataset_type is Dataset: return _concatenate_map_style_datasets(__a , info=__a , split=__a , axis=__a ) else: return _concatenate_iterable_datasets(__a , info=__a , split=__a , axis=__a )
22
# Usage: # ./gen-card-facebook-wmt19.py import os from pathlib import Path def A(__a: Any , __a: Union[str, Any] , __a: List[str] ): lowerCAmelCase_ = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, oder?", } # BLUE scores as follows: # "pair": [fairseq, transformers] lowerCAmelCase_ = { "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"], "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"], "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"], "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"], } lowerCAmelCase_ = F"{src_lang}-{tgt_lang}" lowerCAmelCase_ = F"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. 
For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n" os.makedirs(__a , exist_ok=__a ) lowerCAmelCase_ = os.path.join(__a , "README.md" ) print(F"Generating {path}" ) with open(__a , "w" , encoding="utf-8" ) as f: f.write(__a ) # make sure we are under the root of the project lowerCamelCase__ = Path(__file__).resolve().parent.parent.parent lowerCamelCase__ = repo_dir / '''model_cards''' for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = model_name.split('''-''') lowerCamelCase__ = model_cards_dir / '''facebook''' / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
22
1
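A usage sketch for the interleaving entry point defined above, assuming it is exposed as `datasets.interleave_datasets` as in the public library:

from datasets import Dataset, interleave_datasets

d1 = Dataset.from_dict({"text": ["a", "b", "c"]})
d2 = Dataset.from_dict({"text": ["x", "y"]})
# sample from d1 roughly 70% of the time; "all_exhausted" oversamples the
# smaller dataset until every dataset has been fully consumed
mixed = interleave_datasets(
    [d1, d2], probabilities=[0.7, 0.3], seed=42,
    stopping_strategy="all_exhausted",
)
print(mixed["text"])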
import pickle import unittest import torch from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils import require_cpu @require_cpu class __magic_name__ (unittest.TestCase ): def __a ( self ) -> None: model = torch.nn.Linear(10 , 10 ) optimizer = torch.optim.SGD(model.parameters() , 0.1 ) accelerator = Accelerator() optimizer = accelerator.prepare(optimizer ) try: pickle.loads(pickle.dumps(optimizer ) ) except Exception as e: self.fail(f"Accelerated optimizer pickling failed with {e}" ) AcceleratorState._reset_state()
22
import re from filelock import FileLock try: import nltk NLTK_AVAILABLE = True except (ImportError, ModuleNotFoundError): NLTK_AVAILABLE = False if NLTK_AVAILABLE: with FileLock('''.lock''') as lock: nltk.download('''punkt''', quiet=True) def A(__a: str ): __a = re.sub("<n>" , "" , __a ) # remove pegasus newline char; re.sub returns a new string, so the result must be kept assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)" return "\n".join(nltk.sent_tokenize(__a ) )
22
1
import warnings from ...utils import logging from .image_processing_deit import DeiTImageProcessor lowerCamelCase__ = logging.get_logger(__name__) class __magic_name__ (DeiTImageProcessor ): def __init__( self , *args , **kwargs ) -> None: warnings.warn( "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use DeiTImageProcessor instead." , FutureWarning , ) super().__init__(*args , **kwargs )
22
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) lowerCamelCase__ = { '''configuration_encodec''': [ '''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''EncodecConfig''', ], '''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ '''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''', '''EncodecModel''', '''EncodecPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
22
1
from sklearn.metrics import mean_squared_error import datasets lowerCamelCase__ = '''\ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } ''' lowerCamelCase__ = '''\ Mean Squared Error(MSE) is the average of the square of difference between the predicted and actual values. ''' lowerCamelCase__ = ''' Args: predictions: array-like of shape (n_samples,) or (n_samples, n_outputs) Estimated target values. references: array-like of shape (n_samples,) or (n_samples, n_outputs) Ground truth (correct) target values. sample_weight: array-like of shape (n_samples,), default=None Sample weights. multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average" Defines aggregating of multiple output values. Array-like value defines weights used to average errors. "raw_values" : Returns a full set of errors in case of multioutput input. "uniform_average" : Errors of all outputs are averaged with uniform weight. squared : bool, default=True If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value. Returns: mse : mean squared error. Examples: >>> mse_metric = datasets.load_metric("mse") >>> predictions = [2.5, 0.0, 2, 8] >>> references = [3, -0.5, 2, 7] >>> results = mse_metric.compute(predictions=predictions, references=references) >>> print(results) {\'mse\': 0.375} >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False) >>> print(rmse_result) {\'mse\': 0.6123724356957945} If you\'re using multi-dimensional lists, then set the config as follows : >>> mse_metric = datasets.load_metric("mse", "multilist") >>> predictions = [[0.5, 1], [-1, 1], [7, -6]] >>> references = [[0, 2], [-1, 2], [8, -5]] >>> results = mse_metric.compute(predictions=predictions, references=references) >>> print(results) {\'mse\': 0.7083333333333334} >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\') >>> print(results) # doctest: +NORMALIZE_WHITESPACE {\'mse\': array([0.41666667, 1. ])} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __magic_name__ (datasets.Metric ): def __a ( self ) -> int: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[ "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html" ] , ) def __a ( self ) -> Dict: if self.config_name == "multilist": return { "predictions": datasets.Sequence(datasets.Value("float" ) ), "references": datasets.Sequence(datasets.Value("float" ) ), } else: return { "predictions": datasets.Value("float" ), "references": datasets.Value("float" ), } def __a ( self , _a , _a , _a=None , _a="uniform_average" , _a=True ) -> str: lowerCAmelCase_ = mean_squared_error( _a , _a , sample_weight=_a , multioutput=_a , squared=_a ) return {"mse": mse}
22
import logging from transformers import PretrainedConfig lowerCamelCase__ = logging.getLogger(__name__) lowerCamelCase__ = { '''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''', } class __magic_name__ (__lowercase ): lowerCamelCase__ = '''bertabs''' def __init__( self , _a=30522 , _a=512 , _a=6 , _a=512 , _a=8 , _a=512 , _a=0.2 , _a=6 , _a=768 , _a=8 , _a=2048 , _a=0.2 , **_a , ) -> List[Any]: super().__init__(**_a ) lowerCAmelCase_ = vocab_size lowerCAmelCase_ = max_pos lowerCAmelCase_ = enc_layers lowerCAmelCase_ = enc_hidden_size lowerCAmelCase_ = enc_heads lowerCAmelCase_ = enc_ff_size lowerCAmelCase_ = enc_dropout lowerCAmelCase_ = dec_layers lowerCAmelCase_ = dec_hidden_size lowerCAmelCase_ = dec_heads lowerCAmelCase_ = dec_ff_size lowerCAmelCase_ = dec_dropout
22
1
import numpy as np import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) class __magic_name__ (__lowercase ): lowerCamelCase__ = CLIPConfig lowerCamelCase__ = ['''CLIPEncoderLayer'''] def __init__( self , _a ) -> Any: super().__init__(_a ) lowerCAmelCase_ = CLIPVisionModelWithProjection(config.vision_config ) lowerCAmelCase_ = nn.Linear(config.vision_config.projection_dim , 1 ) lowerCAmelCase_ = nn.Linear(config.vision_config.projection_dim , 1 ) @torch.no_grad() def __a ( self , _a , _a , _a=0.5 , _a=0.5 ) -> str: lowerCAmelCase_ = self.vision_model(_a )[0] lowerCAmelCase_ = self.p_head(_a ) lowerCAmelCase_ = nsfw_detected.flatten() lowerCAmelCase_ = nsfw_detected > p_threshold lowerCAmelCase_ = nsfw_detected.tolist() if any(_a ): logger.warning( "Potential NSFW content was detected in one or more images. A black image will be returned instead." " Try again with a different prompt and/or seed." ) for idx, nsfw_detected_ in enumerate(_a ): if nsfw_detected_: lowerCAmelCase_ = np.zeros(images[idx].shape ) lowerCAmelCase_ = self.w_head(_a ) lowerCAmelCase_ = watermark_detected.flatten() lowerCAmelCase_ = watermark_detected > w_threshold lowerCAmelCase_ = watermark_detected.tolist() if any(_a ): logger.warning( "Potential watermarked content was detected in one or more images. A black image will be returned instead." " Try again with a different prompt and/or seed." ) for idx, watermark_detected_ in enumerate(_a ): if watermark_detected_: lowerCAmelCase_ = np.zeros(images[idx].shape ) return images, nsfw_detected, watermark_detected
22
import argparse import io import requests import torch from omegaconf import OmegaConf from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, create_vae_diffusers_config, renew_vae_attention_paths, renew_vae_resnet_paths, ) def A(__a: Tuple , __a: Union[str, Any] ): lowerCAmelCase_ = checkpoint lowerCAmelCase_ = {} lowerCAmelCase_ = vae_state_dict["encoder.conv_in.weight"] lowerCAmelCase_ = vae_state_dict["encoder.conv_in.bias"] lowerCAmelCase_ = vae_state_dict["encoder.conv_out.weight"] lowerCAmelCase_ = vae_state_dict["encoder.conv_out.bias"] lowerCAmelCase_ = vae_state_dict["encoder.norm_out.weight"] lowerCAmelCase_ = vae_state_dict["encoder.norm_out.bias"] lowerCAmelCase_ = vae_state_dict["decoder.conv_in.weight"] lowerCAmelCase_ = vae_state_dict["decoder.conv_in.bias"] lowerCAmelCase_ = vae_state_dict["decoder.conv_out.weight"] lowerCAmelCase_ = vae_state_dict["decoder.conv_out.bias"] lowerCAmelCase_ = vae_state_dict["decoder.norm_out.weight"] lowerCAmelCase_ = vae_state_dict["decoder.norm_out.bias"] lowerCAmelCase_ = vae_state_dict["quant_conv.weight"] lowerCAmelCase_ = vae_state_dict["quant_conv.bias"] lowerCAmelCase_ = vae_state_dict["post_quant_conv.weight"] lowerCAmelCase_ = vae_state_dict["post_quant_conv.bias"] # Retrieves the keys for the encoder down blocks only lowerCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} ) lowerCAmelCase_ = { layer_id: [key for key in vae_state_dict if F"down.{layer_id}" in key] for layer_id in range(__a ) } # Retrieves the keys for the decoder up blocks only lowerCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} ) lowerCAmelCase_ = { layer_id: [key for key in vae_state_dict if F"up.{layer_id}" in key] for layer_id in range(__a ) } for i in range(__a ): lowerCAmelCase_ = [key for key in down_blocks[i] if F"down.{i}" in key and F"down.{i}.downsample" not in key] if F"encoder.down.{i}.downsample.conv.weight" in vae_state_dict: lowerCAmelCase_ = vae_state_dict.pop( F"encoder.down.{i}.downsample.conv.weight" ) lowerCAmelCase_ = vae_state_dict.pop( F"encoder.down.{i}.downsample.conv.bias" ) lowerCAmelCase_ = renew_vae_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"down.{i}.block", "new": F"down_blocks.{i}.resnets"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) lowerCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.block" in key] lowerCAmelCase_ = 2 for i in range(1 , num_mid_res_blocks + 1 ): lowerCAmelCase_ = [key for key in mid_resnets if F"encoder.mid.block_{i}" in key] lowerCAmelCase_ = renew_vae_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) lowerCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.attn" in key] lowerCAmelCase_ = renew_vae_attention_paths(__a ) lowerCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) conv_attn_to_linear(__a ) for i in range(__a ): lowerCAmelCase_ = num_up_blocks - 1 - i lowerCAmelCase_ = [ key for key in up_blocks[block_id] if F"up.{block_id}" in key and F"up.{block_id}.upsample" not in key ] if F"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict: lowerCAmelCase_ = vae_state_dict[ 
F"decoder.up.{block_id}.upsample.conv.weight" ] lowerCAmelCase_ = vae_state_dict[ F"decoder.up.{block_id}.upsample.conv.bias" ] lowerCAmelCase_ = renew_vae_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"up.{block_id}.block", "new": F"up_blocks.{i}.resnets"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) lowerCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.block" in key] lowerCAmelCase_ = 2 for i in range(1 , num_mid_res_blocks + 1 ): lowerCAmelCase_ = [key for key in mid_resnets if F"decoder.mid.block_{i}" in key] lowerCAmelCase_ = renew_vae_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) lowerCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.attn" in key] lowerCAmelCase_ = renew_vae_attention_paths(__a ) lowerCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) conv_attn_to_linear(__a ) return new_checkpoint def A(__a: str , __a: str , ): # Only support V1 lowerCAmelCase_ = requests.get( " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" ) lowerCAmelCase_ = io.BytesIO(r.content ) lowerCAmelCase_ = OmegaConf.load(__a ) lowerCAmelCase_ = 512 lowerCAmelCase_ = "cuda" if torch.cuda.is_available() else "cpu" if checkpoint_path.endswith("safetensors" ): from safetensors import safe_open lowerCAmelCase_ = {} with safe_open(__a , framework="pt" , device="cpu" ) as f: for key in f.keys(): lowerCAmelCase_ = f.get_tensor(__a ) else: lowerCAmelCase_ = torch.load(__a , map_location=__a )["state_dict"] # Convert the VAE model. lowerCAmelCase_ = create_vae_diffusers_config(__a , image_size=__a ) lowerCAmelCase_ = custom_convert_ldm_vae_checkpoint(__a , __a ) lowerCAmelCase_ = AutoencoderKL(**__a ) vae.load_state_dict(__a ) vae.save_pretrained(__a ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''') parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''') lowerCamelCase__ = parser.parse_args() vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
22
1
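The VAE conversion above is mostly mechanical key renaming between the LDM and diffusers state-dict layouts; a minimal sketch of that pattern with hypothetical keys (the real target names come from the renew_vae_*_paths helpers, and the decoder's up-block indices are additionally reversed):

import re

ldm_keys = [
    "encoder.down.0.block.1.conv1.weight",
    "decoder.up.2.upsample.conv.bias",
]
renamed = []
for key in ldm_keys:
    # down.{i}.block -> down_blocks.{i}.resnets; up.{i}.upsample -> up_blocks.{i}.upsamplers.0
    key = re.sub(r"down\.(\d+)\.block", r"down_blocks.\1.resnets", key)
    key = re.sub(r"up\.(\d+)\.upsample", r"up_blocks.\1.upsamplers.0", key)
    renamed.append(key)
print(renamed)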
import inspect import unittest from math import floor from transformers import CvtConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import CvtForImageClassification, CvtModel from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __magic_name__ (__lowercase ): def __a ( self ) -> Dict: lowerCAmelCase_ = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(_a , "embed_dim" ) ) self.parent.assertTrue(hasattr(_a , "num_heads" ) ) class __magic_name__ : def __init__( self , _a , _a=13 , _a=64 , _a=3 , _a=[16, 48, 96] , _a=[1, 3, 6] , _a=[1, 2, 10] , _a=[7, 3, 3] , _a=[4, 2, 2] , _a=[2, 1, 1] , _a=[2, 2, 2] , _a=[False, False, True] , _a=[0.0, 0.0, 0.0] , _a=0.0_2 , _a=1E-12 , _a=True , _a=True , _a=2 , ) -> List[str]: lowerCAmelCase_ = parent lowerCAmelCase_ = batch_size lowerCAmelCase_ = image_size lowerCAmelCase_ = patch_sizes lowerCAmelCase_ = patch_stride lowerCAmelCase_ = patch_padding lowerCAmelCase_ = is_training lowerCAmelCase_ = use_labels lowerCAmelCase_ = num_labels lowerCAmelCase_ = num_channels lowerCAmelCase_ = embed_dim lowerCAmelCase_ = num_heads lowerCAmelCase_ = stride_kv lowerCAmelCase_ = depth lowerCAmelCase_ = cls_token lowerCAmelCase_ = attention_drop_rate lowerCAmelCase_ = initializer_range lowerCAmelCase_ = layer_norm_eps def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase_ = None if self.use_labels: lowerCAmelCase_ = ids_tensor([self.batch_size] , self.num_labels ) lowerCAmelCase_ = self.get_config() return config, pixel_values, labels def __a ( self ) -> Dict: return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def __a ( self , _a , _a , _a ) -> Optional[Any]: lowerCAmelCase_ = CvtModel(config=_a ) model.to(_a ) model.eval() lowerCAmelCase_ = model(_a ) lowerCAmelCase_ = (self.image_size, self.image_size) lowerCAmelCase_ , lowerCAmelCase_ = image_size[0], image_size[1] for i in range(len(self.depth ) ): lowerCAmelCase_ = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) lowerCAmelCase_ = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def __a ( self , _a , _a , _a ) -> str: lowerCAmelCase_ = self.num_labels lowerCAmelCase_ = CvtForImageClassification(_a ) model.to(_a ) model.eval() lowerCAmelCase_ = model(_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = 
self.prepare_config_and_inputs() lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = config_and_inputs lowerCAmelCase_ = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class __magic_name__ (__lowercase , __lowercase , unittest.TestCase ): lowerCamelCase__ = (CvtModel, CvtForImageClassification) if is_torch_available() else () lowerCamelCase__ = ( {'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification} if is_torch_available() else {} ) lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False def __a ( self ) -> str: lowerCAmelCase_ = CvtModelTester(self ) lowerCAmelCase_ = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 ) def __a ( self ) -> List[Any]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __a ( self ) -> Tuple: return @unittest.skip(reason="Cvt does not output attentions" ) def __a ( self ) -> Tuple: pass @unittest.skip(reason="Cvt does not use inputs_embeds" ) def __a ( self ) -> Tuple: pass @unittest.skip(reason="Cvt does not support input and output embeddings" ) def __a ( self ) -> str: pass def __a ( self ) -> Dict: lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase_ = model_class(_a ) lowerCAmelCase_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase_ = [*signature.parameters.keys()] lowerCAmelCase_ = ["pixel_values"] self.assertListEqual(arg_names[:1] , _a ) def __a ( self ) -> str: lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_a ) def __a ( self ) -> Union[str, Any]: def check_hidden_states_output(_a , _a , _a ): lowerCAmelCase_ = model_class(_a ) model.to(_a ) model.eval() with torch.no_grad(): lowerCAmelCase_ = model(**self._prepare_for_class(_a , _a ) ) lowerCAmelCase_ = outputs.hidden_states lowerCAmelCase_ = len(self.model_tester.depth ) self.assertEqual(len(_a ) , _a ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase_ = True check_hidden_states_output(_a , _a , _a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCAmelCase_ = True check_hidden_states_output(_a , _a , _a ) def __a ( self ) -> List[Any]: lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_a ) @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." 
) def __a ( self ) -> List[Any]: pass @slow def __a ( self ) -> int: for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase_ = CvtModel.from_pretrained(_a ) self.assertIsNotNone(_a ) def A(): lowerCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class __magic_name__ (unittest.TestCase ): @cached_property def __a ( self ) -> int: return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def __a ( self ) -> List[str]: lowerCAmelCase_ = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_a ) lowerCAmelCase_ = self.default_image_processor lowerCAmelCase_ = prepare_img() lowerCAmelCase_ = image_processor(images=_a , return_tensors="pt" ).to(_a ) # forward pass with torch.no_grad(): lowerCAmelCase_ = model(**_a ) # verify the logits lowerCAmelCase_ = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _a ) lowerCAmelCase_ = torch.tensor([0.9_2_8_5, 0.9_0_1_5, -0.3_1_5_0] ).to(_a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1E-4 ) )
22
def generate_large_matrix(): return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )] grid = generate_large_matrix() test_grids = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def validate_grid(grid: list ): assert all(row == sorted(row , reverse=True ) for row in grid ) assert all(list(col ) == sorted(col , reverse=True ) for col in zip(*grid ) ) def find_negative_index(array: list ): left = 0 right = len(array ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: mid = (left + right) // 2 num = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: left = mid + 1 else: right = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. return len(array ) def count_negatives_binary_search(grid: list ): total = 0 bound = len(grid[0] ) for i in range(len(grid ) ): bound = find_negative_index(grid[i][:bound] ) total += bound return (len(grid ) * len(grid[0] )) - total def count_negatives_brute_force(grid: list ): return len([number for row in grid for number in row if number < 0] ) def count_negatives_brute_force_with_break(grid: list ): total = 0 for row in grid: for i, number in enumerate(row ): if number < 0: total += len(row ) - i break return total def benchmark(): from timeit import timeit print("Running benchmarks" ) setup = ( "from __main__ import count_negatives_binary_search, " "count_negatives_brute_force, count_negatives_brute_force_with_break, grid" ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): time = timeit(F"{func}(grid=grid)" , setup=setup , number=500 ) print(F"{func}() took {time:0.4f} seconds" ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
22
1
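An alternative per-row counter for the sorted grid above, using the standard-library bisect module (an independent illustrative sketch, not one of the benchmarked functions):

from bisect import bisect_left

def count_negatives_per_row(row: list) -> int:
    # row is sorted descending; search the ascending reversal for the first non-negative
    rev = row[::-1]            # ascending order
    return bisect_left(rev, 0)  # negatives occupy the prefix of rev

grid = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
print(sum(count_negatives_per_row(r) for r in grid))  # 8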
from torch import nn def A(act_fn: str ): if act_fn in ["swish", "silu"]: return nn.SiLU() elif act_fn == "mish": return nn.Mish() elif act_fn == "gelu": return nn.GELU() else: raise ValueError(F"Unsupported activation function: {act_fn}" )
22
import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging lowerCamelCase__ = logging.get_logger(__name__) def A(__a: Dict ): lowerCAmelCase_ = r"\w+[.]\d+" lowerCAmelCase_ = re.findall(__a , __a ) for pat in pats: lowerCAmelCase_ = key.replace(__a , "_".join(pat.split("." ) ) ) return key def A(__a: str , __a: Tuple , __a: List[Any] ): lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",) if ( any("norm" in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: lowerCAmelCase_ = pt_tuple_key[:-1] + ("embedding",) return renamed_pt_tuple_key, pt_tensor # conv layer lowerCAmelCase_ = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: lowerCAmelCase_ = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowerCAmelCase_ = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight": lowerCAmelCase_ = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowerCAmelCase_ = pt_tuple_key[:-1] + ("weight",) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias lowerCAmelCase_ = pt_tuple_key[:-1] + ("bias",) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def A(__a: Dict , __a: Any , __a: List[Any]=42 ): # Step 1: Convert pytorch tensor to numpy lowerCAmelCase_ = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params lowerCAmelCase_ = flax_model.init_weights(PRNGKey(__a ) ) lowerCAmelCase_ = flatten_dict(__a ) lowerCAmelCase_ = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCAmelCase_ = rename_key(__a ) lowerCAmelCase_ = tuple(renamed_pt_key.split("." ) ) # Correctly rename weight parameters lowerCAmelCase_ , lowerCAmelCase_ = rename_key_and_reshape_tensor(__a , __a , __a ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape " F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." ) # also add unexpected weight so that warning is thrown lowerCAmelCase_ = jnp.asarray(__a ) return unflatten_dict(__a )
22
1
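The renaming helper above also fixes weight layouts: PyTorch stores Linear kernels as (out, in) and Conv2d kernels as (out, in, h, w), while Flax expects (in, out) and (h, w, in, out). A minimal numpy sketch of the two transposes used in the snippet:

import numpy as np

linear_pt = np.zeros((8, 4))               # (out_features, in_features)
linear_flax = linear_pt.T                  # (in_features, out_features)

conv_pt = np.zeros((16, 3, 3, 3))          # (out_ch, in_ch, kh, kw)
conv_flax = conv_pt.transpose(2, 3, 1, 0)  # (kh, kw, in_ch, out_ch)

print(linear_flax.shape, conv_flax.shape)  # (4, 8) (3, 3, 3, 16)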
import pytest from datasets import inspect_metric, list_metrics, load_metric @pytest.fixture def mock_emitted_deprecation_warnings(monkeypatch ): monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() ) @pytest.fixture def mock_hfh(monkeypatch ): class MetricMock : def __init__( self , metric_id ) -> None: self.id = metric_id class HfhMock : _metrics = [MetricMock(metric_id ) for metric_id in ['''accuracy''', '''mse''', '''precision''', '''codeparrot/apps_metric''']] def list_metrics( self ) -> list: return self._metrics monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() ) @pytest.mark.parametrize( "func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] ) def test_metric_deprecation_warning(func , args , mock_emitted_deprecation_warnings , mock_hfh , tmp_path ): if "tmp_path" in args: args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args ) with pytest.warns(FutureWarning , match="https://huggingface.co/docs/evaluate" ): func(*args )
22
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase__ = { '''configuration_time_series_transformer''': [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimeSeriesTransformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TimeSeriesTransformerForPrediction''', '''TimeSeriesTransformerModel''', '''TimeSeriesTransformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimeSeriesTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, TimeSeriesTransformerPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
22
1
import argparse import hashlib # hashlib is only used inside the Test class import struct class SHAaHash : def __init__( self , data ) -> None: self.data = data self.h = [0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476, 0xc3d2e1f0] @staticmethod def rotate( n , b ): return ((n << b) | (n >> (32 - b))) & 0xffffffff def padding( self ): padding = b"\x80" + b"\x00" * (63 - (len(self.data ) + 8) % 64) padded_data = self.data + padding + struct.pack(">Q" , 8 * len(self.data ) ) return padded_data def split_blocks( self ): return [ self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 ) ] def expand_block( self , block ): w = list(struct.unpack(">16L" , block ) ) + [0] * 64 for i in range(16 , 80 ): w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 ) return w def final_hash( self ): self.padded_data = self.padding() self.blocks = self.split_blocks() for block in self.blocks: expanded_block = self.expand_block(block ) a, b, c, d, e = self.h for i in range(0 , 80 ): if 0 <= i < 20: f = (b & c) | ((~b) & d) k = 0x5a827999 elif 20 <= i < 40: f = b ^ c ^ d k = 0x6ed9eba1 elif 40 <= i < 60: f = (b & c) | (b & d) | (c & d) k = 0x8f1bbcdc elif 60 <= i < 80: f = b ^ c ^ d k = 0xca62c1d6 a, b, c, d, e = ( self.rotate(a , 5 ) + f + e + k + expanded_block[i] & 0xffffffff, a, self.rotate(b , 30 ), c, d, ) self.h = ( self.h[0] + a & 0xffffffff, self.h[1] + b & 0xffffffff, self.h[2] + c & 0xffffffff, self.h[3] + d & 0xffffffff, self.h[4] + e & 0xffffffff, ) return ("{:08x}" * 5).format(*self.h ) def test_sha1_hash(): msg = b"Test String" assert SHAaHash(msg ).final_hash() == hashlib.sha1(msg ).hexdigest() # noqa: S324 def main(): parser = argparse.ArgumentParser(description="Process some strings or files" ) parser.add_argument( "--string" , dest="input_string" , default="Hello World!! Welcome to Cryptography" , help="Hash the string" , ) parser.add_argument("--file" , dest="input_file" , help="Hash contents of a file" ) args = parser.parse_args() input_string = args.input_string # In any case hash input should be a bytestring if args.input_file: with open(args.input_file , "rb" ) as f: hash_input = f.read() else: hash_input = bytes(input_string , "utf-8" ) print(SHAaHash(hash_input ).final_hash() ) if __name__ == "__main__": main() import doctest doctest.testmod()
22
import math def perfect_square(num: int ): return math.sqrt(num ) * math.sqrt(num ) == num def perfect_square_binary_search(n: int ): left = 0 right = n while left <= right: mid = (left + right) // 2 if mid**2 == n: return True elif mid**2 > n: right = mid - 1 else: left = mid + 1 return False if __name__ == "__main__": import doctest doctest.testmod()
22
1
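The float-based check above can misreport for very large integers because math.sqrt rounds; an exact integer alternative using math.isqrt (Python 3.8+), shown as a sketch:

import math

def is_perfect_square(n: int) -> bool:
    if n < 0:
        return False
    root = math.isqrt(n)  # exact integer square root, no floating point
    return root * root == n

print(is_perfect_square(4), is_perfect_square(5))  # True False
print(is_perfect_square((10**8 + 1) ** 2))         # True, no float rounding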
import argparse import re from flax.traverse_util import flatten_dict, unflatten_dict from tax import checkpoints from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model from transformers.utils import logging logging.set_verbosity_info() # should not include what is already done by the `from_pt` argument lowerCamelCase__ = { '''/attention/''': '''/0/SelfAttention/''', '''/self_attention/''': '''/0/SelfAttention/''', '''/encoder_decoder_attention/''': '''/1/EncDecAttention/''', '''value''': '''v''', '''query''': '''q''', '''key''': '''k''', '''out''': '''o''', '''pre_self_attention_layer_norm''': '''0/layer_norm''', '''pre_cross_attention_layer_norm''': '''1/layer_norm''', '''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong '''token_embedder''': '''shared''', '''encoder_norm''': '''final_layer_norm''', '''decoder_norm''': '''final_layer_norm''', '''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''', '''router/router_weights/w/''': '''router/classifier/''', '''roer/roer_weights/w/''': '''router/classifier/''', '''logits_dense''': '''lm_head''', } def A(__a: Any ): # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in # the original model lowerCAmelCase_ = list(s_dict.keys() ) for key in keys: lowerCAmelCase_ = r".*/layers_(\d+)" lowerCAmelCase_ = key if re.match(__a , __a ): lowerCAmelCase_ = re.sub(r"layers_(\d+)" , r"block/\1/layer" , __a ) lowerCAmelCase_ = r"(encoder|decoder)\/" if re.match(__a , __a ): lowerCAmelCase_ = re.match(__a , __a ).groups() if groups[0] == "encoder": lowerCAmelCase_ = re.sub(r"/mlp/" , r"/1/mlp/" , __a ) lowerCAmelCase_ = re.sub(r"/pre_mlp_layer_norm/" , r"/1/layer_norm/" , __a ) elif groups[0] == "decoder": lowerCAmelCase_ = re.sub(r"/mlp/" , r"/2/mlp/" , __a ) lowerCAmelCase_ = re.sub(r"/pre_mlp_layer_norm/" , r"/2/layer_norm/" , __a ) # 2. Convert other classic mappings for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items(): if old_key in new_key: lowerCAmelCase_ = new_key.replace(__a , __a ) print(F"{key} -> {new_key}" ) lowerCAmelCase_ = s_dict.pop(__a ) if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: lowerCAmelCase_ = s_dict[ "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" ].T if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: lowerCAmelCase_ = s_dict[ "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" ].T # 3. 
Take extra care of the EXPERTS layer for key in list(s_dict.keys() ): if "expert" in key: num_experts = s_dict[key].shape[0] expert_weights = s_dict[key] for idx in range(num_experts ): s_dict[key.replace("expert/" , f"experts/expert_{idx}/" )] = expert_weights[idx] print(F"{key} -> {key.replace('expert/' , f'experts/expert_{idx}/' )}" ) s_dict.pop(key ) return s_dict GIN_TO_CONFIG_MAPPING = { '''NUM_ENCODER_LAYERS''': '''num_layers''', '''NUM_DECODER_LAYERS''': '''num_decoder_layers''', '''NUM_HEADS''': '''num_heads''', '''HEAD_DIM''': '''d_kv''', '''EMBED_DIM''': '''d_model''', '''MLP_DIM''': '''d_ff''', '''NUM_SELECTED_EXPERTS''': '''num_selected_experts''', '''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''', '''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''', '''dense.MlpBlock.activations''': '''feed_forward_proj''', } def convert_gin_to_config(gin_file , num_experts ): # Convert a Google-style gin config to the Hugging Face format import regex as re with open(gin_file , "r" ) as f: raw_gin = f.read() regex_match = re.findall(r"(.*) = ([0-9.]*)" , raw_gin ) args = {} for param, value in regex_match: if param in GIN_TO_CONFIG_MAPPING and value != "": args[GIN_TO_CONFIG_MAPPING[param]] = float(value ) if "." in value else int(value ) activation = re.findall(r"(.*activations) = \(\'(.*)\',\)" , raw_gin )[0] args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1] ) args["num_experts"] = num_experts config = SwitchTransformersConfig(**args ) return config def convert_flax_checkpoint_to_pytorch(flax_checkpoint_path , config_file , gin_file=None , pytorch_dump_path="./" , num_experts=8 ): # Initialise PyTorch model print(F"Loading flax weights from : {flax_checkpoint_path}" ) flax_params = checkpoints.load_tax_checkpoint(flax_checkpoint_path ) if gin_file is not None: config = convert_gin_to_config(gin_file , num_experts ) else: config = SwitchTransformersConfig.from_pretrained(config_file ) pt_model = SwitchTransformersForConditionalGeneration(config ) flax_params = flax_params["target"] flax_params = flatten_dict(flax_params , sep="/" ) flax_params = rename_keys(flax_params ) flax_params = unflatten_dict(flax_params , sep="/" ) # Load the flax params in the PT model load_flax_weights_in_pytorch_model(pt_model , flax_params ) print(F"Save PyTorch model to {pytorch_dump_path}" ) pt_model.save_pretrained(pytorch_dump_path ) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--switch_t5x_checkpoint_path''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the''' ''' model architecture. If not provided, a `gin_file` has to be provided.''' ), ) parser.add_argument( '''--gin_file''', default=None, type=str, required=False, help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''', ) parser.add_argument( '''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.''' ) parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''') args = parser.parse_args() convert_flax_checkpoint_to_pytorch( args.switch_t5x_checkpoint_path, args.config_name, args.gin_file, args.pytorch_dump_folder_path, args.num_experts, )
import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers lowerCamelCase__ = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split() if sys.version_info < (3, 7): pkgs_to_check_at_runtime.append('''dataclasses''') if sys.version_info < (3, 8): pkgs_to_check_at_runtime.append('''importlib_metadata''') for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''') def A(__a: Dict , __a: List[str]=None ): require_version(deps[pkg] , __a )
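A minimal sketch of what such a runtime dependency check reduces to, using importlib.metadata and packaging; the requirement string is illustrative and the name parsing is deliberately simplified to the >= and == operators:

from importlib.metadata import version, PackageNotFoundError
from packaging.version import parse
from packaging.specifiers import SpecifierSet

def check_requirement(requirement: str) -> None:
    # e.g. "tqdm>=4.27" -> name "tqdm", specifier ">=4.27"
    name = requirement.split(">=")[0].split("==")[0].strip()
    spec = SpecifierSet(requirement[len(name):])
    try:
        installed = version(name)
    except PackageNotFoundError:
        raise ImportError(f"{name} is required but not installed")
    if parse(installed) not in spec:
        raise ImportError(f"{name}{spec} is required, found {installed}")

check_requirement("packaging>=20.0")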
from ..utils import DummyObject, requires_backends class __magic_name__ (metaclass=__lowercase ): lowerCamelCase__ = ['''note_seq'''] def __init__( self , *_a , **_a ) -> Optional[Any]: requires_backends(self , ["note_seq"] ) @classmethod def __a ( cls , *_a , **_a ) -> List[str]: requires_backends(cls , ["note_seq"] ) @classmethod def __a ( cls , *_a , **_a ) -> List[Any]: requires_backends(cls , ["note_seq"] )
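The dummy-object pattern shown here can be reproduced in a few lines; this sketch wires up one stand-in class for the missing note_seq backend, with the class name chosen for illustration and the error message simplified:

class DummyObject(type):
    """Metaclass that raises a helpful error the moment the class is touched."""
    def __getattr__(cls, name):
        raise ImportError(f"{cls.__name__} requires the backends {cls._backends}")

class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]
    def __init__(self, *args, **kwargs):
        raise ImportError(f"{type(self).__name__} requires the backends {self._backends}")

try:
    MidiProcessor()
except ImportError as err:
    print(err)  # MidiProcessor requires the backends ['note_seq']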
import argparse import os from pathlib import Path import fairseq import torch from packaging import version from torch import nn from transformers import ( BartConfig, BartForConditionalGeneration, BartForSequenceClassification, BartModel, BartTokenizer, ) from transformers.utils import logging lowerCamelCase__ = ['''bart.large''', '''bart.large.mnli''', '''bart.large.cnn''', '''bart_xsum/model.pt'''] lowerCamelCase__ = {'''bart.large''': BartModel, '''bart.large.mnli''': BartForSequenceClassification} if version.parse(fairseq.__version__) < version.parse('''0.9.0'''): raise Exception('''requires fairseq >= 0.9.0''') logging.set_verbosity_info() lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = ''' Hello world! cécé herlolip''' lowerCamelCase__ = [ ('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''), ('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''), ('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''), ('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''), ] def A(__a: Any ): lowerCAmelCase_ = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "_float_tensor", ] for k in ignore_keys: state_dict.pop(__a , __a ) def A(__a: Optional[int] , __a: List[Any] , __a: Union[str, Any] ): lowerCAmelCase_ = dct.pop(__a ) lowerCAmelCase_ = val def A(__a: Tuple ): lowerCAmelCase_ = torch.load(__a , map_location="cpu" ) lowerCAmelCase_ = torch.hub.load("pytorch/fairseq" , "bart.large.cnn" ).eval() hub_interface.model.load_state_dict(sd["model"] ) return hub_interface def A(__a: List[str] ): lowerCAmelCase_ , lowerCAmelCase_ = emb.weight.shape lowerCAmelCase_ = nn.Linear(__a , __a , bias=__a ) lowerCAmelCase_ = emb.weight.data return lin_layer @torch.no_grad() def A(__a: Tuple , __a: Union[str, Any] , __a: str=None ): if not os.path.exists(__a ): lowerCAmelCase_ = torch.hub.load("pytorch/fairseq" , __a ).eval() else: lowerCAmelCase_ = load_xsum_checkpoint(__a ) bart.model.upgrade_state_dict(bart.model.state_dict() ) if hf_checkpoint_name is None: lowerCAmelCase_ = checkpoint_path.replace("." 
, "-" ) lowerCAmelCase_ = BartConfig.from_pretrained(__a ) lowerCAmelCase_ = bart.encode(__a ).unsqueeze(0 ) lowerCAmelCase_ = BartTokenizer.from_pretrained(__a ).encode(__a , return_tensors="pt" ).unsqueeze(0 ) if not torch.eq(__a , __a ).all(): raise ValueError( F"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}" ) if checkpoint_path == "bart.large.mnli": lowerCAmelCase_ = bart.state_dict() remove_ignore_keys_(__a ) lowerCAmelCase_ = state_dict["model.decoder.embed_tokens.weight"] for src, dest in mnli_rename_keys: rename_key(__a , __a , __a ) lowerCAmelCase_ = BartForSequenceClassification(__a ).eval() model.load_state_dict(__a ) lowerCAmelCase_ = bart.predict("mnli" , __a , return_logits=__a ) lowerCAmelCase_ = model(__a )[0] # logits else: # no classification heads to worry about lowerCAmelCase_ = bart.model.state_dict() remove_ignore_keys_(__a ) lowerCAmelCase_ = state_dict["decoder.embed_tokens.weight"] lowerCAmelCase_ = bart.extract_features(__a ) if hf_checkpoint_name == "facebook/bart-large": lowerCAmelCase_ = BartModel(__a ).eval() model.load_state_dict(__a ) lowerCAmelCase_ = model(__a ).model[0] else: lowerCAmelCase_ = BartForConditionalGeneration(__a ).eval() # an existing summarization ckpt model.model.load_state_dict(__a ) if hasattr(__a , "lm_head" ): lowerCAmelCase_ = make_linear_from_emb(model.model.shared ) lowerCAmelCase_ = model.model(__a )[0] # Check results if fairseq_output.shape != new_model_outputs.shape: raise ValueError( F"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}" ) if (fairseq_output != new_model_outputs).any().item(): raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`" ) Path(__a ).mkdir(exist_ok=__a ) model.save_pretrained(__a ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum''' ) lowerCamelCase__ = parser.parse_args() convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # noqa F403 else: from .scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
import os import unittest from transformers import MobileBertTokenizer, MobileBertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class __magic_name__ (__lowercase , unittest.TestCase ): lowerCamelCase__ = MobileBertTokenizer lowerCamelCase__ = MobileBertTokenizerFast lowerCamelCase__ = True lowerCamelCase__ = True lowerCamelCase__ = filter_non_english lowerCamelCase__ = '''google/mobilebert-uncased''' def __a ( self ) -> Optional[Any]: super().setUp() lowerCAmelCase_ = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) lowerCAmelCase_ = [ (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped for tokenizer_def in self.tokenizers_list ] def __a ( self , _a ) -> Any: lowerCAmelCase_ = "UNwant\u00E9d,running" lowerCAmelCase_ = "unwanted, running" return input_text, output_text def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = self.tokenizer_class(self.vocab_file ) lowerCAmelCase_ = tokenizer.tokenize("UNwant\u00E9d,running" ) self.assertListEqual(_a , ["un", "##want", "##ed", ",", "runn", "##ing"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [9, 6, 7, 12, 10, 11] ) def __a ( self ) -> Tuple: if not self.test_rust_tokenizer: return lowerCAmelCase_ = self.get_tokenizer() lowerCAmelCase_ = self.get_rust_tokenizer() lowerCAmelCase_ = "UNwant\u00E9d,running" lowerCAmelCase_ = tokenizer.tokenize(_a ) lowerCAmelCase_ = rust_tokenizer.tokenize(_a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = rust_tokenizer.encode(_a , add_special_tokens=_a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = self.get_rust_tokenizer() lowerCAmelCase_ = tokenizer.encode(_a ) lowerCAmelCase_ = rust_tokenizer.encode(_a ) self.assertListEqual(_a , _a ) # With lower casing lowerCAmelCase_ = self.get_tokenizer(do_lower_case=_a ) lowerCAmelCase_ = self.get_rust_tokenizer(do_lower_case=_a ) lowerCAmelCase_ = "UNwant\u00E9d,running" lowerCAmelCase_ = tokenizer.tokenize(_a ) lowerCAmelCase_ = rust_tokenizer.tokenize(_a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = rust_tokenizer.encode(_a , add_special_tokens=_a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = self.get_rust_tokenizer() lowerCAmelCase_ = tokenizer.encode(_a ) lowerCAmelCase_ = rust_tokenizer.encode(_a ) self.assertListEqual(_a , _a ) def __a ( self ) -> Any: lowerCAmelCase_ = BasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] ) def __a ( self ) -> Dict: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? 
" ) , ["hello", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def __a ( self ) -> List[Any]: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] ) def __a ( self ) -> str: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def __a ( self ) -> str: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def __a ( self ) -> str: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] ) def __a ( self ) -> List[str]: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] ) def __a ( self ) -> Any: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , never_split=["[UNK]"] ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] ) def __a ( self ) -> Any: lowerCAmelCase_ = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] lowerCAmelCase_ = {} for i, token in enumerate(_a ): lowerCAmelCase_ = i lowerCAmelCase_ = WordpieceTokenizer(vocab=_a , unk_token="[UNK]" ) self.assertListEqual(tokenizer.tokenize("" ) , [] ) self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] ) self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] ) def __a ( self ) -> Optional[int]: self.assertTrue(_is_whitespace(" " ) ) self.assertTrue(_is_whitespace("\t" ) ) self.assertTrue(_is_whitespace("\r" ) ) self.assertTrue(_is_whitespace("\n" ) ) self.assertTrue(_is_whitespace("\u00A0" ) ) self.assertFalse(_is_whitespace("A" ) ) self.assertFalse(_is_whitespace("-" ) ) def __a ( self ) -> List[str]: self.assertTrue(_is_control("\u0005" ) ) self.assertFalse(_is_control("A" ) ) self.assertFalse(_is_control(" " ) ) self.assertFalse(_is_control("\t" ) ) self.assertFalse(_is_control("\r" ) ) def __a ( self ) -> Dict: self.assertTrue(_is_punctuation("-" ) ) self.assertTrue(_is_punctuation("$" ) ) self.assertTrue(_is_punctuation("`" ) ) self.assertTrue(_is_punctuation("." 
) ) self.assertFalse(_is_punctuation("A" ) ) self.assertFalse(_is_punctuation(" " ) ) def __a ( self ) -> Any: lowerCAmelCase_ = self.get_tokenizer() lowerCAmelCase_ = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(_a ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) self.assertListEqual( [rust_tokenizer.tokenize(_a ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) @slow def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = self.tokenizer_class.from_pretrained("google/mobilebert-uncased" ) lowerCAmelCase_ = tokenizer.encode("sequence builders" , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer.encode("multi-sequence build" , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_a ) lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_a , _a ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def __a ( self ) -> Union[str, Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence." lowerCAmelCase_ = tokenizer_r.encode_plus( _a , return_attention_mask=_a , return_token_type_ids=_a , return_offsets_mapping=_a , add_special_tokens=_a , ) lowerCAmelCase_ = tokenizer_r.do_lower_case if hasattr(_a , "do_lower_case" ) else False lowerCAmelCase_ = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "A"), ((1, 2), ","), ((3, 5), "na"), ((5, 6), "##ï"), ((6, 8), "##ve"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "Allen"), ((21, 23), "##NL"), ((23, 24), "##P"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "a"), ((1, 2), ","), ((3, 8), "naive"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "allen"), ((21, 23), "##nl"), ((23, 24), "##p"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) ) self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] ) def __a ( self ) -> Optional[int]: lowerCAmelCase_ = ["的", "人", "有"] lowerCAmelCase_ = "".join(_a ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): lowerCAmelCase_ = True lowerCAmelCase_ = self.tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = tokenizer_p.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer_r.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer_r.convert_ids_to_tokens(_a ) lowerCAmelCase_ = tokenizer_p.convert_ids_to_tokens(_a ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(_a , _a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = False lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = self.tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = tokenizer_r.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer_p.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer_r.convert_ids_to_tokens(_a ) lowerCAmelCase_ = 
tokenizer_p.convert_ids_to_tokens(_a ) # it is expected that only the first Chinese character is not preceded by "##". lowerCAmelCase_ = [ f"##{token}" if idx != 0 else token for idx, token in enumerate(_a ) ] self.assertListEqual(_a , _a ) self.assertListEqual(_a , _a )
def A(__a: float ): if not isinstance(__a , __a ) or edge <= 0: raise ValueError("Length must be a positive number." ) return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2) def A(__a: float ): if not isinstance(__a , __a ) or edge <= 0: raise ValueError("Length must be a positive number." ) return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3) if __name__ == "__main__": import doctest doctest.testmod()
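Since the entry's identifiers are obfuscated placeholders, here is a readable, runnable restatement; for a unit edge the regular-dodecahedron formulas evaluate to roughly 20.646 (surface area, 3·sqrt(25 + 10·sqrt(5))) and 7.663 (volume, (15 + 7·sqrt(5))/4):

def dodecahedron_surface_area(edge: float) -> float:
    # type check first, so non-numeric input raises ValueError rather than TypeError
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return 3 * ((25 + 10 * (5 ** 0.5)) ** 0.5) * edge**2

def dodecahedron_volume(edge: float) -> float:
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return ((15 + 7 * (5 ** 0.5)) / 4) * edge**3

print(round(dodecahedron_surface_area(1), 3))  # 20.646
print(round(dodecahedron_volume(1), 3))        # 7.663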
import math from collections.abc import Iterator from itertools import takewhile def A(__a: int ): if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(__a ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def A(): lowerCAmelCase_ = 2 while True: if is_prime(__a ): yield num num += 1 def A(__a: int = 200_0000 ): return sum(takewhile(lambda __a : x < n , prime_generator() ) ) if __name__ == "__main__": print(F'''{solution() = }''')
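The takewhile idiom in solution() deserves a standalone demonstration: it lazily consumes an infinite generator and stops at the first element that fails the predicate. A self-contained sketch:

from itertools import count, takewhile

def primes():
    # naive infinite prime generator, for illustration only
    for n in count(2):
        if all(n % d for d in range(2, int(n**0.5) + 1)):
            yield n

print(sum(takewhile(lambda p: p < 10, primes())))  # 2 + 3 + 5 + 7 = 17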
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available lowerCamelCase__ = {'''configuration_yolos''': ['''YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''YolosConfig''', '''YolosOnnxConfig''']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ['''YolosFeatureExtractor'''] lowerCamelCase__ = ['''YolosImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ '''YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST''', '''YolosForObjectDetection''', '''YolosModel''', '''YolosPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_yolos import YolosFeatureExtractor from .image_processing_yolos import YolosImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_yolos import ( YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST, YolosForObjectDetection, YolosModel, YolosPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { '''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''', '''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''', '''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''', '''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''', # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2 } class __magic_name__ (__lowercase ): lowerCamelCase__ = '''mobilenet_v2''' def __init__( self , _a=3 , _a=224 , _a=1.0 , _a=8 , _a=8 , _a=6 , _a=32 , _a=True , _a=True , _a="relu6" , _a=True , _a=0.8 , _a=0.0_2 , _a=0.0_0_1 , _a=255 , **_a , ) -> Dict: super().__init__(**_a ) if depth_multiplier <= 0: raise ValueError("depth_multiplier must be greater than zero." ) lowerCAmelCase_ = num_channels lowerCAmelCase_ = image_size lowerCAmelCase_ = depth_multiplier lowerCAmelCase_ = depth_divisible_by lowerCAmelCase_ = min_depth lowerCAmelCase_ = expand_ratio lowerCAmelCase_ = output_stride lowerCAmelCase_ = first_layer_is_expansion lowerCAmelCase_ = finegrained_output lowerCAmelCase_ = hidden_act lowerCAmelCase_ = tf_padding lowerCAmelCase_ = classifier_dropout_prob lowerCAmelCase_ = initializer_range lowerCAmelCase_ = layer_norm_eps lowerCAmelCase_ = semantic_loss_ignore_index class __magic_name__ (__lowercase ): lowerCamelCase__ = version.parse('''1.11''' ) @property def __a ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict([("pixel_values", {0: "batch"})] ) @property def __a ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "image-classification": return OrderedDict([("logits", {0: "batch"})] ) else: return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] ) @property def __a ( self ) -> float: return 1E-4
from ...processing_utils import ProcessorMixin class __magic_name__ (__lowercase ): lowerCamelCase__ = ['''image_processor''', '''feature_extractor'''] lowerCamelCase__ = '''TvltImageProcessor''' lowerCamelCase__ = '''TvltFeatureExtractor''' def __init__( self , _a , _a ) -> int: super().__init__(image_processor=_a , feature_extractor=_a ) lowerCAmelCase_ = image_processor lowerCAmelCase_ = feature_extractor def __call__( self , _a=None , _a=None , _a=None , _a=None , _a=False , _a=False , *_a , **_a , ) -> int: if images is None and audio is None: raise ValueError("You need to specify either an `images` or `audio` input to process." ) lowerCAmelCase_ = None if images is not None: lowerCAmelCase_ = self.image_processor(_a , mask_pixel=_a , *_a , **_a ) if images_mixed is not None: lowerCAmelCase_ = self.image_processor(_a , is_mixed=_a , *_a , **_a ) if audio is not None: lowerCAmelCase_ = self.feature_extractor( _a , *_a , sampling_rate=_a , mask_audio=_a , **_a ) lowerCAmelCase_ = {} if audio is not None: output_dict.update(_a ) if images is not None: output_dict.update(_a ) if images_mixed_dict is not None: output_dict.update(_a ) return output_dict @property def __a ( self ) -> Tuple: lowerCAmelCase_ = self.image_processor.model_input_names lowerCAmelCase_ = self.feature_extractor.model_input_names return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
from __future__ import annotations def A(__a: dict , __a: str ): lowerCAmelCase_ , lowerCAmelCase_ = set(__a ), [start] while stack: lowerCAmelCase_ = stack.pop() explored.add(__a ) # Differences from BFS: # 1) pop last element instead of first one # 2) add adjacent elements to stack without exploring them for adj in reversed(graph[v] ): if adj not in explored: stack.append(__a ) return explored lowerCamelCase__ = { '''A''': ['''B''', '''C''', '''D'''], '''B''': ['''A''', '''D''', '''E'''], '''C''': ['''A''', '''F'''], '''D''': ['''B''', '''D'''], '''E''': ['''B''', '''F'''], '''F''': ['''C''', '''E''', '''G'''], '''G''': ['''F'''], } if __name__ == "__main__": import doctest doctest.testmod() print(depth_first_search(G, '''A'''))
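A runnable restatement of the iterative DFS (the entry's identifiers are placeholders); an explicit already-explored guard is added so a node pushed twice before being visited is not processed twice:

def depth_first_search(graph: dict, start: str) -> set:
    explored, stack = set(), [start]
    while stack:
        v = stack.pop()
        if v in explored:          # guard against duplicate pushes
            continue
        explored.add(v)
        # pushing neighbours in reversed order visits them in adjacency-list order
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored

G = {"A": ["B", "C"], "B": ["A", "D"], "C": ["A"], "D": ["B"]}
print(sorted(depth_first_search(G, "A")))  # ['A', 'B', 'C', 'D']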
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_xlnet import XLNetTokenizer else: lowerCamelCase__ = None lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''} lowerCamelCase__ = { '''vocab_file''': { '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''', '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''', }, '''tokenizer_file''': { '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json''', '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json''', }, } lowerCamelCase__ = { '''xlnet-base-cased''': None, '''xlnet-large-cased''': None, } lowerCamelCase__ = '''▁''' # Segments (not really needed) lowerCamelCase__ = 0 lowerCamelCase__ = 1 lowerCamelCase__ = 2 lowerCamelCase__ = 3 lowerCamelCase__ = 4 class __magic_name__ (__lowercase ): lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ = '''left''' lowerCamelCase__ = XLNetTokenizer def __init__( self , _a=None , _a=None , _a=False , _a=True , _a=False , _a="<s>" , _a="</s>" , _a="<unk>" , _a="<sep>" , _a="<pad>" , _a="<cls>" , _a="<mask>" , _a=["<eop>", "<eod>"] , **_a , ) -> Tuple: # Mask token behave like a normal word, i.e. include the space before it lowerCAmelCase_ = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token super().__init__( vocab_file=_a , tokenizer_file=_a , do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , additional_special_tokens=_a , **_a , ) lowerCAmelCase_ = 3 lowerCAmelCase_ = do_lower_case lowerCAmelCase_ = remove_space lowerCAmelCase_ = keep_accents lowerCAmelCase_ = vocab_file lowerCAmelCase_ = False if not self.vocab_file else True def __a ( self , _a , _a = None ) -> List[int]: lowerCAmelCase_ = [self.sep_token_id] lowerCAmelCase_ = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def __a ( self , _a , _a = None ) -> List[int]: lowerCAmelCase_ = [self.sep_token_id] lowerCAmelCase_ = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def __a ( self , _a , _a = None ) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(_a ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return lowerCAmelCase_ = os.path.join( _a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ): copyfile(self.vocab_file , _a ) return (out_vocab_file,)
def A(__a: Tuple ): lowerCAmelCase_ = len(__a ) while cur > 1: # Find the maximum number in arr lowerCAmelCase_ = arr.index(max(arr[0:cur] ) ) # Reverse from 0 to mi lowerCAmelCase_ = arr[mi::-1] + arr[mi + 1 : len(__a )] # Reverse whole list lowerCAmelCase_ = arr[cur - 1 :: -1] + arr[cur : len(__a )] cur -= 1 return arr if __name__ == "__main__": lowerCamelCase__ = input('''Enter numbers separated by a comma:\n''').strip() lowerCamelCase__ = [int(item) for item in user_input.split(''',''')] print(pancake_sort(unsorted))
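A trace on a tiny input makes the two flips per pass visible; this is a direct, runnable restatement of the entry's logic:

def pancake_sort(arr: list) -> list:
    cur = len(arr)
    while cur > 1:
        mi = arr.index(max(arr[0:cur]))     # position of the largest unsorted value
        arr = arr[mi::-1] + arr[mi + 1:]    # flip it to the front
        arr = arr[cur - 1::-1] + arr[cur:]  # flip it into its final slot
        cur -= 1
    return arr

print(pancake_sort([3, 1, 2]))  # [1, 2, 3]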
import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''} lowerCamelCase__ = { '''vocab_file''': { '''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''', '''allenai/longformer-large-4096''': ( '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json''' ), '''allenai/longformer-large-4096-finetuned-triviaqa''': ( '''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json''' ), '''allenai/longformer-base-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json''' ), '''allenai/longformer-large-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json''' ), }, '''merges_file''': { '''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''', '''allenai/longformer-large-4096''': ( '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt''' ), '''allenai/longformer-large-4096-finetuned-triviaqa''': ( '''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt''' ), '''allenai/longformer-base-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt''' ), '''allenai/longformer-large-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt''' ), }, } lowerCamelCase__ = { '''allenai/longformer-base-4096''': 40_96, '''allenai/longformer-large-4096''': 40_96, '''allenai/longformer-large-4096-finetuned-triviaqa''': 40_96, '''allenai/longformer-base-4096-extra.pos.embd.only''': 40_96, '''allenai/longformer-large-4096-extra.pos.embd.only''': 40_96, } @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def A(): lowerCAmelCase_ = ( list(range(ord("!" 
) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) lowerCAmelCase_ = bs[:] lowerCAmelCase_ = 0 for b in range(2**8 ): if b not in bs: bs.append(__a ) cs.append(2**8 + n ) n += 1 lowerCAmelCase_ = [chr(__a ) for n in cs] return dict(zip(__a , __a ) ) def A(__a: Optional[Any] ): lowerCAmelCase_ = set() lowerCAmelCase_ = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCAmelCase_ = char return pairs class __magic_name__ (__lowercase ): lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ = ['''input_ids''', '''attention_mask'''] def __init__( self , _a , _a , _a="replace" , _a="<s>" , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , _a=False , **_a , ) -> Tuple: lowerCAmelCase_ = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else bos_token lowerCAmelCase_ = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else eos_token lowerCAmelCase_ = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else sep_token lowerCAmelCase_ = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else cls_token lowerCAmelCase_ = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else unk_token lowerCAmelCase_ = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else pad_token # Mask token behave like a normal word, i.e. include the space before it lowerCAmelCase_ = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token super().__init__( errors=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , cls_token=_a , pad_token=_a , mask_token=_a , add_prefix_space=_a , **_a , ) with open(_a , encoding="utf-8" ) as vocab_handle: lowerCAmelCase_ = json.load(_a ) lowerCAmelCase_ = {v: k for k, v in self.encoder.items()} lowerCAmelCase_ = errors # how to handle errors in decoding lowerCAmelCase_ = bytes_to_unicode() lowerCAmelCase_ = {v: k for k, v in self.byte_encoder.items()} with open(_a , encoding="utf-8" ) as merges_handle: lowerCAmelCase_ = merges_handle.read().split("\n" )[1:-1] lowerCAmelCase_ = [tuple(merge.split() ) for merge in bpe_merges] lowerCAmelCase_ = dict(zip(_a , range(len(_a ) ) ) ) lowerCAmelCase_ = {} lowerCAmelCase_ = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions lowerCAmelCase_ = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property def __a ( self ) -> Optional[int]: return len(self.encoder ) def __a ( self ) -> Optional[Any]: return dict(self.encoder , **self.added_tokens_encoder ) def __a ( self , _a ) -> int: if token in self.cache: return self.cache[token] lowerCAmelCase_ = tuple(_a ) lowerCAmelCase_ = get_pairs(_a ) if not pairs: return token while True: lowerCAmelCase_ = min(_a , key=lambda _a : self.bpe_ranks.get(_a , float("inf" ) ) ) if bigram not in self.bpe_ranks: break lowerCAmelCase_ , lowerCAmelCase_ = bigram lowerCAmelCase_ = [] lowerCAmelCase_ = 0 while i < len(_a ): try: lowerCAmelCase_ = word.index(_a , _a ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCAmelCase_ = j if word[i] == first and i < len(_a ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCAmelCase_ = tuple(_a ) lowerCAmelCase_ = new_word if len(_a ) == 1: break else: 
lowerCAmelCase_ = get_pairs(_a ) lowerCAmelCase_ = " ".join(_a ) lowerCAmelCase_ = word return word def __a ( self , _a ) -> Tuple: lowerCAmelCase_ = [] for token in re.findall(self.pat , _a ): lowerCAmelCase_ = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_a ).split(" " ) ) return bpe_tokens def __a ( self , _a ) -> int: return self.encoder.get(_a , self.encoder.get(self.unk_token ) ) def __a ( self , _a ) -> Any: return self.decoder.get(_a ) def __a ( self , _a ) -> List[Any]: lowerCAmelCase_ = "".join(_a ) lowerCAmelCase_ = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def __a ( self , _a , _a = None ) -> Tuple[str]: if not os.path.isdir(_a ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return lowerCAmelCase_ = os.path.join( _a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) lowerCAmelCase_ = os.path.join( _a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(_a , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=_a , ensure_ascii=_a ) + "\n" ) lowerCAmelCase_ = 0 with open(_a , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _a : kv[1] ): if index != token_index: logger.warning( f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" ) lowerCAmelCase_ = token_index writer.write(" ".join(_a ) + "\n" ) index += 1 return vocab_file, merge_file def __a ( self , _a , _a = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCAmelCase_ = [self.cls_token_id] lowerCAmelCase_ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __a ( self , _a , _a = None , _a = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a ) if token_ids_a is None: return [1] + ([0] * len(_a )) + [1] return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1] def __a ( self , _a , _a = None ) -> List[int]: lowerCAmelCase_ = [self.sep_token_id] lowerCAmelCase_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __a ( self , _a , _a=False , **_a ) -> Tuple: lowerCAmelCase_ = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(_a ) > 0 and not text[0].isspace()): lowerCAmelCase_ = " " + text return (text, kwargs)
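The bpe() merge loop is the heart of this tokenizer; a toy restatement with an invented two-rule merge table makes the mechanics visible:

def get_pairs(word):
    return {(a, b) for a, b in zip(word, word[1:])}

def bpe(token: str, ranks: dict) -> str:
    word = tuple(token)
    while len(word) > 1:
        pairs = get_pairs(word)
        # always merge the lowest-ranked (earliest-learned) pair first
        bigram = min(pairs, key=lambda p: ranks.get(p, float("inf")))
        if bigram not in ranks:
            break
        first, second = bigram
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
    return " ".join(word)

ranks = {("l", "o"): 0, ("lo", "w"): 1}  # invented merge table
print(bpe("low", ranks))  # "low" after merging l+o, then lo+w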
import string from math import logaa def A(__a: str , __a: str ): lowerCAmelCase_ = document.translate( str.maketrans("" , "" , string.punctuation ) ).replace("\n" , "" ) lowerCAmelCase_ = document_without_punctuation.split(" " ) # word tokenization return len([word for word in tokenize_document if word.lower() == term.lower()] ) def A(__a: str , __a: str ): lowerCAmelCase_ = corpus.lower().translate( str.maketrans("" , "" , string.punctuation ) ) # strip all punctuation and replace it with '' lowerCAmelCase_ = corpus_without_punctuation.split("\n" ) lowerCAmelCase_ = term.lower() return (len([doc for doc in docs if term in doc] ), len(__a )) def A(__a: int , __a: int , __a: List[Any]=False ): if smoothing: if n == 0: raise ValueError("log10(0) is undefined." ) return round(1 + logaa(n / (1 + df) ) , 3 ) if df == 0: raise ZeroDivisionError("df must be > 0" ) elif n == 0: raise ValueError("log10(0) is undefined." ) return round(logaa(n / df ) , 3 ) def A(__a: int , __a: int ): return round(tf * idf , 3 )
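A worked run of the arithmetic, matching the entry's use of base-10 logarithms (the corpus counts are invented):

from math import log10

tf = 2                          # "to" appears twice in "To be, or not to be"
docs_with_term, n_docs = 1, 3   # invented corpus: one of three documents contains the term
idf = round(log10(n_docs / docs_with_term), 3)  # log10(3) ~= 0.477
print(idf, round(tf * idf, 3))  # 0.477 0.954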
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { '''facebook/timesformer''': '''https://huggingface.co/facebook/timesformer/resolve/main/config.json''', } class __magic_name__ (__lowercase ): lowerCamelCase__ = '''timesformer''' def __init__( self , _a=224 , _a=16 , _a=3 , _a=8 , _a=768 , _a=12 , _a=12 , _a=3072 , _a="gelu" , _a=0.0 , _a=0.0 , _a=0.0_2 , _a=1E-6 , _a=True , _a="divided_space_time" , _a=0 , **_a , ) -> Any: super().__init__(**_a ) lowerCAmelCase_ = image_size lowerCAmelCase_ = patch_size lowerCAmelCase_ = num_channels lowerCAmelCase_ = num_frames lowerCAmelCase_ = hidden_size lowerCAmelCase_ = num_hidden_layers lowerCAmelCase_ = num_attention_heads lowerCAmelCase_ = intermediate_size lowerCAmelCase_ = hidden_act lowerCAmelCase_ = hidden_dropout_prob lowerCAmelCase_ = attention_probs_dropout_prob lowerCAmelCase_ = initializer_range lowerCAmelCase_ = layer_norm_eps lowerCAmelCase_ = qkv_bias lowerCAmelCase_ = attention_type lowerCAmelCase_ = drop_path_rate
import warnings from ...utils import is_sklearn_available, requires_backends if is_sklearn_available(): from scipy.stats import pearsonr, spearmanr from sklearn.metrics import fa_score, matthews_corrcoef lowerCamelCase__ = ( '''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate ''' '''library. You can have a look at this example script for pointers: ''' '''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' ) def A(__a: str , __a: List[Any] ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) return (preds == labels).mean() def A(__a: Any , __a: Any ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) lowerCAmelCase_ = simple_accuracy(__a , __a ) lowerCAmelCase_ = fa_score(y_true=__a , y_pred=__a ) return { "acc": acc, "f1": fa, "acc_and_f1": (acc + fa) / 2, } def A(__a: List[str] , __a: Optional[int] ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) lowerCAmelCase_ = pearsonr(__a , __a )[0] lowerCAmelCase_ = spearmanr(__a , __a )[0] return { "pearson": pearson_corr, "spearmanr": spearman_corr, "corr": (pearson_corr + spearman_corr) / 2, } def A(__a: Union[str, Any] , __a: Any , __a: str ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) assert len(__a ) == len(__a ), F"Predictions and labels have mismatched lengths {len(__a )} and {len(__a )}" if task_name == "cola": return {"mcc": matthews_corrcoef(__a , __a )} elif task_name == "sst-2": return {"acc": simple_accuracy(__a , __a )} elif task_name == "mrpc": return acc_and_fa(__a , __a ) elif task_name == "sts-b": return pearson_and_spearman(__a , __a ) elif task_name == "qqp": return acc_and_fa(__a , __a ) elif task_name == "mnli": return {"mnli/acc": simple_accuracy(__a , __a )} elif task_name == "mnli-mm": return {"mnli-mm/acc": simple_accuracy(__a , __a )} elif task_name == "qnli": return {"acc": simple_accuracy(__a , __a )} elif task_name == "rte": return {"acc": simple_accuracy(__a , __a )} elif task_name == "wnli": return {"acc": simple_accuracy(__a , __a )} elif task_name == "hans": return {"acc": simple_accuracy(__a , __a )} else: raise KeyError(__a ) def A(__a: int , __a: Optional[Any] , __a: Optional[Any] ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) if len(__a ) != len(__a ): raise ValueError(F"Predictions and labels have mismatched lengths {len(__a )} and {len(__a )}" ) if task_name == "xnli": return {"acc": simple_accuracy(__a , __a )} else: raise KeyError(__a )
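The accuracy-plus-F1 combination used for MRPC and QQP reduces to a few lines with sklearn directly (arrays invented for illustration):

import numpy as np
from sklearn.metrics import f1_score

preds = np.array([1, 0, 1, 1])
labels = np.array([1, 0, 0, 1])
acc = (preds == labels).mean()
f1 = f1_score(y_true=labels, y_pred=preds)
print({"acc": acc, "f1": f1, "acc_and_f1": (acc + f1) / 2})
# acc 0.75, f1 0.8, combined 0.775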
import math from typing import Any, Callable, List, Optional, Tuple, Union import numpy as np import torch from ...models import TaFilmDecoder from ...schedulers import DDPMScheduler from ...utils import is_onnx_available, logging, randn_tensor if is_onnx_available(): from ..onnx_utils import OnnxRuntimeModel from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline from .continous_encoder import SpectrogramContEncoder from .notes_encoder import SpectrogramNotesEncoder lowerCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name lowerCamelCase__ = 2_56 class __magic_name__ (__lowercase ): lowerCamelCase__ = ['''melgan'''] def __init__( self , _a , _a , _a , _a , _a , ) -> None: super().__init__() # From MELGAN lowerCAmelCase_ = math.log(1E-5 ) # Matches MelGAN training. lowerCAmelCase_ = 4.0 # Largest value for most examples lowerCAmelCase_ = 128 self.register_modules( notes_encoder=_a , continuous_encoder=_a , decoder=_a , scheduler=_a , melgan=_a , ) def __a ( self , _a , _a=(-1.0, 1.0) , _a=False ) -> List[str]: lowerCAmelCase_ , lowerCAmelCase_ = output_range if clip: lowerCAmelCase_ = torch.clip(_a , self.min_value , self.max_value ) # Scale to [0, 1]. lowerCAmelCase_ = (features - self.min_value) / (self.max_value - self.min_value) # Scale to [min_out, max_out]. return zero_one * (max_out - min_out) + min_out def __a ( self , _a , _a=(-1.0, 1.0) , _a=False ) -> int: lowerCAmelCase_ , lowerCAmelCase_ = input_range lowerCAmelCase_ = torch.clip(_a , _a , _a ) if clip else outputs # Scale to [0, 1]. lowerCAmelCase_ = (outputs - min_out) / (max_out - min_out) # Scale to [self.min_value, self.max_value]. return zero_one * (self.max_value - self.min_value) + self.min_value def __a ( self , _a , _a , _a ) -> int: lowerCAmelCase_ = input_tokens > 0 lowerCAmelCase_ , lowerCAmelCase_ = self.notes_encoder( encoder_input_tokens=_a , encoder_inputs_mask=_a ) lowerCAmelCase_ , lowerCAmelCase_ = self.continuous_encoder( encoder_inputs=_a , encoder_inputs_mask=_a ) return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)] def __a ( self , _a , _a , _a ) -> Optional[int]: lowerCAmelCase_ = noise_time if not torch.is_tensor(_a ): lowerCAmelCase_ = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device ) elif torch.is_tensor(_a ) and len(timesteps.shape ) == 0: lowerCAmelCase_ = timesteps[None].to(input_tokens.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML lowerCAmelCase_ = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device ) lowerCAmelCase_ = self.decoder( encodings_and_masks=_a , decoder_input_tokens=_a , decoder_noise_time=_a ) return logits @torch.no_grad() def __call__( self , _a , _a = None , _a = 100 , _a = True , _a = "numpy" , _a = None , _a = 1 , ) -> Union[AudioPipelineOutput, Tuple]: if (callback_steps is None) or ( callback_steps is not None and (not isinstance(_a , _a ) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(_a )}." 
) lowerCAmelCase_ = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa ) lowerCAmelCase_ = np.zeros([1, 0, self.n_dims] , np.floataa ) lowerCAmelCase_ = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=_a , device=self.device ) for i, encoder_input_tokens in enumerate(_a ): if i == 0: lowerCAmelCase_ = torch.from_numpy(pred_mel[:1].copy() ).to( device=self.device , dtype=self.decoder.dtype ) # The first chunk has no previous context. lowerCAmelCase_ = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=_a , device=self.device ) else: # The full song pipeline does not feed in a context feature, so the mask # will be all 0s after the feature converter. Because we know we're # feeding in a full context chunk from the previous prediction, set it # to all 1s. lowerCAmelCase_ = ones lowerCAmelCase_ = self.scale_features( _a , output_range=[-1.0, 1.0] , clip=_a ) lowerCAmelCase_ = self.encode( input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=_a , continuous_mask=_a , ) # Sample encoder_continuous_inputs shaped gaussian noise to begin loop lowerCAmelCase_ = randn_tensor( shape=encoder_continuous_inputs.shape , generator=_a , device=self.device , dtype=self.decoder.dtype , ) # set step values self.scheduler.set_timesteps(_a ) # Denoising diffusion loop for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): lowerCAmelCase_ = self.decode( encodings_and_masks=_a , input_tokens=_a , noise_time=t / self.scheduler.config.num_train_timesteps , ) # Compute previous output: x_t -> x_t-1 lowerCAmelCase_ = self.scheduler.step(_a , _a , _a , generator=_a ).prev_sample lowerCAmelCase_ = self.scale_to_features(_a , input_range=[-1.0, 1.0] ) lowerCAmelCase_ = mel[:1] lowerCAmelCase_ = mel.cpu().float().numpy() lowerCAmelCase_ = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 ) # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(_a , _a ) logger.info("Generated segment" , _a ) if output_type == "numpy" and not is_onnx_available(): raise ValueError( "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'." ) elif output_type == "numpy" and self.melgan is None: raise ValueError( "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'." ) if output_type == "numpy": lowerCAmelCase_ = self.melgan(input_features=full_pred_mel.astype(np.floataa ) ) else: lowerCAmelCase_ = full_pred_mel if not return_dict: return (output,) return AudioPipelineOutput(audios=_a )
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __magic_name__ (__lowercase ): lowerCamelCase__ = ['''image_processor''', '''tokenizer'''] lowerCamelCase__ = '''ViTImageProcessor''' lowerCamelCase__ = ('''CLIPTokenizer''', '''CLIPTokenizerFast''') def __init__( self , _a=None , _a=None , **_a ) -> Tuple: lowerCAmelCase_ = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , _a , ) lowerCAmelCase_ = kwargs.pop("feature_extractor" ) lowerCAmelCase_ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(_a , _a ) def __call__( self , _a=None , _a=None , _a=None , _a=None , **_a ) -> Dict: if text is None and visual_prompt is None and images is None: raise ValueError("You have to specify either text, visual prompt or images." ) if text is not None and visual_prompt is not None: raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." ) if text is not None: lowerCAmelCase_ = self.tokenizer(_a , return_tensors=_a , **_a ) if visual_prompt is not None: lowerCAmelCase_ = self.image_processor(_a , return_tensors=_a , **_a ) if images is not None: lowerCAmelCase_ = self.image_processor(_a , return_tensors=_a , **_a ) if visual_prompt is not None and images is not None: lowerCAmelCase_ = { "pixel_values": image_features.pixel_values, "conditional_pixel_values": prompt_features.pixel_values, } return encoding elif text is not None and images is not None: lowerCAmelCase_ = image_features.pixel_values return encoding elif text is not None: return encoding elif visual_prompt is not None: lowerCAmelCase_ = { "conditional_pixel_values": prompt_features.pixel_values, } return encoding else: return BatchEncoding(data=dict(**_a ) , tensor_type=_a ) def __a ( self , *_a , **_a ) -> List[str]: return self.tokenizer.batch_decode(*_a , **_a ) def __a ( self , *_a , **_a ) -> Optional[int]: return self.tokenizer.decode(*_a , **_a ) @property def __a ( self ) -> List[str]: warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , _a , ) return self.image_processor_class @property def __a ( self ) -> Optional[Any]: warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _a , ) return self.image_processor
def A(__a: dict ): lowerCAmelCase_ = set() # edges = list of graph's edges lowerCAmelCase_ = get_edges(__a ) # While there are still elements in edges list, take an arbitrary edge # (from_node, to_node) and add his extremity to chosen_vertices and then # remove all arcs adjacent to the from_node and to_node while edges: lowerCAmelCase_ , lowerCAmelCase_ = edges.pop() chosen_vertices.add(__a ) chosen_vertices.add(__a ) for edge in edges.copy(): if from_node in edge or to_node in edge: edges.discard(__a ) return chosen_vertices def A(__a: dict ): lowerCAmelCase_ = set() for from_node, to_nodes in graph.items(): for to_node in to_nodes: edges.add((from_node, to_node) ) return edges if __name__ == "__main__": import doctest doctest.testmod() # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]} # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
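This is the classic maximal-matching 2-approximation for vertex cover: each popped edge contributes both endpoints, and all edges touching them are discarded. A self-contained restatement with a validity check, reusing the entry's commented example graph (the exact cover returned depends on set iteration order):

def matching_min_vertex_cover(graph: dict) -> set:
    cover = set()
    edges = {(u, v) for u, neighbours in graph.items() for v in neighbours}
    while edges:
        u, v = edges.pop()  # an arbitrary edge of a maximal matching
        cover.update((u, v))
        edges = {e for e in edges if u not in e and v not in e}
    return cover

graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
cover = matching_min_vertex_cover(graph)
# every edge must have at least one endpoint in the cover
assert all(u in cover or v in cover for u, nbrs in graph.items() for v in nbrs)
print(cover)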
import datasets lowerCamelCase__ = '''\ @InProceedings{conneau2018xnli, author = "Conneau, Alexis and Rinott, Ruty and Lample, Guillaume and Williams, Adina and Bowman, Samuel R. and Schwenk, Holger and Stoyanov, Veselin", title = "XNLI: Evaluating Cross-lingual Sentence Representations", booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", year = "2018", publisher = "Association for Computational Linguistics", location = "Brussels, Belgium", } ''' lowerCamelCase__ = '''\ XNLI is a subset of a few thousand examples from MNLI which have been translated into 14 different languages (some of them low-resource). As with MNLI, the goal is to predict textual entailment (does sentence A imply/contradict/neither sentence B); it is a classification task (given two sentences, predict one of three labels). ''' lowerCamelCase__ = ''' Computes XNLI score which is just simple accuracy. Args: predictions: Predicted labels. references: Ground truth labels. Returns: \'accuracy\': accuracy Examples: >>> predictions = [0, 1] >>> references = [0, 1] >>> xnli_metric = datasets.load_metric("xnli") >>> results = xnli_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0} ''' def A(__a: Dict , __a: Union[str, Any] ): return (preds == labels).mean() @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __magic_name__ (datasets.Metric ): def __a ( self ) -> Tuple: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ), "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ), } ) , codebase_urls=[] , reference_urls=[] , format="numpy" , ) def __a ( self , _a , _a ) -> List[str]: return {"accuracy": simple_accuracy(_a , _a )}
import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class __magic_name__ : def __init__( self , _a , _a=2 , _a=8 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=16 , _a=5 , _a=2 , _a=36 , _a="gelu" , _a=0.0 , _a=0.0 , _a=512 , _a=16 , _a=2 , _a=0.0_2 , _a=3 , _a=4 , _a=None , ) -> List[Any]: lowerCAmelCase_ = parent lowerCAmelCase_ = batch_size lowerCAmelCase_ = seq_length lowerCAmelCase_ = is_training lowerCAmelCase_ = use_input_mask lowerCAmelCase_ = use_token_type_ids lowerCAmelCase_ = use_labels lowerCAmelCase_ = vocab_size lowerCAmelCase_ = hidden_size lowerCAmelCase_ = num_hidden_layers lowerCAmelCase_ = num_attention_heads lowerCAmelCase_ = intermediate_size lowerCAmelCase_ = hidden_act lowerCAmelCase_ = hidden_dropout_prob lowerCAmelCase_ = attention_probs_dropout_prob lowerCAmelCase_ = max_position_embeddings lowerCAmelCase_ = type_vocab_size lowerCAmelCase_ = type_sequence_label_size lowerCAmelCase_ = initializer_range lowerCAmelCase_ = num_labels lowerCAmelCase_ = num_choices lowerCAmelCase_ = scope def __a ( self ) -> Dict: lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase_ = None if self.use_input_mask: lowerCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase_ = None if self.use_token_type_ids: lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase_ = None lowerCAmelCase_ = None lowerCAmelCase_ = None if self.use_labels: lowerCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase_ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __a ( self ) -> List[Any]: return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , ) def __a ( self ) -> Optional[Any]: lowerCAmelCase_ = self.get_config() lowerCAmelCase_ = 300 return config def __a ( self ) -> Any: ( ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ) = self.prepare_config_and_inputs() lowerCAmelCase_ = True lowerCAmelCase_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, 
encoder_hidden_states, encoder_attention_mask, ) def __a ( self , _a , _a , _a , _a , _a , _a , _a ) -> List[Any]: lowerCAmelCase_ = MraModel(config=_a ) model.to(_a ) model.eval() lowerCAmelCase_ = model(_a , attention_mask=_a , token_type_ids=_a ) lowerCAmelCase_ = model(_a , token_type_ids=_a ) lowerCAmelCase_ = model(_a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> List[Any]: lowerCAmelCase_ = True lowerCAmelCase_ = MraModel(_a ) model.to(_a ) model.eval() lowerCAmelCase_ = model( _a , attention_mask=_a , token_type_ids=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , ) lowerCAmelCase_ = model( _a , attention_mask=_a , token_type_ids=_a , encoder_hidden_states=_a , ) lowerCAmelCase_ = model(_a , attention_mask=_a , token_type_ids=_a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self , _a , _a , _a , _a , _a , _a , _a ) -> str: lowerCAmelCase_ = MraForMaskedLM(config=_a ) model.to(_a ) model.eval() lowerCAmelCase_ = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self , _a , _a , _a , _a , _a , _a , _a ) -> Optional[int]: lowerCAmelCase_ = MraForQuestionAnswering(config=_a ) model.to(_a ) model.eval() lowerCAmelCase_ = model( _a , attention_mask=_a , token_type_ids=_a , start_positions=_a , end_positions=_a , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __a ( self , _a , _a , _a , _a , _a , _a , _a ) -> Dict: lowerCAmelCase_ = self.num_labels lowerCAmelCase_ = MraForSequenceClassification(_a ) model.to(_a ) model.eval() lowerCAmelCase_ = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self , _a , _a , _a , _a , _a , _a , _a ) -> Any: lowerCAmelCase_ = self.num_labels lowerCAmelCase_ = MraForTokenClassification(config=_a ) model.to(_a ) model.eval() lowerCAmelCase_ = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __a ( self , _a , _a , _a , _a , _a , _a , _a ) -> List[str]: lowerCAmelCase_ = self.num_choices lowerCAmelCase_ = MraForMultipleChoice(config=_a ) model.to(_a ) model.eval() lowerCAmelCase_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase_ = model( _a , attention_mask=_a , token_type_ids=_a , labels=_a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __a ( self ) -> Optional[Any]: lowerCAmelCase_ = self.prepare_config_and_inputs() ( ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ) = config_and_inputs lowerCAmelCase_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class __magic_name__ (__lowercase , unittest.TestCase ): 
lowerCamelCase__ = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = () def __a ( self ) -> Optional[Any]: lowerCAmelCase_ = MraModelTester(self ) lowerCAmelCase_ = ConfigTester(self , config_class=_a , hidden_size=37 ) def __a ( self ) -> Tuple: self.config_tester.run_common_tests() def __a ( self ) -> Optional[int]: lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_a ) def __a ( self ) -> Tuple: lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCAmelCase_ = type self.model_tester.create_and_check_model(*_a ) def __a ( self ) -> Optional[Any]: lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_a ) def __a ( self ) -> int: lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_a ) def __a ( self ) -> int: lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_a ) def __a ( self ) -> Dict: lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_a ) def __a ( self ) -> Optional[Any]: lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_a ) @slow def __a ( self ) -> Union[str, Any]: for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase_ = MraModel.from_pretrained(_a ) self.assertIsNotNone(_a ) @unittest.skip(reason="MRA does not output attentions" ) def __a ( self ) -> Tuple: return @require_torch class __magic_name__ (unittest.TestCase ): @slow def __a ( self ) -> List[Any]: lowerCAmelCase_ = MraModel.from_pretrained("uw-madison/mra-base-512-4" ) lowerCAmelCase_ = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): lowerCAmelCase_ = model(_a )[0] lowerCAmelCase_ = torch.Size((1, 256, 768) ) self.assertEqual(output.shape , _a ) lowerCAmelCase_ = torch.tensor( [[[-0.0_1_4_0, 0.0_8_3_0, -0.0_3_8_1], [0.1_5_4_6, 0.1_4_0_2, 0.0_2_2_0], [0.1_1_6_2, 0.0_8_5_1, 0.0_1_6_5]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _a , atol=1E-4 ) ) @slow def __a ( self ) -> List[str]: lowerCAmelCase_ = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4" ) lowerCAmelCase_ = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): lowerCAmelCase_ = model(_a )[0] lowerCAmelCase_ = 50265 lowerCAmelCase_ = torch.Size((1, 256, vocab_size) ) self.assertEqual(output.shape , _a ) lowerCAmelCase_ = torch.tensor( [[[9.2_5_9_5, -3.6_0_3_8, 1_1.8_8_1_9], [9.3_8_6_9, -3.2_6_9_3, 1_1.0_9_5_6], [1_1.8_5_2_4, -3.4_9_3_8, 1_3.1_2_1_0]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _a , atol=1E-4 ) ) @slow def __a ( self ) -> Tuple: lowerCAmelCase_ = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3" ) lowerCAmelCase_ = torch.arange(4096 ).unsqueeze(0 ) with torch.no_grad(): lowerCAmelCase_ = model(_a )[0] lowerCAmelCase_ = 50265 lowerCAmelCase_ = torch.Size((1, 4096, vocab_size) ) self.assertEqual(output.shape , _a ) lowerCAmelCase_ = torch.tensor( [[[5.4_7_8_9, -2.3_5_6_4, 7.5_0_6_4], [7.9_0_6_7, -1.3_3_6_9, 9.9_6_6_8], [9.0_7_1_2, -1.8_1_0_6, 7.0_3_8_0]]] ) 
self.assertTrue(torch.allclose(output[:, :3, :3] , _a , atol=1E-4 ) )
22
import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset lowerCamelCase__ = '''bert-base-cased''' lowerCamelCase__ = '''google/pegasus-xsum''' lowerCamelCase__ = [''' Sam ate lunch today.''', '''Sams lunch ingredients.'''] lowerCamelCase__ = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee'''] lowerCamelCase__ = '''patrickvonplaten/t5-tiny-random''' lowerCamelCase__ = '''sshleifer/bart-tiny-random''' lowerCamelCase__ = '''sshleifer/tiny-mbart''' lowerCamelCase__ = '''sshleifer/tiny-marian-en-de''' def A(__a: Path , __a: list ): lowerCAmelCase_ = "\n".join(__a ) Path(__a ).open("w" ).writelines(__a ) def A(__a: str ): for split in ["train", "val", "test"]: _dump_articles(os.path.join(__a , F"{split}.source" ) , __a ) _dump_articles(os.path.join(__a , F"{split}.target" ) , __a ) return tmp_dir class __magic_name__ (__lowercase ): @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) @slow def __a ( self , _a ) -> Dict: lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a ) lowerCAmelCase_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in ARTICLES ) lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in SUMMARIES ) lowerCAmelCase_ = 4 lowerCAmelCase_ = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated lowerCAmelCase_ , lowerCAmelCase_ = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error. lowerCAmelCase_ = SeqaSeqDataset( _a , data_dir=_a , type_path="train" , max_source_length=_a , max_target_length=_a , src_lang=_a , tgt_lang=_a , ) lowerCAmelCase_ = DataLoader(_a , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert isinstance(_a , _a ) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. 
assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place lowerCAmelCase_ = shift_tokens_right(batch["labels"] , tokenizer.pad_token_id ) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED] ) def __a ( self , _a ) -> str: lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a ) lowerCAmelCase_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in ARTICLES ) lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in SUMMARIES ) lowerCAmelCase_ = 4 lowerCAmelCase_ = LegacySeqaSeqDataset( _a , data_dir=_a , type_path="train" , max_source_length=20 , max_target_length=_a , ) lowerCAmelCase_ = DataLoader(_a , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 20 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" ) lowerCAmelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) lowerCAmelCase_ = tmp_dir.joinpath("train.source" ).open().readlines() lowerCAmelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) pack_data_dir(_a , _a , 128 , _a ) lowerCAmelCase_ = {x.name for x in tmp_dir.iterdir()} lowerCAmelCase_ = {x.name for x in save_dir.iterdir()} lowerCAmelCase_ = save_dir.joinpath("train.source" ).open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(_a ) < len(_a ) assert len(_a ) == 1 assert len(packed_examples[0] ) == sum(len(_a ) for x in orig_examples ) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="This test requires fairseq" ) def __a ( self ) -> Any: if not FAIRSEQ_AVAILABLE: return lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset(max_len=64 ) lowerCAmelCase_ = 64 lowerCAmelCase_ = ds.make_dynamic_sampler(_a , required_batch_size_multiple=_a ) lowerCAmelCase_ = [len(_a ) for x in batch_sampler] assert len(set(_a ) ) > 1 # it's not dynamic batch size if every batch is the same length assert sum(_a ) == len(_a ) # no dropped or added examples lowerCAmelCase_ = DataLoader(_a , batch_sampler=_a , collate_fn=ds.collate_fn , num_workers=2 ) lowerCAmelCase_ = [] lowerCAmelCase_ = [] for batch in data_loader: lowerCAmelCase_ = batch["input_ids"].shape lowerCAmelCase_ = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple lowerCAmelCase_ = np.product(batch["input_ids"].shape ) num_src_per_batch.append(_a ) if num_src_tokens > (max_tokens * 1.1): failures.append(_a ) assert num_src_per_batch[0] == max(_a ) if failures: raise 
AssertionError(f"too many tokens in {len(_a )} batches" ) def __a ( self ) -> List[str]: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset(max_len=512 ) lowerCAmelCase_ = 2 lowerCAmelCase_ = ds.make_sortish_sampler(_a , shuffle=_a ) lowerCAmelCase_ = DataLoader(_a , batch_size=_a , collate_fn=ds.collate_fn , num_workers=2 ) lowerCAmelCase_ = DataLoader(_a , batch_size=_a , collate_fn=ds.collate_fn , num_workers=2 , sampler=_a ) lowerCAmelCase_ = tokenizer.pad_token_id def count_pad_tokens(_a , _a="input_ids" ): return [batch[k].eq(_a ).sum().item() for batch in data_loader] assert sum(count_pad_tokens(_a , k="labels" ) ) < sum(count_pad_tokens(_a , k="labels" ) ) assert sum(count_pad_tokens(_a ) ) < sum(count_pad_tokens(_a ) ) assert len(_a ) == len(_a ) def __a ( self , _a=1000 , _a=128 ) -> str: if os.getenv("USE_REAL_DATA" , _a ): lowerCAmelCase_ = "examples/seq2seq/wmt_en_ro" lowerCAmelCase_ = max_len * 2 * 64 if not Path(_a ).joinpath("train.len" ).exists(): save_len_file(_a , _a ) else: lowerCAmelCase_ = "examples/seq2seq/test_data/wmt_en_ro" lowerCAmelCase_ = max_len * 4 save_len_file(_a , _a ) lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a ) lowerCAmelCase_ = SeqaSeqDataset( _a , data_dir=_a , type_path="train" , max_source_length=_a , max_target_length=_a , n_obs=_a , ) return ds, max_tokens, tokenizer def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset() lowerCAmelCase_ = set(DistributedSortishSampler(_a , 256 , num_replicas=2 , rank=0 , add_extra_examples=_a ) ) lowerCAmelCase_ = set(DistributedSortishSampler(_a , 256 , num_replicas=2 , rank=1 , add_extra_examples=_a ) ) assert idsa.intersection(_a ) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) def __a ( self , _a ) -> List[str]: lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a , use_fast=_a ) if tok_name == MBART_TINY: lowerCAmelCase_ = SeqaSeqDataset( _a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , src_lang="EN" , tgt_lang="FR" , ) lowerCAmelCase_ = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: lowerCAmelCase_ = SeqaSeqDataset( _a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , ) lowerCAmelCase_ = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(_a ) == 1 if tok_name == BART_TINY else len(_a ) == 0
22
1
def pancake_sort(arr: list) -> list:
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi, flipping the maximum to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the unsorted prefix arr[0:cur], flipping the maximum into position cur - 1
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
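A deterministic check of pancake_sort, since the __main__ block above only reads from stdin (my own example input):

print(pancake_sort([3, 1, 7, 0, 2]))  # [0, 1, 2, 3, 7]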
22
def find_min(arr: list) -> int:
    # Minimum difference between the sums of the two halves of a partition of arr.
    n = len(arr)
    s = sum(arr)
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    for i in range(1, n + 1):
        dp[i][0] = True
    for i in range(1, s + 1):
        dp[0][i] = False
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i][j - 1]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
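A quick usage sketch (my own example, assuming the definition above): for [1, 6, 11, 5] the total is 23 and the best partition is {1, 5, 6} versus {11}, with sums 12 and 11.

print(find_min([1, 6, 11, 5]))  # 1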
22
1
import unittest import numpy as np import torch from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __magic_name__ (__lowercase , unittest.TestCase ): lowerCamelCase__ = DDIMPipeline lowerCamelCase__ = UNCONDITIONAL_IMAGE_GENERATION_PARAMS lowerCamelCase__ = PipelineTesterMixin.required_optional_params - { '''num_images_per_prompt''', '''latents''', '''callback''', '''callback_steps''', } lowerCamelCase__ = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS lowerCamelCase__ = False def __a ( self ) -> List[Any]: torch.manual_seed(0 ) lowerCAmelCase_ = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , ) lowerCAmelCase_ = DDIMScheduler() lowerCAmelCase_ = {"unet": unet, "scheduler": scheduler} return components def __a ( self , _a , _a=0 ) -> List[Any]: if str(_a ).startswith("mps" ): lowerCAmelCase_ = torch.manual_seed(_a ) else: lowerCAmelCase_ = torch.Generator(device=_a ).manual_seed(_a ) lowerCAmelCase_ = { "batch_size": 1, "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs def __a ( self ) -> Optional[int]: lowerCAmelCase_ = "cpu" lowerCAmelCase_ = self.get_dummy_components() lowerCAmelCase_ = self.pipeline_class(**_a ) pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) lowerCAmelCase_ = self.get_dummy_inputs(_a ) lowerCAmelCase_ = pipe(**_a ).images lowerCAmelCase_ = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 32, 32, 3) ) lowerCAmelCase_ = np.array( [1.000E00, 5.717E-01, 4.717E-01, 1.000E00, 0.000E00, 1.000E00, 3.000E-04, 0.000E00, 9.000E-04] ) lowerCAmelCase_ = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_a , 1E-3 ) def __a ( self ) -> Tuple: super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def __a ( self ) -> Union[str, Any]: super().test_save_load_local(expected_max_difference=3E-3 ) def __a ( self ) -> Dict: super().test_save_load_optional_components(expected_max_difference=3E-3 ) def __a ( self ) -> Dict: super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class __magic_name__ (unittest.TestCase ): def __a ( self ) -> Optional[int]: lowerCAmelCase_ = "google/ddpm-cifar10-32" lowerCAmelCase_ = UNetaDModel.from_pretrained(_a ) lowerCAmelCase_ = DDIMScheduler() lowerCAmelCase_ = DDIMPipeline(unet=_a , scheduler=_a ) ddim.to(_a ) ddim.set_progress_bar_config(disable=_a ) lowerCAmelCase_ = torch.manual_seed(0 ) lowerCAmelCase_ = ddim(generator=_a , eta=0.0 , output_type="numpy" ).images lowerCAmelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCAmelCase_ = np.array([0.1_7_2_3, 0.1_6_1_7, 0.1_6_0_0, 0.1_6_2_6, 0.1_4_9_7, 0.1_5_1_3, 0.1_5_0_5, 0.1_4_4_2, 0.1_4_5_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __a ( self ) -> Optional[Any]: lowerCAmelCase_ = "google/ddpm-ema-bedroom-256" lowerCAmelCase_ = UNetaDModel.from_pretrained(_a ) lowerCAmelCase_ = DDIMScheduler.from_pretrained(_a ) lowerCAmelCase_ = DDIMPipeline(unet=_a , scheduler=_a ) ddpm.to(_a ) ddpm.set_progress_bar_config(disable=_a ) 
lowerCAmelCase_ = torch.manual_seed(0 ) lowerCAmelCase_ = ddpm(generator=_a , output_type="numpy" ).images lowerCAmelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) lowerCAmelCase_ = np.array([0.0_0_6_0, 0.0_2_0_1, 0.0_3_4_4, 0.0_0_2_4, 0.0_0_1_8, 0.0_0_0_2, 0.0_0_2_2, 0.0_0_0_0, 0.0_0_6_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
22
# Usage: # ./gen-card-facebook-wmt19.py import os from pathlib import Path def A(__a: Any , __a: Union[str, Any] , __a: List[str] ): lowerCAmelCase_ = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, oder?", } # BLUE scores as follows: # "pair": [fairseq, transformers] lowerCAmelCase_ = { "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"], "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"], "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"], "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"], } lowerCAmelCase_ = F"{src_lang}-{tgt_lang}" lowerCAmelCase_ = F"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. 
For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n" os.makedirs(__a , exist_ok=__a ) lowerCAmelCase_ = os.path.join(__a , "README.md" ) print(F"Generating {path}" ) with open(__a , "w" , encoding="utf-8" ) as f: f.write(__a ) # make sure we are under the root of the project lowerCamelCase__ = Path(__file__).resolve().parent.parent.parent lowerCamelCase__ = repo_dir / '''model_cards''' for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = model_name.split('''-''') lowerCamelCase__ = model_cards_dir / '''facebook''' / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
22
1
import pytest

from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs


@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
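For intuition, the contiguous-split behavior that the first test pins down can be sketched independently. This is my own re-implementation of the idea, not the datasets-internal _distribute_shards:

def distribute_shards_sketch(num_shards: int, max_num_jobs: int) -> list[range]:
    # Split range(num_shards) into at most max_num_jobs contiguous ranges,
    # giving earlier jobs one extra shard when the division is uneven.
    num_jobs = min(num_shards, max_num_jobs)
    sizes = [num_shards // num_jobs + int(i < num_shards % num_jobs) for i in range(num_jobs)]
    starts = [sum(sizes[:i]) for i in range(num_jobs)]
    return [range(start, start + size) for start, size in zip(starts, sizes)]


print(distribute_shards_sketch(10, 3))  # [range(0, 4), range(4, 7), range(7, 10)]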
22
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
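A minimal usage sketch (my own example, assuming NLTK's punkt data downloaded successfully above):

print(add_newline_to_end_of_each_sentence("Machine learning is great. Isn't it?<n>"))
# Machine learning is great.
# Isn't it?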
22
1
from __future__ import annotations

import requests

valid_terms = set(
    """approved_at_utc approved_by author_flair_background_color author_flair_css_class author_flair_richtext author_flair_template_id author_fullname author_premium can_mod_post category clicked content_categories created_utc downs edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta is_original_content is_reddit_media_domain is_video link_flair_css_class link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title name permalink pwls quarantine saved score secure_media secure_media_embed selftext subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type total_awards_received ups upvote_ratio url user_reports""".split()
)


def get_subreddit_data(subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None):
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}
    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict


if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try after some time.
    print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
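Because the wanted_data validation runs before any network call, the error path can be checked offline (my own example, assuming the definitions above):

try:
    get_subreddit_data("learnpython", wanted_data=["upvotes"])
except ValueError as err:
    print(err)  # Invalid search term: upvotes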
22
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) lowerCamelCase__ = { '''configuration_encodec''': [ '''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''EncodecConfig''', ], '''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ '''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''', '''EncodecModel''', '''EncodecPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
22
1
import argparse

import torch

from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--mobilebert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained MobileBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
22
import logging

from transformers import PretrainedConfig


logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}


class __magic_name__(PretrainedConfig):
    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
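Given the defaults above, a minimal instantiation sketch (my own example; it uses the placeholder class name exactly as it appears in this file, though upstream the class is named BertAbsConfig):

config = __magic_name__()
print(config.enc_layers, config.dec_hidden_size)  # 6 768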
22
1
import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowerCamelCase__ = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''') @require_sentencepiece @require_tokenizers class __magic_name__ (__lowercase , unittest.TestCase ): lowerCamelCase__ = PegasusTokenizer lowerCamelCase__ = PegasusTokenizerFast lowerCamelCase__ = True lowerCamelCase__ = True def __a ( self ) -> Union[str, Any]: super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase_ = PegasusTokenizer(_a ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __a ( self ) -> List[str]: return PegasusTokenizer.from_pretrained("google/pegasus-large" ) def __a ( self , **_a ) -> PegasusTokenizer: return PegasusTokenizer.from_pretrained(self.tmpdirname , **_a ) def __a ( self , _a ) -> int: return ("This is a test", "This is a test") def __a ( self ) -> int: lowerCAmelCase_ = "</s>" lowerCAmelCase_ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a ) def __a ( self ) -> Optional[int]: lowerCAmelCase_ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<pad>" ) self.assertEqual(vocab_keys[1] , "</s>" ) self.assertEqual(vocab_keys[-1] , "v" ) self.assertEqual(len(_a ) , 1103 ) def __a ( self ) -> str: self.assertEqual(self.get_tokenizer().vocab_size , 1103 ) def __a ( self ) -> Optional[Any]: lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase_ = self.tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase_ = ( "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important" " </s> <pad> <pad> <pad>" ) lowerCAmelCase_ = rust_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0] lowerCAmelCase_ = py_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0] self.assertListEqual(_a , _a ) def __a ( self ) -> Any: lowerCAmelCase_ = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word lowerCAmelCase_ = "<mask_1> To ensure a <mask_2> flow of bank resolutions." lowerCAmelCase_ = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1] lowerCAmelCase_ = tokenizer([raw_input_str] , return_tensors=_a ).input_ids[0] self.assertListEqual(_a , _a ) def __a ( self ) -> Optional[Any]: lowerCAmelCase_ = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 96103 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 103 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1024 lowerCAmelCase_ = "To ensure a smooth flow of bank resolutions." lowerCAmelCase_ = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1] lowerCAmelCase_ = tokenizer([raw_input_str] , return_tensors=_a ).input_ids[0] self.assertListEqual(_a , _a ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def __a ( self ) -> Dict: lowerCAmelCase_ = ["This is going to be way too long." 
* 150, "short example"] lowerCAmelCase_ = ["not super long but more than 5 tokens", "tiny"] lowerCAmelCase_ = self._large_tokenizer(_a , padding=_a , truncation=_a , return_tensors="pt" ) lowerCAmelCase_ = self._large_tokenizer( text_target=_a , max_length=5 , padding=_a , truncation=_a , return_tensors="pt" ) assert batch.input_ids.shape == (2, 1024) assert batch.attention_mask.shape == (2, 1024) assert targets["input_ids"].shape == (2, 5) assert len(_a ) == 2 # input_ids, attention_mask. @slow def __a ( self ) -> Any: # fmt: off lowerCAmelCase_ = {"input_ids": [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_a , model_name="google/bigbird-pegasus-large-arxiv" , revision="ba85d0851d708441f91440d509690f1ab6353415" , ) @require_sentencepiece @require_tokenizers class __magic_name__ (__lowercase , unittest.TestCase ): lowerCamelCase__ = PegasusTokenizer lowerCamelCase__ = PegasusTokenizerFast lowerCamelCase__ = True lowerCamelCase__ = True def __a ( self ) -> Any: super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase_ = PegasusTokenizer(_a , offset=0 , mask_token_sent=_a , mask_token="[MASK]" ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __a ( self ) -> int: return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv" ) def __a ( self , **_a ) -> PegasusTokenizer: return PegasusTokenizer.from_pretrained(self.tmpdirname , **_a ) def __a ( self , _a ) -> Dict: return ("This is a test", "This is a test") def __a ( self ) -> List[Any]: lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase_ = 
self.tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase_ = ( "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>" " <pad> <pad> <pad>" ) lowerCAmelCase_ = rust_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0] lowerCAmelCase_ = py_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0] self.assertListEqual(_a , _a ) @require_torch def __a ( self ) -> Optional[int]: lowerCAmelCase_ = ["This is going to be way too long." * 1000, "short example"] lowerCAmelCase_ = ["not super long but more than 5 tokens", "tiny"] lowerCAmelCase_ = self._large_tokenizer(_a , padding=_a , truncation=_a , return_tensors="pt" ) lowerCAmelCase_ = self._large_tokenizer( text_target=_a , max_length=5 , padding=_a , truncation=_a , return_tensors="pt" ) assert batch.input_ids.shape == (2, 4096) assert batch.attention_mask.shape == (2, 4096) assert targets["input_ids"].shape == (2, 5) assert len(_a ) == 2 # input_ids, attention_mask. def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = ( "This is an example string that is used to test the original TF implementation against the HF" " implementation" ) lowerCAmelCase_ = self._large_tokenizer(_a ).input_ids self.assertListEqual( _a , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] , )
22
import argparse import io import requests import torch from omegaconf import OmegaConf from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, create_vae_diffusers_config, renew_vae_attention_paths, renew_vae_resnet_paths, ) def A(__a: Tuple , __a: Union[str, Any] ): lowerCAmelCase_ = checkpoint lowerCAmelCase_ = {} lowerCAmelCase_ = vae_state_dict["encoder.conv_in.weight"] lowerCAmelCase_ = vae_state_dict["encoder.conv_in.bias"] lowerCAmelCase_ = vae_state_dict["encoder.conv_out.weight"] lowerCAmelCase_ = vae_state_dict["encoder.conv_out.bias"] lowerCAmelCase_ = vae_state_dict["encoder.norm_out.weight"] lowerCAmelCase_ = vae_state_dict["encoder.norm_out.bias"] lowerCAmelCase_ = vae_state_dict["decoder.conv_in.weight"] lowerCAmelCase_ = vae_state_dict["decoder.conv_in.bias"] lowerCAmelCase_ = vae_state_dict["decoder.conv_out.weight"] lowerCAmelCase_ = vae_state_dict["decoder.conv_out.bias"] lowerCAmelCase_ = vae_state_dict["decoder.norm_out.weight"] lowerCAmelCase_ = vae_state_dict["decoder.norm_out.bias"] lowerCAmelCase_ = vae_state_dict["quant_conv.weight"] lowerCAmelCase_ = vae_state_dict["quant_conv.bias"] lowerCAmelCase_ = vae_state_dict["post_quant_conv.weight"] lowerCAmelCase_ = vae_state_dict["post_quant_conv.bias"] # Retrieves the keys for the encoder down blocks only lowerCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} ) lowerCAmelCase_ = { layer_id: [key for key in vae_state_dict if F"down.{layer_id}" in key] for layer_id in range(__a ) } # Retrieves the keys for the decoder up blocks only lowerCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} ) lowerCAmelCase_ = { layer_id: [key for key in vae_state_dict if F"up.{layer_id}" in key] for layer_id in range(__a ) } for i in range(__a ): lowerCAmelCase_ = [key for key in down_blocks[i] if F"down.{i}" in key and F"down.{i}.downsample" not in key] if F"encoder.down.{i}.downsample.conv.weight" in vae_state_dict: lowerCAmelCase_ = vae_state_dict.pop( F"encoder.down.{i}.downsample.conv.weight" ) lowerCAmelCase_ = vae_state_dict.pop( F"encoder.down.{i}.downsample.conv.bias" ) lowerCAmelCase_ = renew_vae_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"down.{i}.block", "new": F"down_blocks.{i}.resnets"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) lowerCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.block" in key] lowerCAmelCase_ = 2 for i in range(1 , num_mid_res_blocks + 1 ): lowerCAmelCase_ = [key for key in mid_resnets if F"encoder.mid.block_{i}" in key] lowerCAmelCase_ = renew_vae_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) lowerCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.attn" in key] lowerCAmelCase_ = renew_vae_attention_paths(__a ) lowerCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) conv_attn_to_linear(__a ) for i in range(__a ): lowerCAmelCase_ = num_up_blocks - 1 - i lowerCAmelCase_ = [ key for key in up_blocks[block_id] if F"up.{block_id}" in key and F"up.{block_id}.upsample" not in key ] if F"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict: lowerCAmelCase_ = vae_state_dict[ 
F"decoder.up.{block_id}.upsample.conv.weight" ] lowerCAmelCase_ = vae_state_dict[ F"decoder.up.{block_id}.upsample.conv.bias" ] lowerCAmelCase_ = renew_vae_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"up.{block_id}.block", "new": F"up_blocks.{i}.resnets"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) lowerCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.block" in key] lowerCAmelCase_ = 2 for i in range(1 , num_mid_res_blocks + 1 ): lowerCAmelCase_ = [key for key in mid_resnets if F"decoder.mid.block_{i}" in key] lowerCAmelCase_ = renew_vae_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) lowerCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.attn" in key] lowerCAmelCase_ = renew_vae_attention_paths(__a ) lowerCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) conv_attn_to_linear(__a ) return new_checkpoint def A(__a: str , __a: str , ): # Only support V1 lowerCAmelCase_ = requests.get( " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" ) lowerCAmelCase_ = io.BytesIO(r.content ) lowerCAmelCase_ = OmegaConf.load(__a ) lowerCAmelCase_ = 512 lowerCAmelCase_ = "cuda" if torch.cuda.is_available() else "cpu" if checkpoint_path.endswith("safetensors" ): from safetensors import safe_open lowerCAmelCase_ = {} with safe_open(__a , framework="pt" , device="cpu" ) as f: for key in f.keys(): lowerCAmelCase_ = f.get_tensor(__a ) else: lowerCAmelCase_ = torch.load(__a , map_location=__a )["state_dict"] # Convert the VAE model. lowerCAmelCase_ = create_vae_diffusers_config(__a , image_size=__a ) lowerCAmelCase_ = custom_convert_ldm_vae_checkpoint(__a , __a ) lowerCAmelCase_ = AutoencoderKL(**__a ) vae.load_state_dict(__a ) vae.save_pretrained(__a ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''') parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''') lowerCamelCase__ = parser.parse_args() vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
22
1
import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def A(__a: Union[str, Any] , __a: int , __a: Optional[Any] , __a: Any , __a: int ): # Initialise PyTorch model. # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of # TapasConfig to False. # initialize configuration from json file lowerCAmelCase_ = TapasConfig.from_json_file(__a ) # set absolute/relative position embeddings parameter lowerCAmelCase_ = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": lowerCAmelCase_ = TapasForQuestionAnswering(config=__a ) elif task == "WTQ": # run_task_main.py hparams lowerCAmelCase_ = 4 lowerCAmelCase_ = True # hparam_utils.py hparams lowerCAmelCase_ = 0.66_4694 lowerCAmelCase_ = 0.20_7951 lowerCAmelCase_ = 0.12_1194 lowerCAmelCase_ = True lowerCAmelCase_ = True lowerCAmelCase_ = False lowerCAmelCase_ = 0.035_2513 lowerCAmelCase_ = TapasForQuestionAnswering(config=__a ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams lowerCAmelCase_ = 4 lowerCAmelCase_ = False # hparam_utils.py hparams lowerCAmelCase_ = 36.4519 lowerCAmelCase_ = 0.90_3421 lowerCAmelCase_ = 222.088 lowerCAmelCase_ = True lowerCAmelCase_ = True lowerCAmelCase_ = True lowerCAmelCase_ = 0.76_3141 lowerCAmelCase_ = TapasForQuestionAnswering(config=__a ) elif task == "TABFACT": lowerCAmelCase_ = TapasForSequenceClassification(config=__a ) elif task == "MLM": lowerCAmelCase_ = TapasForMaskedLM(config=__a ) elif task == "INTERMEDIATE_PRETRAINING": lowerCAmelCase_ = TapasModel(config=__a ) else: raise ValueError(F"Task {task} not supported." ) print(F"Building PyTorch model from configuration: {config}" ) # Load weights from tf checkpoint load_tf_weights_in_tapas(__a , __a , __a ) # Save pytorch-model (weights and configuration) print(F"Save PyTorch model to {pytorch_dump_path}" ) model.save_pretrained(__a ) # Save tokenizer files print(F"Save tokenizer files to {pytorch_dump_path}" ) lowerCAmelCase_ = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt" , model_max_length=512 ) tokenizer.save_pretrained(__a ) print("Used relative position embeddings:" , model.config.reset_position_index_per_cell ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. Defaults to SQA.''' ) parser.add_argument( '''--reset_position_index_per_cell''', default=False, action='''store_true''', help='''Whether to use relative position embeddings or not. Defaults to True.''', ) parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--tapas_config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained TAPAS model. 
\n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) lowerCamelCase__ = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
22
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    # Every row and every column must be sorted in decreasing order.
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
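A hand-checkable case for the binary-search counter, using the first small fixture above (the expected count of 8 is my own arithmetic):

print(count_negatives_binary_search([[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]))  # 8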
22
1
import argparse import os from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_task_guides.py lowerCamelCase__ = '''src/transformers''' lowerCamelCase__ = '''docs/source/en/tasks''' def A(__a: Union[str, Any] , __a: List[str] , __a: List[str] ): with open(__a , "r" , encoding="utf-8" , newline="\n" ) as f: lowerCAmelCase_ = f.readlines() # Find the start prompt. lowerCAmelCase_ = 0 while not lines[start_index].startswith(__a ): start_index += 1 start_index += 1 lowerCAmelCase_ = start_index while not lines[end_index].startswith(__a ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # This is to make sure the transformers module imported is the one in the repo. lowerCamelCase__ = direct_transformers_import(TRANSFORMERS_PATH) lowerCamelCase__ = { '''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES, '''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, '''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, '''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, '''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES, '''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, '''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, '''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, '''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, '''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, '''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, '''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, '''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, '''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES, '''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, '''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES, } # This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any # `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`). 
lowerCamelCase__ = { '''summarization.md''': ('''nllb''',), '''translation.md''': ('''nllb''',), } def A(__a: str ): lowerCAmelCase_ = TASK_GUIDE_TO_MODELS[task_guide] lowerCAmelCase_ = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(__a , set() ) lowerCAmelCase_ = { code: name for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if (code in model_maping_names or code in special_model_types) } return ", ".join([F"[{name}](../model_doc/{code})" for code, name in model_names.items()] ) + "\n" def A(__a: List[str] , __a: Dict=False ): lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = _find_text_in_file( filename=os.path.join(__a , __a ) , start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" , end_prompt="<!--End of the generated tip-->" , ) lowerCAmelCase_ = get_model_list_for_task(__a ) if current_list != new_list: if overwrite: with open(os.path.join(__a , __a ) , "w" , encoding="utf-8" , newline="\n" ) as f: f.writelines(lines[:start_index] + [new_list] + lines[end_index:] ) else: raise ValueError( F"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`" " to fix this." ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') lowerCamelCase__ = parser.parse_args() for task_guide in TASK_GUIDE_TO_MODELS.keys(): check_model_list_for_task(task_guide, args.fix_and_overwrite)
22
import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging lowerCamelCase__ = logging.get_logger(__name__) def A(__a: Dict ): lowerCAmelCase_ = r"\w+[.]\d+" lowerCAmelCase_ = re.findall(__a , __a ) for pat in pats: lowerCAmelCase_ = key.replace(__a , "_".join(pat.split("." ) ) ) return key def A(__a: str , __a: Tuple , __a: List[Any] ): lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",) if ( any("norm" in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: lowerCAmelCase_ = pt_tuple_key[:-1] + ("embedding",) return renamed_pt_tuple_key, pt_tensor # conv layer lowerCAmelCase_ = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: lowerCAmelCase_ = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowerCAmelCase_ = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight": lowerCAmelCase_ = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowerCAmelCase_ = pt_tuple_key[:-1] + ("weight",) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias lowerCAmelCase_ = pt_tuple_key[:-1] + ("bias",) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def A(__a: Dict , __a: Any , __a: List[Any]=42 ): # Step 1: Convert pytorch tensor to numpy lowerCAmelCase_ = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params lowerCAmelCase_ = flax_model.init_weights(PRNGKey(__a ) ) lowerCAmelCase_ = flatten_dict(__a ) lowerCAmelCase_ = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCAmelCase_ = rename_key(__a ) lowerCAmelCase_ = tuple(renamed_pt_key.split("." ) ) # Correctly rename weight parameters lowerCAmelCase_ , lowerCAmelCase_ = rename_key_and_reshape_tensor(__a , __a , __a ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape " F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." ) # also add unexpected weight so that warning is thrown lowerCAmelCase_ = jnp.asarray(__a ) return unflatten_dict(__a )
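# A quick, standalone illustration of the rename step above: the regex
# r"\w+[.]\d+" finds segments like "layers.0" and rewrites them to
# "layers_0", so PyTorch sub-module indices become valid Flax parameter-dict
# path pieces. `rename_key_demo` is an illustrative name only.
import re

def rename_key_demo(key):
    for pat in re.findall(r"\w+[.]\d+", key):
        key = key.replace(pat, "_".join(pat.split(".")))
    return key

assert rename_key_demo("encoder.layers.0.attention.weight") == "encoder.layers_0.attention.weight"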
22
1
def A(__a: list[list] ): lowerCAmelCase_ = current_set.copy() for row_index, row in enumerate(__a ): lowerCAmelCase_ = row[0] for column_index, column in enumerate(__a ): if magnitude == 0: lowerCAmelCase_ = column continue lowerCAmelCase_ = column / magnitude # Subtract to cancel term lowerCAmelCase_ = current_set[0] lowerCAmelCase_ = [first_row] lowerCAmelCase_ = current_set[1::] for row in current_set: lowerCAmelCase_ = [] # If first term is 0, it is already in form we want, so we preserve it if row[0] == 0: final_set.append(__a ) continue for column_index in range(len(__a ) ): temp_row.append(first_row[column_index] - row[column_index] ) final_set.append(__a ) # Create next recursion iteration set if len(final_set[0] ) != 3: lowerCAmelCase_ = final_set[0] lowerCAmelCase_ = [] lowerCAmelCase_ = [] for row in final_set[1::]: current_first_column.append(row[0] ) next_iteration.append(row[1::] ) lowerCAmelCase_ = simplify(__a ) for i in range(len(__a ) ): resultant[i].insert(0 , current_first_column[i] ) resultant.insert(0 , __a ) lowerCAmelCase_ = resultant return final_set def A(__a: list[list] ): if len(__a ) == 0: raise IndexError("solve_simultaneous() requires n lists of length n+1" ) lowerCAmelCase_ = len(__a ) + 1 if any(len(__a ) != _length for item in equations ): raise IndexError("solve_simultaneous() requires n lists of length n+1" ) for row in equations: if any(not isinstance(__a , (int, float) ) for column in row ): raise ValueError("solve_simultaneous() requires lists of integers" ) if len(__a ) == 1: return [equations[0][-1] / equations[0][0]] lowerCAmelCase_ = equations.copy() if any(0 in row for row in data_set ): lowerCAmelCase_ = data_set.copy() lowerCAmelCase_ = [] for row_index, row in enumerate(__a ): if 0 not in row: lowerCAmelCase_ = data_set.pop(__a ) break if not full_row: raise ValueError("solve_simultaneous() requires at least 1 full equation" ) data_set.insert(0 , __a ) lowerCAmelCase_ = data_set.copy() lowerCAmelCase_ = simplify(__a ) lowerCAmelCase_ = simplified[::-1] lowerCAmelCase_ = [] for row in simplified: lowerCAmelCase_ = row[-1] if not solutions: if row[-2] == 0: solutions.append(0 ) continue solutions.append(current_solution / row[-2] ) continue lowerCAmelCase_ = row.copy()[: len(__a ) - 1 :] while temp_row[0] == 0: temp_row.pop(0 ) if len(__a ) == 0: solutions.append(0 ) continue lowerCAmelCase_ = temp_row[1::] lowerCAmelCase_ = temp_row[::-1] for column_index, column in enumerate(__a ): current_solution -= column * solutions[column_index] solutions.append(__a ) lowerCAmelCase_ = [] for item in solutions: final.append(float(round(__a , 5 ) ) ) return final[::-1] if __name__ == "__main__": import doctest doctest.testmod() lowerCamelCase__ = [ [2, 1, 1, 1, 1, 4], [1, 2, 1, 1, 1, 5], [1, 1, 2, 1, 1, 6], [1, 1, 1, 2, 1, 7], [1, 1, 1, 1, 2, 8], ] print(solve_simultaneous(eq)) print(solve_simultaneous([[4, 2]]))
22
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase__ = { '''configuration_time_series_transformer''': [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimeSeriesTransformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TimeSeriesTransformerForPrediction''', '''TimeSeriesTransformerModel''', '''TimeSeriesTransformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimeSeriesTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, TimeSeriesTransformerPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
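# The `_LazyModule` registration above defers the heavy torch import until an
# attribute is first accessed. A toy sketch of the same idea, with an
# illustrative class that is not the transformers implementation:
import importlib

class TinyLazyModule:
    def __init__(self, name_to_module):
        self._map = name_to_module

    def __getattr__(self, name):
        # Reached only when `name` is not found the normal way.
        module = importlib.import_module(self._map[name])
        return getattr(module, name)

lazy = TinyLazyModule({"sqrt": "math"})
print(lazy.sqrt(9.0))  # "math" is imported only on this first access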
22
1
import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class __magic_name__ : def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=False , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.0_2 , _a=3 , _a=4 , _a=None , ) -> List[Any]: lowerCAmelCase_ = parent lowerCAmelCase_ = batch_size lowerCAmelCase_ = seq_length lowerCAmelCase_ = is_training lowerCAmelCase_ = use_input_mask lowerCAmelCase_ = use_token_type_ids lowerCAmelCase_ = use_labels lowerCAmelCase_ = vocab_size lowerCAmelCase_ = hidden_size lowerCAmelCase_ = num_hidden_layers lowerCAmelCase_ = num_attention_heads lowerCAmelCase_ = intermediate_size lowerCAmelCase_ = hidden_act lowerCAmelCase_ = hidden_dropout_prob lowerCAmelCase_ = attention_probs_dropout_prob lowerCAmelCase_ = max_position_embeddings lowerCAmelCase_ = type_vocab_size lowerCAmelCase_ = type_sequence_label_size lowerCAmelCase_ = initializer_range lowerCAmelCase_ = num_labels lowerCAmelCase_ = num_choices lowerCAmelCase_ = scope def __a ( self ) -> int: lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase_ = None if self.use_input_mask: lowerCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase_ = None if self.use_token_type_ids: lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase_ = None lowerCAmelCase_ = None lowerCAmelCase_ = None if self.use_labels: lowerCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase_ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __a ( self ) -> Tuple: return LlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , ) def __a ( self , _a , _a , _a , _a , _a , _a , _a ) -> Optional[int]: lowerCAmelCase_ = LlamaModel(config=_a ) model.to(_a ) model.eval() lowerCAmelCase_ = model(_a , attention_mask=_a ) lowerCAmelCase_ = model(_a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> List[Any]: lowerCAmelCase_ = True lowerCAmelCase_ = LlamaModel(_a ) model.to(_a ) model.eval() lowerCAmelCase_ = model( _a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , ) lowerCAmelCase_ = 
model( _a , attention_mask=_a , encoder_hidden_states=_a , ) lowerCAmelCase_ = model(_a , attention_mask=_a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> str: lowerCAmelCase_ = LlamaForCausalLM(config=_a ) model.to(_a ) model.eval() lowerCAmelCase_ = model(_a , attention_mask=_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> int: lowerCAmelCase_ = True lowerCAmelCase_ = True lowerCAmelCase_ = LlamaForCausalLM(config=_a ) model.to(_a ) model.eval() # first forward pass lowerCAmelCase_ = model( _a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , use_cache=_a , ) lowerCAmelCase_ = outputs.past_key_values # create hypothetical multiple next tokens and extend to next_input_ids lowerCAmelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowerCAmelCase_ = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and attention_mask lowerCAmelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 ) lowerCAmelCase_ = torch.cat([input_mask, next_mask] , dim=-1 ) lowerCAmelCase_ = model( _a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , output_hidden_states=_a , )["hidden_states"][0] lowerCAmelCase_ = model( _a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , past_key_values=_a , output_hidden_states=_a , )["hidden_states"][0] # select random slice lowerCAmelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowerCAmelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach() lowerCAmelCase_ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_a , _a , atol=1E-3 ) ) def __a ( self ) -> List[str]: lowerCAmelCase_ = self.prepare_config_and_inputs() ( ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ) = config_and_inputs lowerCAmelCase_ = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class __magic_name__ (__lowercase , __lowercase , __lowercase , unittest.TestCase ): lowerCamelCase__ = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () lowerCamelCase__ = (LlamaForCausalLM,) if is_torch_available() else () lowerCamelCase__ = ( { '''feature-extraction''': LlamaModel, '''text-classification''': LlamaForSequenceClassification, '''text-generation''': LlamaForCausalLM, '''zero-shot''': LlamaForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase__ = False lowerCamelCase__ = False def __a ( self ) -> Optional[int]: lowerCAmelCase_ = LlamaModelTester(self ) lowerCAmelCase_ = ConfigTester(self , config_class=_a , hidden_size=37 ) def __a ( self ) -> int: self.config_tester.run_common_tests() def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_a ) def __a ( self ) -> int: lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCAmelCase_ = type self.model_tester.create_and_check_model(*_a ) def __a ( 
self ) -> Dict: lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase_ = 3 lowerCAmelCase_ = input_dict["input_ids"] lowerCAmelCase_ = input_ids.ne(1 ).to(_a ) lowerCAmelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) lowerCAmelCase_ = LlamaForSequenceClassification(_a ) model.to(_a ) model.eval() lowerCAmelCase_ = model(_a , attention_mask=_a , labels=_a ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __a ( self ) -> Any: lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase_ = 3 lowerCAmelCase_ = "single_label_classification" lowerCAmelCase_ = input_dict["input_ids"] lowerCAmelCase_ = input_ids.ne(1 ).to(_a ) lowerCAmelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) lowerCAmelCase_ = LlamaForSequenceClassification(_a ) model.to(_a ) model.eval() lowerCAmelCase_ = model(_a , attention_mask=_a , labels=_a ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __a ( self ) -> List[Any]: lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase_ = 3 lowerCAmelCase_ = "multi_label_classification" lowerCAmelCase_ = input_dict["input_ids"] lowerCAmelCase_ = input_ids.ne(1 ).to(_a ) lowerCAmelCase_ = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) lowerCAmelCase_ = LlamaForSequenceClassification(_a ) model.to(_a ) model.eval() lowerCAmelCase_ = model(_a , attention_mask=_a , labels=_a ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip("LLaMA buffers include complex numbers, which breaks this test" ) def __a ( self ) -> Tuple: pass @parameterized.expand([("linear",), ("dynamic",)] ) def __a ( self , _a ) -> List[Any]: lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase_ = ids_tensor([1, 10] , config.vocab_size ) lowerCAmelCase_ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights lowerCAmelCase_ = LlamaModel(_a ) original_model.to(_a ) original_model.eval() lowerCAmelCase_ = original_model(_a ).last_hidden_state lowerCAmelCase_ = original_model(_a ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights lowerCAmelCase_ = {"type": scaling_type, "factor": 1_0.0} lowerCAmelCase_ = LlamaModel(_a ) scaled_model.to(_a ) scaled_model.eval() lowerCAmelCase_ = scaled_model(_a ).last_hidden_state lowerCAmelCase_ = scaled_model(_a ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(_a , _a , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(_a , _a , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(_a , _a , atol=1E-5 ) ) @require_torch class __magic_name__ (unittest.TestCase ): @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!" 
) @slow def __a ( self ) -> Any: lowerCAmelCase_ = [1, 306, 4658, 278, 6593, 310, 2834, 338] lowerCAmelCase_ = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf" , device_map="auto" ) lowerCAmelCase_ = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 lowerCAmelCase_ = torch.tensor([[-6.6_5_5_0, -4.1_2_2_7, -4.9_8_5_9, -3.2_4_0_6, 0.8_2_6_2, -3.0_0_3_3, 1.2_9_6_4, -3.3_6_9_9]] ) torch.testing.assert_close(out.mean(-1 ) , _a , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off lowerCAmelCase_ = torch.tensor([-1_2.8_2_8_1, -7.4_4_5_3, -0.4_6_3_9, -8.0_6_2_5, -7.2_5_0_0, -8.0_0_0_0, -6.4_8_8_3, -7.7_6_9_5, -7.8_4_3_8, -7.0_3_1_2, -6.2_1_8_8, -7.1_3_2_8, -1.8_4_9_6, 1.9_9_6_1, -8.6_2_5_0, -6.7_2_2_7, -1_2.8_2_8_1, -6.9_4_9_2, -7.0_7_4_2, -7.7_8_5_2, -7.5_8_2_0, -7.9_0_6_2, -6.9_3_7_5, -7.9_8_0_5, -8.3_4_3_8, -8.1_5_6_2, -8.0_4_6_9, -7.6_2_5_0, -7.7_4_2_2, -7.3_3_9_8,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , _a , atol=1E-5 , rtol=1E-5 ) @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!" ) @slow def __a ( self ) -> Any: lowerCAmelCase_ = [1, 306, 4658, 278, 6593, 310, 2834, 338] lowerCAmelCase_ = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf" , device_map="auto" ) lowerCAmelCase_ = model(torch.tensor(_a ) ) # Expected mean on dim = -1 lowerCAmelCase_ = torch.tensor([[-2.0_6_2_2, -1.2_7_9_4, -1.1_6_3_8, -0.9_7_8_8, -1.4_6_0_3, -1.0_2_3_8, -1.7_8_9_3, -1.4_4_1_1]] ) torch.testing.assert_close(out.mean(-1 ) , _a , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off lowerCAmelCase_ = torch.tensor([-8.1_4_0_6, -8.0_5_4_7, 2.7_4_6_1, -1.2_3_4_4, -0.1_4_4_8, -1.8_2_6_2, -1.0_0_2_0, -1.8_1_5_4, -1.6_8_9_5, -1.8_5_1_6, -2.3_5_7_4, -0.9_2_7_7, 3.7_5_9_8, 6.5_7_4_2, -1.2_9_9_8, -0.1_1_7_7, -8.1_4_0_6, -2.9_6_8_8, -2.9_1_9_9, -3.1_6_9_9, -3.5_2_5_4, -2.3_5_5_5, -2.7_9_8_8, -3.4_1_4_1, -2.8_2_6_2, -4.5_1_9_5, -3.3_3_7_9, -3.3_1_6_4, -2.7_8_3_2, -3.0_2_7_3] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , _a , atol=1E-5 , rtol=1E-5 ) @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!" ) @slow def __a ( self ) -> List[str]: lowerCAmelCase_ = [1, 306, 4658, 278, 6593, 310, 2834, 338] lowerCAmelCase_ = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf" , device_map="auto" ) lowerCAmelCase_ = model(torch.tensor(_a ) ) # Expected mean on dim = -1 lowerCAmelCase_ = torch.tensor([[-0.8_5_6_2, -1.8_5_2_0, -0.7_5_5_1, -0.4_1_6_2, -1.5_1_6_1, -1.2_0_3_8, -2.4_8_2_3, -2.3_2_5_4]] ) torch.testing.assert_close(out.mean(-1 ) , _a , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off lowerCAmelCase_ = torch.tensor([-2.2_2_2_7, 4.8_8_2_8, 0.9_0_2_3, -0.4_5_7_8, -0.7_8_7_1, -0.1_0_3_3, -0.6_2_2_1, -0.5_7_8_6, -0.7_8_0_3, -1.0_6_7_4, -1.2_9_2_0, -0.1_5_7_0, 0.8_0_0_8, 2.0_7_2_3, -0.9_4_9_7, 0.2_7_7_1, -2.2_2_2_7, -0.7_6_1_2, -1.4_3_4_6, -1.2_0_6_1, -1.6_4_2_6, -0.3_0_0_0, -0.7_1_3_9, -1.1_9_3_4, -1.8_6_9_1, -1.6_9_7_3, -1.5_9_4_7, -1.2_7_0_5, -0.3_5_2_3, -0.5_5_1_3] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , _a , atol=1E-2 , rtol=1E-2 ) @unittest.skip( "Logits are not exactly the same, once we fix the instabilities somehow, will update! 
Also it is gonna be a `too_slow` test" ) @slow def __a ( self ) -> Dict: lowerCAmelCase_ = [1, 306, 4658, 278, 6593, 310, 2834, 338] lowerCAmelCase_ = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf" , device_map="auto" ) lowerCAmelCase_ = model(torch.tensor(_a ) ) lowerCAmelCase_ = torch.tensor( [[-4.2_3_2_7, -3.3_3_6_0, -4.6_6_6_5, -4.7_6_3_1, -1.8_1_8_0, -3.4_1_7_0, -1.4_2_1_1, -3.1_8_1_0]] , dtype=torch.float32 ) torch.testing.assert_close(out.mean(-1 ) , _a , atol=1E-2 , rtol=1E-2 ) # fmt: off lowerCAmelCase_ = torch.tensor([-9.4_9_2_2, -3.9_5_5_1, 1.7_9_9_8, -5.6_7_5_8, -5.1_0_5_5, -5.8_9_8_4, -4.8_3_2_0, -6.8_0_8_6, -6.5_3_9_1, -5.6_1_7_2, -5.5_8_2_0, -5.5_3_5_2, 1.7_8_8_1, 3.6_2_8_9, -6.5_1_1_7, -3.4_7_8_5, -9.5_0_0_0, -6.0_3_5_2, -6.8_1_2_5, -6.0_1_9_5, -6.6_8_3_6, -5.4_7_2_7, -6.2_8_1_2, -6.0_3_9_1, -7.3_3_9_8, -7.4_2_9_7, -7.4_8_4_4, -6.5_8_2_0, -5.8_7_8_9, -5.5_3_1_2] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , _a , atol=1E-5 , rtol=1E-5 ) @unittest.skip("Model is currently gated" ) @slow def __a ( self ) -> Optional[Any]: lowerCAmelCase_ = "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi" lowerCAmelCase_ = "Simply put, the theory of relativity states that " lowerCAmelCase_ = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf" ) lowerCAmelCase_ = tokenizer.encode(_a , return_tensors="pt" ) lowerCAmelCase_ = LlamaForCausalLM.from_pretrained( "meta-llama/Llama-2-13b-chat-hf" , device_map="sequential" , use_safetensors=_a ) # greedy generation outputs lowerCAmelCase_ = model.generate(_a , max_new_tokens=64 , top_p=_a , temperature=1 , do_sample=_a ) lowerCAmelCase_ = tokenizer.decode(generated_ids[0] , skip_special_tokens=_a ) self.assertEqual(_a , _a )
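# The parameterized test above toggles `rope_scaling` between "linear" and
# "dynamic". With the public `LlamaConfig` that option is passed as a small
# dict; the tiny sizes below are arbitrary toy values, a sketch rather than a
# recommended configuration:
from transformers import LlamaConfig

tiny_config = LlamaConfig(
    vocab_size=128,
    hidden_size=64,
    num_hidden_layers=2,
    num_attention_heads=2,
    rope_scaling={"type": "dynamic", "factor": 2.0},
)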
22
import math def A(__a: int ): return math.sqrt(__a ) * math.sqrt(__a ) == __a def A(__a: int ): lowerCAmelCase_ = 0 lowerCAmelCase_ = __a while left <= right: lowerCAmelCase_ = (left + right) // 2 if mid**2 == __a: return True elif mid**2 > __a: lowerCAmelCase_ = mid - 1 else: lowerCAmelCase_ = mid + 1 return False if __name__ == "__main__": import doctest doctest.testmod()
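# The binary-search variant above sidesteps the floating-point rounding that
# the sqrt-based check can hit on very large integers. A standalone sketch
# with an illustrative name (`is_perfect_square` is not defined in the file):
def is_perfect_square(n: int) -> bool:
    left, right = 0, n
    while left <= right:
        mid = (left + right) // 2
        if mid * mid == n:
            return True
        if mid * mid > n:
            right = mid - 1
        else:
            left = mid + 1
    return False

assert [is_perfect_square(k) for k in (0, 1, 2, 16, 26)] == [True, True, False, True, False]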
22
1
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) class __magic_name__ (__lowercase ): lowerCamelCase__ = '''encoder-decoder''' lowerCamelCase__ = True def __init__( self , **_a ) -> Union[str, Any]: super().__init__(**_a ) assert ( "encoder" in kwargs and "decoder" in kwargs ), "Config has to be initialized with encoder and decoder config" lowerCAmelCase_ = kwargs.pop("encoder" ) lowerCAmelCase_ = encoder_config.pop("model_type" ) lowerCAmelCase_ = kwargs.pop("decoder" ) lowerCAmelCase_ = decoder_config.pop("model_type" ) from ..auto.configuration_auto import AutoConfig lowerCAmelCase_ = AutoConfig.for_model(_a , **_a ) lowerCAmelCase_ = AutoConfig.for_model(_a , **_a ) lowerCAmelCase_ = True @classmethod def __a ( cls , _a , _a , **_a ) -> PretrainedConfig: logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config" ) lowerCAmelCase_ = True lowerCAmelCase_ = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_a ) def __a ( self ) -> Optional[Any]: lowerCAmelCase_ = copy.deepcopy(self.__dict__ ) lowerCAmelCase_ = self.encoder.to_dict() lowerCAmelCase_ = self.decoder.to_dict() lowerCAmelCase_ = self.__class__.model_type return output
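# A short usage sketch, assuming the obfuscated classmethod above corresponds
# to the public `EncoderDecoderConfig.from_encoder_decoder_configs`; the tiny
# BERT sizes are arbitrary:
from transformers import BertConfig, EncoderDecoderConfig

enc = BertConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2)
dec = BertConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2)
cfg = EncoderDecoderConfig.from_encoder_decoder_configs(enc, dec)
assert cfg.is_encoder_decoder and cfg.decoder.is_decoder and cfg.decoder.add_cross_attention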
22
import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers lowerCamelCase__ = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split() if sys.version_info < (3, 7): pkgs_to_check_at_runtime.append('''dataclasses''') if sys.version_info < (3, 8): pkgs_to_check_at_runtime.append('''importlib_metadata''') for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''') def A(__a: Dict , __a: List[str]=None ): require_version(deps[pkg] , __a )
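# How the runtime check behaves, sketched with deliberately artificial
# requirement strings; `require_version` raises when the installed package
# does not satisfy the spec (the exact exception type is an implementation
# detail, hence the broad except):
from transformers.utils.versions import require_version

require_version("numpy>=1.0")          # satisfied on any modern install
try:
    require_version("numpy>=9999.0")   # intentionally unsatisfiable
except Exception as err:
    print("version check failed as expected:", err)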
22
1
import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class __magic_name__ (__lowercase , unittest.TestCase ): lowerCamelCase__ = BarthezTokenizer lowerCamelCase__ = BarthezTokenizerFast lowerCamelCase__ = True lowerCamelCase__ = True def __a ( self ) -> List[str]: super().setUp() lowerCAmelCase_ = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname , legacy_format=_a ) lowerCAmelCase_ = tokenizer def __a ( self ) -> Any: lowerCAmelCase_ = "<pad>" lowerCAmelCase_ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a ) def __a ( self ) -> List[str]: lowerCAmelCase_ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "<mask>" ) self.assertEqual(len(_a ) , 101122 ) def __a ( self ) -> Any: self.assertEqual(self.get_tokenizer().vocab_size , 101122 ) @require_torch def __a ( self ) -> int: lowerCAmelCase_ = ["A long paragraph for summarization.", "Another paragraph for summarization."] lowerCAmelCase_ = [0, 57, 3018, 70307, 91, 2] lowerCAmelCase_ = self.tokenizer( _a , max_length=len(_a ) , padding=_a , truncation=_a , return_tensors="pt" ) self.assertIsInstance(_a , _a ) self.assertEqual((2, 6) , batch.input_ids.shape ) self.assertEqual((2, 6) , batch.attention_mask.shape ) lowerCAmelCase_ = batch.input_ids.tolist()[0] self.assertListEqual(_a , _a ) def __a ( self ) -> int: if not self.test_rust_tokenizer: return lowerCAmelCase_ = self.get_tokenizer() lowerCAmelCase_ = self.get_rust_tokenizer() lowerCAmelCase_ = "I was born in 92000, and this is falsé." 
lowerCAmelCase_ = tokenizer.tokenize(_a ) lowerCAmelCase_ = rust_tokenizer.tokenize(_a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = rust_tokenizer.encode(_a , add_special_tokens=_a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = self.get_rust_tokenizer() lowerCAmelCase_ = tokenizer.encode(_a ) lowerCAmelCase_ = rust_tokenizer.encode(_a ) self.assertListEqual(_a , _a ) @slow def __a ( self ) -> Dict: # fmt: off lowerCAmelCase_ = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. lowerCAmelCase_ = [ "Le transformeur est un modèle d'apprentissage profond introduit en 2017, " "utilisé principalement dans le domaine du traitement automatique des langues (TAL).", "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus " "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches " "telles que la traduction et la synthèse de texte.", ] self.tokenizer_integration_test_util( expected_encoding=_a , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=_a , )
22
import argparse import os from pathlib import Path import fairseq import torch from packaging import version from torch import nn from transformers import ( BartConfig, BartForConditionalGeneration, BartForSequenceClassification, BartModel, BartTokenizer, ) from transformers.utils import logging lowerCamelCase__ = ['''bart.large''', '''bart.large.mnli''', '''bart.large.cnn''', '''bart_xsum/model.pt'''] lowerCamelCase__ = {'''bart.large''': BartModel, '''bart.large.mnli''': BartForSequenceClassification} if version.parse(fairseq.__version__) < version.parse('''0.9.0'''): raise Exception('''requires fairseq >= 0.9.0''') logging.set_verbosity_info() lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = ''' Hello world! cécé herlolip''' lowerCamelCase__ = [ ('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''), ('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''), ('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''), ('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''), ] def A(__a: Any ): lowerCAmelCase_ = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "_float_tensor", ] for k in ignore_keys: state_dict.pop(__a , __a ) def A(__a: Optional[int] , __a: List[Any] , __a: Union[str, Any] ): lowerCAmelCase_ = dct.pop(__a ) lowerCAmelCase_ = val def A(__a: Tuple ): lowerCAmelCase_ = torch.load(__a , map_location="cpu" ) lowerCAmelCase_ = torch.hub.load("pytorch/fairseq" , "bart.large.cnn" ).eval() hub_interface.model.load_state_dict(sd["model"] ) return hub_interface def A(__a: List[str] ): lowerCAmelCase_ , lowerCAmelCase_ = emb.weight.shape lowerCAmelCase_ = nn.Linear(__a , __a , bias=__a ) lowerCAmelCase_ = emb.weight.data return lin_layer @torch.no_grad() def A(__a: Tuple , __a: Union[str, Any] , __a: str=None ): if not os.path.exists(__a ): lowerCAmelCase_ = torch.hub.load("pytorch/fairseq" , __a ).eval() else: lowerCAmelCase_ = load_xsum_checkpoint(__a ) bart.model.upgrade_state_dict(bart.model.state_dict() ) if hf_checkpoint_name is None: lowerCAmelCase_ = checkpoint_path.replace("." 
, "-" ) lowerCAmelCase_ = BartConfig.from_pretrained(__a ) lowerCAmelCase_ = bart.encode(__a ).unsqueeze(0 ) lowerCAmelCase_ = BartTokenizer.from_pretrained(__a ).encode(__a , return_tensors="pt" ).unsqueeze(0 ) if not torch.eq(__a , __a ).all(): raise ValueError( F"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}" ) if checkpoint_path == "bart.large.mnli": lowerCAmelCase_ = bart.state_dict() remove_ignore_keys_(__a ) lowerCAmelCase_ = state_dict["model.decoder.embed_tokens.weight"] for src, dest in mnli_rename_keys: rename_key(__a , __a , __a ) lowerCAmelCase_ = BartForSequenceClassification(__a ).eval() model.load_state_dict(__a ) lowerCAmelCase_ = bart.predict("mnli" , __a , return_logits=__a ) lowerCAmelCase_ = model(__a )[0] # logits else: # no classification heads to worry about lowerCAmelCase_ = bart.model.state_dict() remove_ignore_keys_(__a ) lowerCAmelCase_ = state_dict["decoder.embed_tokens.weight"] lowerCAmelCase_ = bart.extract_features(__a ) if hf_checkpoint_name == "facebook/bart-large": lowerCAmelCase_ = BartModel(__a ).eval() model.load_state_dict(__a ) lowerCAmelCase_ = model(__a ).model[0] else: lowerCAmelCase_ = BartForConditionalGeneration(__a ).eval() # an existing summarization ckpt model.model.load_state_dict(__a ) if hasattr(__a , "lm_head" ): lowerCAmelCase_ = make_linear_from_emb(model.model.shared ) lowerCAmelCase_ = model.model(__a )[0] # Check results if fairseq_output.shape != new_model_outputs.shape: raise ValueError( F"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}" ) if (fairseq_output != new_model_outputs).any().item(): raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`" ) Path(__a ).mkdir(exist_ok=__a ) model.save_pretrained(__a ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum''' ) lowerCamelCase__ = parser.parse_args() convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
22
1
import os import re import shutil import sys import tempfile import unittest import black lowerCamelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated. lowerCamelCase__ = ''' def __init__(self, config): super().__init__() self.transform = BertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states ''' class __magic_name__ (unittest.TestCase ): def __a ( self ) -> Any: lowerCAmelCase_ = tempfile.mkdtemp() os.makedirs(os.path.join(self.transformer_dir , "models/bert/" ) ) lowerCAmelCase_ = self.transformer_dir shutil.copy( os.path.join(_a , "src/transformers/models/bert/modeling_bert.py" ) , os.path.join(self.transformer_dir , "models/bert/modeling_bert.py" ) , ) def __a ( self ) -> List[Any]: lowerCAmelCase_ = "src/transformers" shutil.rmtree(self.transformer_dir ) def __a ( self , _a , _a , _a , _a=None ) -> Union[str, Any]: lowerCAmelCase_ = comment + f"\nclass {class_name}(nn.Module):\n" + class_code if overwrite_result is not None: lowerCAmelCase_ = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result lowerCAmelCase_ = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 ) lowerCAmelCase_ = black.format_str(_a , mode=_a ) lowerCAmelCase_ = os.path.join(self.transformer_dir , "new_code.py" ) with open(_a , "w" , newline="\n" ) as f: f.write(_a ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(_a ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=_a ) with open(_a , "r" ) as f: self.assertTrue(f.read() , _a ) def __a ( self ) -> Optional[Any]: lowerCAmelCase_ = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead" ) self.assertEqual(_a , _a ) def __a ( self ) -> Optional[int]: # Base copy consistency self.check_copy_consistency( "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , REFERENCE_CODE + "\n" , ) # With no empty line at the end self.check_copy_consistency( "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , _a , ) # Copy consistency with rename self.check_copy_consistency( "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , re.sub("Bert" , "TestModel" , _a ) , ) # Copy consistency with a really long name lowerCAmelCase_ = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason" self.check_copy_consistency( f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}" , f"{long_class_name}LMPredictionHead" , re.sub("Bert" , _a , _a ) , ) # Copy consistency with overwrite self.check_copy_consistency( "# Copied from 
transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , _a , overwrite_result=re.sub("Bert" , "TestModel" , _a ) , ) def __a ( self ) -> List[Any]: lowerCAmelCase_ = check_copies.LOCALIZED_READMES["README_zh-hans.md"] lowerCAmelCase_ = ( "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the" " Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for" " Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong" " Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1." " **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace)," " released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and" " lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same" " method has been applied to compress GPT2 into" " [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into" " [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation)," " Multilingual BERT into" " [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German" " version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**" " (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders" " as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang" " Luong, Quoc V. Le, Christopher D. Manning." ) lowerCAmelCase_ = ( "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the" " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of" " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian" " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n" ) lowerCAmelCase_ = ( "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the" " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of" " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian" " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1." " **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文" " [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and" " lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same" " method has been applied to compress GPT2 into" " [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into" " [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation)," " Multilingual BERT into" " [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German" " version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自" " Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather" " than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le," " Christopher D. 
Manning 发布。\n" ) lowerCAmelCase_ , lowerCAmelCase_ = check_copies.convert_to_localized_md( _a , _a , localized_readme["format_model_list"] ) self.assertFalse(_a ) self.assertEqual(_a , _a ) lowerCAmelCase_ , lowerCAmelCase_ = check_copies.convert_to_localized_md( _a , _a , localized_readme["format_model_list"] ) # Check whether the number of models is equal to README.md after conversion. self.assertTrue(_a ) lowerCAmelCase_ = ( "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the" " Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for" " Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong" " Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut." ) lowerCAmelCase_ = ( "1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and" " the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of" " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian" " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n" ) lowerCAmelCase_ = ( "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the" " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of" " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian" " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n" ) lowerCAmelCase_ , lowerCAmelCase_ = check_copies.convert_to_localized_md( _a , _a , localized_readme["format_model_list"] ) # Check if the model link is synchronized. self.assertEqual(_a , _a )
22
import os import unittest from transformers import MobileBertTokenizer, MobileBertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class __magic_name__ (__lowercase , unittest.TestCase ): lowerCamelCase__ = MobileBertTokenizer lowerCamelCase__ = MobileBertTokenizerFast lowerCamelCase__ = True lowerCamelCase__ = True lowerCamelCase__ = filter_non_english lowerCamelCase__ = '''google/mobilebert-uncased''' def __a ( self ) -> Optional[Any]: super().setUp() lowerCAmelCase_ = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) lowerCAmelCase_ = [ (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped for tokenizer_def in self.tokenizers_list ] def __a ( self , _a ) -> Any: lowerCAmelCase_ = "UNwant\u00E9d,running" lowerCAmelCase_ = "unwanted, running" return input_text, output_text def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = self.tokenizer_class(self.vocab_file ) lowerCAmelCase_ = tokenizer.tokenize("UNwant\u00E9d,running" ) self.assertListEqual(_a , ["un", "##want", "##ed", ",", "runn", "##ing"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [9, 6, 7, 12, 10, 11] ) def __a ( self ) -> Tuple: if not self.test_rust_tokenizer: return lowerCAmelCase_ = self.get_tokenizer() lowerCAmelCase_ = self.get_rust_tokenizer() lowerCAmelCase_ = "UNwant\u00E9d,running" lowerCAmelCase_ = tokenizer.tokenize(_a ) lowerCAmelCase_ = rust_tokenizer.tokenize(_a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = rust_tokenizer.encode(_a , add_special_tokens=_a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = self.get_rust_tokenizer() lowerCAmelCase_ = tokenizer.encode(_a ) lowerCAmelCase_ = rust_tokenizer.encode(_a ) self.assertListEqual(_a , _a ) # With lower casing lowerCAmelCase_ = self.get_tokenizer(do_lower_case=_a ) lowerCAmelCase_ = self.get_rust_tokenizer(do_lower_case=_a ) lowerCAmelCase_ = "UNwant\u00E9d,running" lowerCAmelCase_ = tokenizer.tokenize(_a ) lowerCAmelCase_ = rust_tokenizer.tokenize(_a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = rust_tokenizer.encode(_a , add_special_tokens=_a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = self.get_rust_tokenizer() lowerCAmelCase_ = tokenizer.encode(_a ) lowerCAmelCase_ = rust_tokenizer.encode(_a ) self.assertListEqual(_a , _a ) def __a ( self ) -> Any: lowerCAmelCase_ = BasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] ) def __a ( self ) -> Dict: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? 
" ) , ["hello", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def __a ( self ) -> List[Any]: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] ) def __a ( self ) -> str: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def __a ( self ) -> str: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def __a ( self ) -> str: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] ) def __a ( self ) -> List[str]: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] ) def __a ( self ) -> Any: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , never_split=["[UNK]"] ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] ) def __a ( self ) -> Any: lowerCAmelCase_ = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] lowerCAmelCase_ = {} for i, token in enumerate(_a ): lowerCAmelCase_ = i lowerCAmelCase_ = WordpieceTokenizer(vocab=_a , unk_token="[UNK]" ) self.assertListEqual(tokenizer.tokenize("" ) , [] ) self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] ) self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] ) def __a ( self ) -> Optional[int]: self.assertTrue(_is_whitespace(" " ) ) self.assertTrue(_is_whitespace("\t" ) ) self.assertTrue(_is_whitespace("\r" ) ) self.assertTrue(_is_whitespace("\n" ) ) self.assertTrue(_is_whitespace("\u00A0" ) ) self.assertFalse(_is_whitespace("A" ) ) self.assertFalse(_is_whitespace("-" ) ) def __a ( self ) -> List[str]: self.assertTrue(_is_control("\u0005" ) ) self.assertFalse(_is_control("A" ) ) self.assertFalse(_is_control(" " ) ) self.assertFalse(_is_control("\t" ) ) self.assertFalse(_is_control("\r" ) ) def __a ( self ) -> Dict: self.assertTrue(_is_punctuation("-" ) ) self.assertTrue(_is_punctuation("$" ) ) self.assertTrue(_is_punctuation("`" ) ) self.assertTrue(_is_punctuation("." 
) ) self.assertFalse(_is_punctuation("A" ) ) self.assertFalse(_is_punctuation(" " ) ) def __a ( self ) -> Any: lowerCAmelCase_ = self.get_tokenizer() lowerCAmelCase_ = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(_a ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) self.assertListEqual( [rust_tokenizer.tokenize(_a ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) @slow def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = self.tokenizer_class.from_pretrained("google/mobilebert-uncased" ) lowerCAmelCase_ = tokenizer.encode("sequence builders" , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer.encode("multi-sequence build" , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_a ) lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_a , _a ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def __a ( self ) -> Union[str, Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence." lowerCAmelCase_ = tokenizer_r.encode_plus( _a , return_attention_mask=_a , return_token_type_ids=_a , return_offsets_mapping=_a , add_special_tokens=_a , ) lowerCAmelCase_ = tokenizer_r.do_lower_case if hasattr(_a , "do_lower_case" ) else False lowerCAmelCase_ = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "A"), ((1, 2), ","), ((3, 5), "na"), ((5, 6), "##ï"), ((6, 8), "##ve"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "Allen"), ((21, 23), "##NL"), ((23, 24), "##P"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "a"), ((1, 2), ","), ((3, 8), "naive"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "allen"), ((21, 23), "##nl"), ((23, 24), "##p"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) ) self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] ) def __a ( self ) -> Optional[int]: lowerCAmelCase_ = ["的", "人", "有"] lowerCAmelCase_ = "".join(_a ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): lowerCAmelCase_ = True lowerCAmelCase_ = self.tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = tokenizer_p.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer_r.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer_r.convert_ids_to_tokens(_a ) lowerCAmelCase_ = tokenizer_p.convert_ids_to_tokens(_a ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(_a , _a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = False lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = self.tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = tokenizer_r.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer_p.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer_r.convert_ids_to_tokens(_a ) lowerCAmelCase_ = 
tokenizer_p.convert_ids_to_tokens(_a ) # it is expected that only the first Chinese character is not preceded by "##". lowerCAmelCase_ = [ f"##{token}" if idx != 0 else token for idx, token in enumerate(_a ) ] self.assertListEqual(_a , _a ) self.assertListEqual(_a , _a )
22
1
from collections import UserDict from typing import Union import numpy as np import requests from ..utils import ( add_end_docstrings, logging, ) from .audio_classification import ffmpeg_read from .base import PIPELINE_INIT_ARGS, Pipeline lowerCamelCase__ = logging.get_logger(__name__) @add_end_docstrings(__lowercase ) class __magic_name__ (__lowercase ): def __init__( self , **_a ) -> Dict: super().__init__(**_a ) if self.framework != "pt": raise ValueError(f"The {self.__class__} is only available in PyTorch." ) # No specific FOR_XXX available yet def __call__( self , _a , **_a ) -> str: return super().__call__(_a , **_a ) def __a ( self , **_a ) -> Dict: lowerCAmelCase_ = {} if "candidate_labels" in kwargs: lowerCAmelCase_ = kwargs["candidate_labels"] if "hypothesis_template" in kwargs: lowerCAmelCase_ = kwargs["hypothesis_template"] return preprocess_params, {}, {} def __a ( self , _a , _a=None , _a="This is a sound of {}." ) -> Tuple: if isinstance(_a , _a ): if audio.startswith("http://" ) or audio.startswith("https://" ): # We need to actually check for a real protocol, otherwise it's impossible to use a local file # like http_huggingface_co.png lowerCAmelCase_ = requests.get(_a ).content else: with open(_a , "rb" ) as f: lowerCAmelCase_ = f.read() if isinstance(_a , _a ): lowerCAmelCase_ = ffmpeg_read(_a , self.feature_extractor.sampling_rate ) if not isinstance(_a , np.ndarray ): raise ValueError("We expect a numpy ndarray as input" ) if len(audio.shape ) != 1: raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline" ) lowerCAmelCase_ = self.feature_extractor( [audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors="pt" ) lowerCAmelCase_ = candidate_labels lowerCAmelCase_ = [hypothesis_template.format(x ) for x in candidate_labels] lowerCAmelCase_ = self.tokenizer(_a , return_tensors=self.framework , padding=_a ) lowerCAmelCase_ = [text_inputs] return inputs def __a ( self , _a ) -> List[Any]: lowerCAmelCase_ = model_inputs.pop("candidate_labels" ) lowerCAmelCase_ = model_inputs.pop("text_inputs" ) if isinstance(text_inputs[0] , _a ): lowerCAmelCase_ = text_inputs[0] else: # Batching case. lowerCAmelCase_ = text_inputs[0][0] lowerCAmelCase_ = self.model(**_a , **_a ) lowerCAmelCase_ = { "candidate_labels": candidate_labels, "logits": outputs.logits_per_audio, } return model_outputs def __a ( self , _a ) -> Optional[int]: lowerCAmelCase_ = model_outputs.pop("candidate_labels" ) lowerCAmelCase_ = model_outputs["logits"][0] if self.framework == "pt": lowerCAmelCase_ = logits.softmax(dim=0 ) lowerCAmelCase_ = probs.tolist() else: raise ValueError("`tf` framework not supported." ) lowerCAmelCase_ = [ {"score": score, "label": candidate_label} for score, candidate_label in sorted(zip(_a , _a ) , key=lambda x : -x[0] ) ] return result
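# A hedged usage sketch for the pipeline above. The checkpoint name is an
# assumption (any CLAP-style zero-shot audio checkpoint should work), and the
# audio is synthetic so the example stays self-contained:
import numpy as np
from transformers import pipeline

classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
audio = np.zeros(48_000, dtype=np.float32)  # one second of silence at 48 kHz
print(classifier(audio, candidate_labels=["dog barking", "silence"]))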
22
import math from collections.abc import Iterator from itertools import takewhile def A(__a: int ): if 1 < __a < 4: # 2 and 3 are primes return True elif __a < 2 or __a % 2 == 0 or __a % 3 == 0: # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes return False # All prime numbers greater than 3 are of the form 6k +/- 1 for i in range(5 , int(math.sqrt(__a ) + 1 ) , 6 ): if __a % i == 0 or __a % (i + 2) == 0: return False return True def A(): lowerCAmelCase_ = 2 while True: if is_prime(num ): yield num num += 1 def A(__a: int = 200_0000 ): return sum(takewhile(lambda x : x < __a , prime_generator() ) ) if __name__ == "__main__": print(F'''{solution() = }''')
22
1
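A minimal usage sketch for the zero-shot audio classification pipeline above; the checkpoint and file names are assumptions for illustration (any CLAP-style audio-text model with a matching feature extractor and tokenizer should behave the same way).

from transformers import pipeline

# Hypothetical checkpoint and audio file, for illustration only.
classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
result = classifier(
    "dog_bark.wav",  # a URL or raw bytes would also be accepted by preprocess()
    candidate_labels=["dog barking", "engine idling"],
    hypothesis_template="This is a sound of {}.",
)
# result is sorted by descending score, e.g.
# [{"score": 0.98, "label": "dog barking"}, {"score": 0.02, "label": "engine idling"}]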
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase__ = { '''configuration_blenderbot''': [ '''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BlenderbotConfig''', '''BlenderbotOnnxConfig''', ], '''tokenization_blenderbot''': ['''BlenderbotTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ['''BlenderbotTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ '''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BlenderbotForCausalLM''', '''BlenderbotForConditionalGeneration''', '''BlenderbotModel''', '''BlenderbotPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ '''TFBlenderbotForConditionalGeneration''', '''TFBlenderbotModel''', '''TFBlenderbotPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ '''FlaxBlenderbotForConditionalGeneration''', '''FlaxBlenderbotModel''', '''FlaxBlenderbotPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_blenderbot import ( BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotOnnxConfig, ) from .tokenization_blenderbot import BlenderbotTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_fast import BlenderbotTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot import ( BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot import ( TFBlenderbotForConditionalGeneration, TFBlenderbotModel, TFBlenderbotPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, FlaxBlenderbotPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
22
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { '''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''', '''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''', '''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''', '''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''', # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2 } class __magic_name__ (__lowercase ): lowerCamelCase__ = '''mobilenet_v2''' def __init__( self , _a=3 , _a=224 , _a=1.0 , _a=8 , _a=8 , _a=6 , _a=32 , _a=True , _a=True , _a="relu6" , _a=True , _a=0.8 , _a=0.0_2 , _a=0.0_0_1 , _a=255 , **_a , ) -> Dict: super().__init__(**_a ) if depth_multiplier <= 0: raise ValueError("depth_multiplier must be greater than zero." ) lowerCAmelCase_ = num_channels lowerCAmelCase_ = image_size lowerCAmelCase_ = depth_multiplier lowerCAmelCase_ = depth_divisible_by lowerCAmelCase_ = min_depth lowerCAmelCase_ = expand_ratio lowerCAmelCase_ = output_stride lowerCAmelCase_ = first_layer_is_expansion lowerCAmelCase_ = finegrained_output lowerCAmelCase_ = hidden_act lowerCAmelCase_ = tf_padding lowerCAmelCase_ = classifier_dropout_prob lowerCAmelCase_ = initializer_range lowerCAmelCase_ = layer_norm_eps lowerCAmelCase_ = semantic_loss_ignore_index class __magic_name__ (__lowercase ): lowerCamelCase__ = version.parse('''1.11''' ) @property def __a ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict([("pixel_values", {0: "batch"})] ) @property def __a ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "image-classification": return OrderedDict([("logits", {0: "batch"})] ) else: return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] ) @property def __a ( self ) -> float: return 1E-4
22
1
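The Blenderbot `__init__` above follows the lazy-import pattern: `_import_structure` maps submodules to exported names, and nothing heavy is imported until an attribute is first accessed. A stripped-down sketch of the idea follows (an illustration only, not transformers' actual `_LazyModule`, which also handles `__dir__`, module specs, and import failures).

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {submodule: [names]} into {name: submodule}
        self._name_to_module = {n: mod for mod, names in import_structure.items() for n in names}

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__} has no attribute {attr}")
        module = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value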
import os from shutil import copyfile from typing import List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = {'''vocab_file''': '''sentencepiece.model'''} lowerCamelCase__ = { '''vocab_file''': { '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''', }, } lowerCamelCase__ = { '''google/rembert''': 2_56, } class __magic_name__ (__lowercase ): lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , _a , _a=False , _a=True , _a=True , _a="[CLS]" , _a="[SEP]" , _a="[UNK]" , _a="[SEP]" , _a="[PAD]" , _a="[CLS]" , _a="[MASK]" , **_a , ) -> str: super().__init__( do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , **_a , ) lowerCAmelCase_ = do_lower_case lowerCAmelCase_ = remove_space lowerCAmelCase_ = keep_accents lowerCAmelCase_ = vocab_file lowerCAmelCase_ = spm.SentencePieceProcessor() self.sp_model.Load(_a ) @property def __a ( self ) -> List[str]: return len(self.sp_model ) def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> Optional[int]: lowerCAmelCase_ = self.__dict__.copy() lowerCAmelCase_ = None return state def __setstate__( self , _a ) -> Any: lowerCAmelCase_ = d lowerCAmelCase_ = spm.SentencePieceProcessor() self.sp_model.Load(self.vocab_file ) def __a ( self , _a , _a=False ) -> Any: lowerCAmelCase_ = self.sp_model.EncodeAsPieces(_a ) return pieces def __a ( self , _a ) -> Any: return self.sp_model.PieceToId(_a ) def __a ( self , _a ) -> List[Any]: return self.sp_model.IdToPiece(_a ) def __a ( self , _a ) -> Optional[int]: lowerCAmelCase_ = self.sp_model.decode_pieces(_a ) return out_string def __a ( self , _a , _a = None ) -> List[int]: lowerCAmelCase_ = [self.sep_token_id] lowerCAmelCase_ = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __a ( self , _a , _a = None , _a = False ) -> List[int]: if already_has_special_tokens: if token_ids_a is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1] return [1] + ([0] * len(_a )) + [1] def __a ( self , _a , _a = None ) -> List[int]: lowerCAmelCase_ = [self.sep_token_id] lowerCAmelCase_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __a ( self , _a , _a = None ) -> Tuple[str]: if not os.path.isdir(_a ): logger.error("Vocabulary path ({}) should be a directory".format(_a ) ) return lowerCAmelCase_ = os.path.join( _a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ): copyfile(self.vocab_file , _a ) return (out_vocab_file,)
22
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set[str]:
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
22
1
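A quick worked check for the iterative depth-first search above: starting from "A", every node of G is reachable, so the returned set contains all seven vertices.

assert depth_first_search(G, "A") == {"A", "B", "C", "D", "E", "F", "G"}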
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { '''microsoft/trocr-base-handwritten''': ( '''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json''' ), # See all TrOCR models at https://huggingface.co/models?filter=trocr } class __magic_name__ (__lowercase ): lowerCamelCase__ = '''trocr''' lowerCamelCase__ = ['''past_key_values'''] lowerCamelCase__ = { '''num_attention_heads''': '''decoder_attention_heads''', '''hidden_size''': '''d_model''', '''num_hidden_layers''': '''decoder_layers''', } def __init__( self , _a=50265 , _a=1024 , _a=12 , _a=16 , _a=4096 , _a="gelu" , _a=512 , _a=0.1 , _a=0.0 , _a=0.0 , _a=2 , _a=0.0_2 , _a=0.0 , _a=True , _a=False , _a=True , _a=True , _a=1 , _a=0 , _a=2 , **_a , ) -> Dict: lowerCAmelCase_ = vocab_size lowerCAmelCase_ = d_model lowerCAmelCase_ = decoder_layers lowerCAmelCase_ = decoder_attention_heads lowerCAmelCase_ = decoder_ffn_dim lowerCAmelCase_ = activation_function lowerCAmelCase_ = max_position_embeddings lowerCAmelCase_ = dropout lowerCAmelCase_ = attention_dropout lowerCAmelCase_ = activation_dropout lowerCAmelCase_ = init_std lowerCAmelCase_ = decoder_layerdrop lowerCAmelCase_ = use_cache lowerCAmelCase_ = scale_embedding lowerCAmelCase_ = use_learned_position_embeddings lowerCAmelCase_ = layernorm_embedding super().__init__( pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , decoder_start_token_id=_a , **_a , )
22
def pancake_sort(arr):
    cur = len(arr)
    while cur > 1:
        # Find the maximum number in arr
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
22
1
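A quick worked check for the pancake sort above: on [3, 1, 2] the first pass flips the maximum to the front (a no-op here, since 3 is already first) and then flips it to the back, giving [2, 1, 3]; the second pass sorts the remaining prefix.

assert pancake_sort([3, 1, 2]) == [1, 2, 3]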
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = Dict[str, Any] lowerCamelCase__ = List[Prediction] @add_end_docstrings(__lowercase ) class __magic_name__ (__lowercase ): def __init__( self , *_a , **_a ) -> Optional[int]: super().__init__(*_a , **_a ) if self.framework == "tf": raise ValueError(f"The {self.__class__} is only available in PyTorch." ) requires_backends(self , "vision" ) self.check_model_type( dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) ) def __a ( self , **_a ) -> Optional[Any]: lowerCAmelCase_ = {} if "threshold" in kwargs: lowerCAmelCase_ = kwargs["threshold"] return {}, {}, postprocess_kwargs def __call__( self , *_a , **_a ) -> Union[Predictions, List[Prediction]]: return super().__call__(*_a , **_a ) def __a ( self , _a ) -> Tuple: lowerCAmelCase_ = load_image(_a ) lowerCAmelCase_ = torch.IntTensor([[image.height, image.width]] ) lowerCAmelCase_ = self.image_processor(images=[image] , return_tensors="pt" ) if self.tokenizer is not None: lowerCAmelCase_ = self.tokenizer(text=inputs["words"] , boxes=inputs["boxes"] , return_tensors="pt" ) lowerCAmelCase_ = target_size return inputs def __a ( self , _a ) -> List[Any]: lowerCAmelCase_ = model_inputs.pop("target_size" ) lowerCAmelCase_ = self.model(**_a ) lowerCAmelCase_ = outputs.__class__({"target_size": target_size, **outputs} ) if self.tokenizer is not None: lowerCAmelCase_ = model_inputs["bbox"] return model_outputs def __a ( self , _a , _a=0.9 ) -> List[Any]: lowerCAmelCase_ = model_outputs["target_size"] if self.tokenizer is not None: # This is a LayoutLMForTokenClassification variant. # The OCR got the boxes and the model classified the words. lowerCAmelCase_ , lowerCAmelCase_ = target_size[0].tolist() def unnormalize(_a ): return self._get_bounding_box( torch.Tensor( [ (width * bbox[0] / 1000), (height * bbox[1] / 1000), (width * bbox[2] / 1000), (height * bbox[3] / 1000), ] ) ) lowerCAmelCase_ , lowerCAmelCase_ = model_outputs["logits"].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 ) lowerCAmelCase_ = [self.model.config.idalabel[prediction] for prediction in classes.tolist()] lowerCAmelCase_ = [unnormalize(_a ) for bbox in model_outputs["bbox"].squeeze(0 )] lowerCAmelCase_ = ["score", "label", "box"] lowerCAmelCase_ = [dict(zip(_a , _a ) ) for vals in zip(scores.tolist() , _a , _a ) if vals[0] > threshold] else: # This is a regular ForObjectDetectionModel lowerCAmelCase_ = self.image_processor.post_process_object_detection(_a , _a , _a ) lowerCAmelCase_ = raw_annotations[0] lowerCAmelCase_ = raw_annotation["scores"] lowerCAmelCase_ = raw_annotation["labels"] lowerCAmelCase_ = raw_annotation["boxes"] lowerCAmelCase_ = scores.tolist() lowerCAmelCase_ = [self.model.config.idalabel[label.item()] for label in labels] lowerCAmelCase_ = [self._get_bounding_box(_a ) for box in boxes] # {"scores": [...], ...} --> [{"score":x, ...}, ...] 
lowerCAmelCase_ = ["score", "label", "box"] lowerCAmelCase_ = [ dict(zip(_a , _a ) ) for vals in zip(raw_annotation["scores"] , raw_annotation["labels"] , raw_annotation["boxes"] ) ] return annotation def __a ( self , _a ) -> Dict[str, int]: if self.framework != "pt": raise ValueError("The ObjectDetectionPipeline is only available in PyTorch." ) lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = box.int().tolist() lowerCAmelCase_ = { "xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax, } return bbox
22
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str):
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing=False) -> float:
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    return round(tf * idf, 3)
22
1
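A tiny worked example for the tf-idf helpers above; the numbers follow directly from the definitions (tf is a raw count, idf is log10(n / df) rounded to three places).

corpus = "the cat sat\nthe dog sat\nthe cat ran"
tf = term_frequency("cat", "the cat sat")  # -> 1
df, n = document_frequency("cat", corpus)  # -> (2, 3)
idf = inverse_document_frequency(df, n)    # -> round(log10(3 / 2), 3) == 0.176
score = tf_idf(tf, idf)                    # -> 0.176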
import json import os import tempfile import datasets from utils import generate_example_dataset, get_duration lowerCamelCase__ = 5_00_00 lowerCamelCase__ = 50_00 lowerCamelCase__ , lowerCamelCase__ = os.path.split(__file__) lowerCamelCase__ = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json''')) @get_duration def A(__a: datasets.Dataset , __a: Union[str, Any] ): for i in range(__a ): lowerCAmelCase_ = dataset[i] @get_duration def A(__a: datasets.Dataset , __a: Optional[Any] , __a: Tuple ): for i in range(0 , len(__a ) , __a ): lowerCAmelCase_ = dataset[i : i + batch_size] @get_duration def A(__a: datasets.Dataset , __a: Tuple , __a: Optional[Any] ): with dataset.formatted_as(type=__a ): for i in range(__a ): lowerCAmelCase_ = dataset[i] @get_duration def A(__a: datasets.Dataset , __a: Any , __a: Any , __a: List[str] ): with dataset.formatted_as(type=__a ): for i in range(0 , __a , __a ): lowerCAmelCase_ = dataset[i : i + batch_size] def A(): lowerCAmelCase_ = {"num examples": SPEED_TEST_N_EXAMPLES} lowerCAmelCase_ = [ (read, {"length": SMALL_TEST}), (read, {"length": SPEED_TEST_N_EXAMPLES}), (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}), (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}), (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}), (read_formatted, {"type": "numpy", "length": SMALL_TEST}), (read_formatted, {"type": "pandas", "length": SMALL_TEST}), (read_formatted, {"type": "torch", "length": SMALL_TEST}), (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}), (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}), (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}), ] lowerCAmelCase_ = [ (read, {"length": SMALL_TEST}), (read, {"length": SPEED_TEST_N_EXAMPLES}), (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}), (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}), (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}), (read_formatted, {"type": "numpy", "length": SMALL_TEST}), (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}), (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}), ] with tempfile.TemporaryDirectory() as tmp_dir: print("generating dataset" ) lowerCAmelCase_ = datasets.Features( {"list": datasets.Sequence(datasets.Value("float32" ) ), "numbers": datasets.Value("float32" )} ) lowerCAmelCase_ = generate_example_dataset( os.path.join(__a , "dataset.arrow" ) , __a , num_examples=__a , seq_shapes={"list": (100,)} , ) print("first set of iterations" ) for func, kwargs in functions: print(func.__name__ , str(__a ) ) lowerCAmelCase_ = func(__a , **__a ) print("shuffling dataset" ) lowerCAmelCase_ = dataset.shuffle() print("Second set of iterations (after shuffling" ) for func, kwargs in functions_shuffled: print("shuffled " , func.__name__ , str(__a ) ) lowerCAmelCase_ = func( __a , **__a ) with open(__a , "wb" ) as f: f.write(json.dumps(__a ).encode("utf-8" ) ) if __name__ == "__main__": # useful to run the profiler benchmark_iterating()
22
import warnings from ...utils import is_sklearn_available, requires_backends if is_sklearn_available(): from scipy.stats import pearsonr, spearmanr from sklearn.metrics import fa_score, matthews_corrcoef lowerCamelCase__ = ( '''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate ''' '''library. You can have a look at this example script for pointers: ''' '''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' ) def A(__a: str , __a: List[Any] ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) return (preds == labels).mean() def A(__a: Any , __a: Any ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) lowerCAmelCase_ = simple_accuracy(__a , __a ) lowerCAmelCase_ = fa_score(y_true=__a , y_pred=__a ) return { "acc": acc, "f1": fa, "acc_and_f1": (acc + fa) / 2, } def A(__a: List[str] , __a: Optional[int] ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) lowerCAmelCase_ = pearsonr(__a , __a )[0] lowerCAmelCase_ = spearmanr(__a , __a )[0] return { "pearson": pearson_corr, "spearmanr": spearman_corr, "corr": (pearson_corr + spearman_corr) / 2, } def A(__a: Union[str, Any] , __a: Any , __a: str ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) assert len(__a ) == len(__a ), F"Predictions and labels have mismatched lengths {len(__a )} and {len(__a )}" if task_name == "cola": return {"mcc": matthews_corrcoef(__a , __a )} elif task_name == "sst-2": return {"acc": simple_accuracy(__a , __a )} elif task_name == "mrpc": return acc_and_fa(__a , __a ) elif task_name == "sts-b": return pearson_and_spearman(__a , __a ) elif task_name == "qqp": return acc_and_fa(__a , __a ) elif task_name == "mnli": return {"mnli/acc": simple_accuracy(__a , __a )} elif task_name == "mnli-mm": return {"mnli-mm/acc": simple_accuracy(__a , __a )} elif task_name == "qnli": return {"acc": simple_accuracy(__a , __a )} elif task_name == "rte": return {"acc": simple_accuracy(__a , __a )} elif task_name == "wnli": return {"acc": simple_accuracy(__a , __a )} elif task_name == "hans": return {"acc": simple_accuracy(__a , __a )} else: raise KeyError(__a ) def A(__a: int , __a: Optional[Any] , __a: Optional[Any] ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) if len(__a ) != len(__a ): raise ValueError(F"Predictions and labels have mismatched lengths {len(__a )} and {len(__a )}" ) if task_name == "xnli": return {"acc": simple_accuracy(__a , __a )} else: raise KeyError(__a )
22
1
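The iteration benchmark above leans on a get_duration decorator imported from a local utils module. A minimal sketch of what such a timing decorator plausibly looks like (an assumption; the real helper may also repeat runs or record results elsewhere):

import functools
import timeit

def get_duration(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = timeit.default_timer()
        func(*args, **kwargs)
        return timeit.default_timer() - start  # seconds elapsed
    return wrapper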
import unittest import numpy as np import torch from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class __magic_name__ (unittest.TestCase ): @property def __a ( self ) -> List[str]: torch.manual_seed(0 ) lowerCAmelCase_ = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , ) return model def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = self.dummy_uncond_unet lowerCAmelCase_ = PNDMScheduler() lowerCAmelCase_ = PNDMPipeline(unet=_a , scheduler=_a ) pndm.to(_a ) pndm.set_progress_bar_config(disable=_a ) lowerCAmelCase_ = torch.manual_seed(0 ) lowerCAmelCase_ = pndm(generator=_a , num_inference_steps=20 , output_type="numpy" ).images lowerCAmelCase_ = torch.manual_seed(0 ) lowerCAmelCase_ = pndm(generator=_a , num_inference_steps=20 , output_type="numpy" , return_dict=_a )[0] lowerCAmelCase_ = image[0, -3:, -3:, -1] lowerCAmelCase_ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCAmelCase_ = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class __magic_name__ (unittest.TestCase ): def __a ( self ) -> List[Any]: lowerCAmelCase_ = "google/ddpm-cifar10-32" lowerCAmelCase_ = UNetaDModel.from_pretrained(_a ) lowerCAmelCase_ = PNDMScheduler() lowerCAmelCase_ = PNDMPipeline(unet=_a , scheduler=_a ) pndm.to(_a ) pndm.set_progress_bar_config(disable=_a ) lowerCAmelCase_ = torch.manual_seed(0 ) lowerCAmelCase_ = pndm(generator=_a , output_type="numpy" ).images lowerCAmelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCAmelCase_ = np.array([0.1_5_6_4, 0.1_4_6_4_5, 0.1_4_0_6, 0.1_4_7_1_5, 0.1_2_4_2_5, 0.1_4_0_4_5, 0.1_3_1_1_5, 0.1_2_1_7_5, 0.1_2_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
22
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __magic_name__ (__lowercase ): lowerCamelCase__ = ['''image_processor''', '''tokenizer'''] lowerCamelCase__ = '''ViTImageProcessor''' lowerCamelCase__ = ('''CLIPTokenizer''', '''CLIPTokenizerFast''') def __init__( self , _a=None , _a=None , **_a ) -> Tuple: lowerCAmelCase_ = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , _a , ) lowerCAmelCase_ = kwargs.pop("feature_extractor" ) lowerCAmelCase_ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(_a , _a ) def __call__( self , _a=None , _a=None , _a=None , _a=None , **_a ) -> Dict: if text is None and visual_prompt is None and images is None: raise ValueError("You have to specify either text, visual prompt or images." ) if text is not None and visual_prompt is not None: raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." ) if text is not None: lowerCAmelCase_ = self.tokenizer(_a , return_tensors=_a , **_a ) if visual_prompt is not None: lowerCAmelCase_ = self.image_processor(_a , return_tensors=_a , **_a ) if images is not None: lowerCAmelCase_ = self.image_processor(_a , return_tensors=_a , **_a ) if visual_prompt is not None and images is not None: lowerCAmelCase_ = { "pixel_values": image_features.pixel_values, "conditional_pixel_values": prompt_features.pixel_values, } return encoding elif text is not None and images is not None: lowerCAmelCase_ = image_features.pixel_values return encoding elif text is not None: return encoding elif visual_prompt is not None: lowerCAmelCase_ = { "conditional_pixel_values": prompt_features.pixel_values, } return encoding else: return BatchEncoding(data=dict(**_a ) , tensor_type=_a ) def __a ( self , *_a , **_a ) -> List[str]: return self.tokenizer.batch_decode(*_a , **_a ) def __a ( self , *_a , **_a ) -> Optional[int]: return self.tokenizer.decode(*_a , **_a ) @property def __a ( self ) -> List[str]: warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , _a , ) return self.image_processor_class @property def __a ( self ) -> Optional[Any]: warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _a , ) return self.image_processor
22
1
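The same PNDM flow as in the slow test above, outside the test harness; the checkpoint is the one the test downloads, and everything else is standard diffusers API (shown here as a sketch, not an exact reproduction of the test's assertions).

import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
pipe = PNDMPipeline(unet=unet, scheduler=PNDMScheduler())
images = pipe(generator=torch.manual_seed(0), num_inference_steps=20, output_type="numpy").images
print(images[0].shape)  # (32, 32, 3) for this checkpoint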
def permute(nums):
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums):
    def backtrack(start: int):
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[start], nums[i] = nums[i], nums[start]
                backtrack(start + 1)
                nums[start], nums[i] = nums[i], nums[start]  # backtrack

    output = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
22
import datasets


_CITATION = """\
@InProceedings{conneau2018xnli,
  author = "Conneau, Alexis and Rinott, Ruty and Lample, Guillaume and Williams, Adina and Bowman, Samuel R. and Schwenk, Holger and Stoyanov, Veselin",
  title = "XNLI: Evaluating Cross-lingual Sentence Representations",
  booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing",
  year = "2018",
  publisher = "Association for Computational Linguistics",
  location = "Brussels, Belgium",
}
"""

_DESCRIPTION = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three labels).
"""

_KWARGS_DESCRIPTION = """
Computes XNLI score which is just simple accuracy.
Args:
    predictions: Predicted labels.
    references: Ground truth labels.
Returns:
    'accuracy': accuracy
Examples:

    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> xnli_metric = datasets.load_metric("xnli")
    >>> results = xnli_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}
"""


def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
22
1
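A quick worked check for the two permutation routines above: both enumerate all n! orderings of the input, though they emit them in different orders.

assert sorted(permute([1, 2, 3])) == sorted(permute2([1, 2, 3]))
assert len(permute2([1, 2, 3])) == 6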
import importlib.metadata
from typing import Union

from packaging.version import Version, parse

from .constants import STR_OPERATION_TO_FUNC

torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    return compare_versions(torch_version, operation, version)
22
import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset lowerCamelCase__ = '''bert-base-cased''' lowerCamelCase__ = '''google/pegasus-xsum''' lowerCamelCase__ = [''' Sam ate lunch today.''', '''Sams lunch ingredients.'''] lowerCamelCase__ = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee'''] lowerCamelCase__ = '''patrickvonplaten/t5-tiny-random''' lowerCamelCase__ = '''sshleifer/bart-tiny-random''' lowerCamelCase__ = '''sshleifer/tiny-mbart''' lowerCamelCase__ = '''sshleifer/tiny-marian-en-de''' def A(__a: Path , __a: list ): lowerCAmelCase_ = "\n".join(__a ) Path(__a ).open("w" ).writelines(__a ) def A(__a: str ): for split in ["train", "val", "test"]: _dump_articles(os.path.join(__a , F"{split}.source" ) , __a ) _dump_articles(os.path.join(__a , F"{split}.target" ) , __a ) return tmp_dir class __magic_name__ (__lowercase ): @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) @slow def __a ( self , _a ) -> Dict: lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a ) lowerCAmelCase_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in ARTICLES ) lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in SUMMARIES ) lowerCAmelCase_ = 4 lowerCAmelCase_ = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated lowerCAmelCase_ , lowerCAmelCase_ = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error. lowerCAmelCase_ = SeqaSeqDataset( _a , data_dir=_a , type_path="train" , max_source_length=_a , max_target_length=_a , src_lang=_a , tgt_lang=_a , ) lowerCAmelCase_ = DataLoader(_a , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert isinstance(_a , _a ) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. 
assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place lowerCAmelCase_ = shift_tokens_right(batch["labels"] , tokenizer.pad_token_id ) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED] ) def __a ( self , _a ) -> str: lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a ) lowerCAmelCase_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in ARTICLES ) lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in SUMMARIES ) lowerCAmelCase_ = 4 lowerCAmelCase_ = LegacySeqaSeqDataset( _a , data_dir=_a , type_path="train" , max_source_length=20 , max_target_length=_a , ) lowerCAmelCase_ = DataLoader(_a , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 20 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" ) lowerCAmelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) lowerCAmelCase_ = tmp_dir.joinpath("train.source" ).open().readlines() lowerCAmelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) pack_data_dir(_a , _a , 128 , _a ) lowerCAmelCase_ = {x.name for x in tmp_dir.iterdir()} lowerCAmelCase_ = {x.name for x in save_dir.iterdir()} lowerCAmelCase_ = save_dir.joinpath("train.source" ).open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(_a ) < len(_a ) assert len(_a ) == 1 assert len(packed_examples[0] ) == sum(len(_a ) for x in orig_examples ) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="This test requires fairseq" ) def __a ( self ) -> Any: if not FAIRSEQ_AVAILABLE: return lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset(max_len=64 ) lowerCAmelCase_ = 64 lowerCAmelCase_ = ds.make_dynamic_sampler(_a , required_batch_size_multiple=_a ) lowerCAmelCase_ = [len(_a ) for x in batch_sampler] assert len(set(_a ) ) > 1 # it's not dynamic batch size if every batch is the same length assert sum(_a ) == len(_a ) # no dropped or added examples lowerCAmelCase_ = DataLoader(_a , batch_sampler=_a , collate_fn=ds.collate_fn , num_workers=2 ) lowerCAmelCase_ = [] lowerCAmelCase_ = [] for batch in data_loader: lowerCAmelCase_ = batch["input_ids"].shape lowerCAmelCase_ = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple lowerCAmelCase_ = np.product(batch["input_ids"].shape ) num_src_per_batch.append(_a ) if num_src_tokens > (max_tokens * 1.1): failures.append(_a ) assert num_src_per_batch[0] == max(_a ) if failures: raise 
AssertionError(f"too many tokens in {len(_a )} batches" ) def __a ( self ) -> List[str]: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset(max_len=512 ) lowerCAmelCase_ = 2 lowerCAmelCase_ = ds.make_sortish_sampler(_a , shuffle=_a ) lowerCAmelCase_ = DataLoader(_a , batch_size=_a , collate_fn=ds.collate_fn , num_workers=2 ) lowerCAmelCase_ = DataLoader(_a , batch_size=_a , collate_fn=ds.collate_fn , num_workers=2 , sampler=_a ) lowerCAmelCase_ = tokenizer.pad_token_id def count_pad_tokens(_a , _a="input_ids" ): return [batch[k].eq(_a ).sum().item() for batch in data_loader] assert sum(count_pad_tokens(_a , k="labels" ) ) < sum(count_pad_tokens(_a , k="labels" ) ) assert sum(count_pad_tokens(_a ) ) < sum(count_pad_tokens(_a ) ) assert len(_a ) == len(_a ) def __a ( self , _a=1000 , _a=128 ) -> str: if os.getenv("USE_REAL_DATA" , _a ): lowerCAmelCase_ = "examples/seq2seq/wmt_en_ro" lowerCAmelCase_ = max_len * 2 * 64 if not Path(_a ).joinpath("train.len" ).exists(): save_len_file(_a , _a ) else: lowerCAmelCase_ = "examples/seq2seq/test_data/wmt_en_ro" lowerCAmelCase_ = max_len * 4 save_len_file(_a , _a ) lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a ) lowerCAmelCase_ = SeqaSeqDataset( _a , data_dir=_a , type_path="train" , max_source_length=_a , max_target_length=_a , n_obs=_a , ) return ds, max_tokens, tokenizer def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset() lowerCAmelCase_ = set(DistributedSortishSampler(_a , 256 , num_replicas=2 , rank=0 , add_extra_examples=_a ) ) lowerCAmelCase_ = set(DistributedSortishSampler(_a , 256 , num_replicas=2 , rank=1 , add_extra_examples=_a ) ) assert idsa.intersection(_a ) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) def __a ( self , _a ) -> List[str]: lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a , use_fast=_a ) if tok_name == MBART_TINY: lowerCAmelCase_ = SeqaSeqDataset( _a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , src_lang="EN" , tgt_lang="FR" , ) lowerCAmelCase_ = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: lowerCAmelCase_ = SeqaSeqDataset( _a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , ) lowerCAmelCase_ = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(_a ) == 1 if tok_name == BART_TINY else len(_a ) == 0
22
1
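The version helpers in the first file above reduce to packaging's Version comparisons; assuming STR_OPERATION_TO_FUNC maps strings like ">=" to the matching operator.* functions, a call expands roughly to:

import operator
from packaging.version import parse

# compare_versions(parse("2.1.0"), ">=", "2.0.0") boils down to:
assert operator.ge(parse("2.1.0"), parse("2.0.0"))
# version-aware, not lexicographic:
assert parse("1.10") > parse("1.9")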
import math


def perfect_square(num: int) -> bool:
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
22
def find_min(arr):
    n = len(arr)
    s = sum(arr)
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    for i in range(1, n + 1):
        dp[i][0] = True
    for i in range(1, s + 1):
        dp[0][i] = False
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i][j - 1]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
22
1
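A quick worked check for the binary-search variant above: for n = 16 the search narrows to mid = 4 with 4**2 == 16, while 15 falls through every branch and returns False.

assert perfect_square_binary_search(16) is True
assert perfect_square_binary_search(15) is False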
import inspect import tempfile import unittest from huggingface_hub import hf_hub_download from transformers import is_torch_available from transformers.testing_utils import is_flaky, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin lowerCamelCase__ = 1e-4 if is_torch_available(): import torch from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder @require_torch class __magic_name__ : def __init__( self , _a , _a=16 , _a=13 , _a=7 , _a=14 , _a=10 , _a=19 , _a=5 , _a=4 , _a=True , _a=16 , _a=2 , _a=4 , _a=4 , _a="gelu" , _a=0.1 , _a=0.1 , _a=[1, 2, 3, 4, 5] , _a=25 , _a=5 , ) -> Any: lowerCAmelCase_ = d_model lowerCAmelCase_ = parent lowerCAmelCase_ = batch_size lowerCAmelCase_ = prediction_length lowerCAmelCase_ = context_length lowerCAmelCase_ = cardinality lowerCAmelCase_ = num_time_features lowerCAmelCase_ = lags_sequence lowerCAmelCase_ = embedding_dimension lowerCAmelCase_ = is_training lowerCAmelCase_ = hidden_size lowerCAmelCase_ = num_hidden_layers lowerCAmelCase_ = num_attention_heads lowerCAmelCase_ = intermediate_size lowerCAmelCase_ = hidden_act lowerCAmelCase_ = hidden_dropout_prob lowerCAmelCase_ = attention_probs_dropout_prob lowerCAmelCase_ = context_length lowerCAmelCase_ = prediction_length + label_length lowerCAmelCase_ = label_length lowerCAmelCase_ = moving_average lowerCAmelCase_ = autocorrelation_factor def __a ( self ) -> List[Any]: return AutoformerConfig( d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , ) def __a ( self , _a ) -> Optional[int]: lowerCAmelCase_ = config.context_length + max(config.lags_sequence ) lowerCAmelCase_ = ids_tensor([self.batch_size, 1] , config.cardinality[0] ) lowerCAmelCase_ = floats_tensor([self.batch_size, _past_length, config.num_time_features] ) lowerCAmelCase_ = floats_tensor([self.batch_size, _past_length] ) lowerCAmelCase_ = floats_tensor([self.batch_size, _past_length] ) > 0.5 # decoder inputs lowerCAmelCase_ = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] ) lowerCAmelCase_ = floats_tensor([self.batch_size, config.prediction_length] ) lowerCAmelCase_ = { "past_values": past_values, "static_categorical_features": static_categorical_features, "past_time_features": past_time_features, "past_observed_mask": past_observed_mask, "future_time_features": future_time_features, "future_values": future_values, } return inputs_dict def __a ( self ) -> Any: lowerCAmelCase_ = self.get_config() lowerCAmelCase_ = self.prepare_autoformer_inputs_dict(_a ) return config, inputs_dict def __a ( self ) -> Dict: lowerCAmelCase_ , lowerCAmelCase_ = 
self.prepare_config_and_inputs() return config, inputs_dict def __a ( self , _a , _a ) -> Union[str, Any]: lowerCAmelCase_ = AutoformerModel(config=_a ).to(_a ).eval() lowerCAmelCase_ = model(**_a ) lowerCAmelCase_ = outputs.encoder_last_hidden_state lowerCAmelCase_ = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase_ = model.get_encoder() encoder.save_pretrained(_a ) lowerCAmelCase_ = AutoformerEncoder.from_pretrained(_a ).to(_a ) lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = model.create_network_inputs(**_a ) lowerCAmelCase_ , lowerCAmelCase_ = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] ) lowerCAmelCase_ = torch.cat( (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , ) lowerCAmelCase_ = encoder(inputs_embeds=_a )[0] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 ) lowerCAmelCase_ = ( torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 ) .unsqueeze(1 ) .repeat(1 , config.prediction_length , 1 ) ) lowerCAmelCase_ = torch.zeros( [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , ) lowerCAmelCase_ = torch.cat( ( torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) lowerCAmelCase_ = torch.cat( ( torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase_ = model.get_decoder() decoder.save_pretrained(_a ) lowerCAmelCase_ = AutoformerDecoder.from_pretrained(_a ).to(_a ) lowerCAmelCase_ = decoder( trend=_a , inputs_embeds=_a , encoder_hidden_states=_a , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 ) @require_torch class __magic_name__ (__lowercase , __lowercase , unittest.TestCase ): lowerCamelCase__ = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else () lowerCamelCase__ = (AutoformerForPrediction,) if is_torch_available() else () lowerCamelCase__ = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {} lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False def __a ( self ) -> Any: lowerCAmelCase_ = AutoformerModelTester(self ) lowerCAmelCase_ = ConfigTester(self , config_class=_a , has_text_modality=_a ) def __a ( self ) -> Any: self.config_tester.run_common_tests() def __a ( self ) -> Tuple: lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: lowerCAmelCase_ = model_class(_a ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_a ) lowerCAmelCase_ , lowerCAmelCase_ = model_class.from_pretrained(_a , output_loading_info=_a ) self.assertEqual(info["missing_keys"] , [] ) def __a ( self ) -> Any: lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*_a ) @unittest.skip(reason="Model has no tokens embeddings" ) def __a ( self ) -> int: pass def __a ( self ) -> int: lowerCAmelCase_ = inspect.signature(getattr(_a , "forward" ) ) # The main input is the name of the argument after 
`self` lowerCAmelCase_ = list(model_signature.parameters.keys() )[1] self.assertEqual(AutoformerModel.main_input_name , _a ) def __a ( self ) -> Optional[int]: lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase_ = model_class(_a ) lowerCAmelCase_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase_ = [*signature.parameters.keys()] lowerCAmelCase_ = [ "past_values", "past_time_features", "past_observed_mask", "static_categorical_features", "static_real_features", "future_values", "future_time_features", ] if model.__class__.__name__ in ["AutoformerForPrediction"]: expected_arg_names.append("future_observed_mask" ) expected_arg_names.extend( [ "decoder_attention_mask", "head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs", "past_key_values", "output_hidden_states", "output_attentions", "use_cache", "return_dict", ] ) self.assertListEqual(arg_names[: len(_a )] , _a ) def __a ( self ) -> int: lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase_ = True lowerCAmelCase_ = getattr(self.model_tester , "seq_length" , _a ) lowerCAmelCase_ = getattr(self.model_tester , "decoder_seq_length" , _a ) lowerCAmelCase_ = getattr(self.model_tester , "encoder_seq_length" , _a ) lowerCAmelCase_ = getattr(self.model_tester , "d_model" , _a ) lowerCAmelCase_ = getattr(self.model_tester , "num_attention_heads" , _a ) lowerCAmelCase_ = d_model // num_attention_heads for model_class in self.all_model_classes: lowerCAmelCase_ = True lowerCAmelCase_ = False lowerCAmelCase_ = True lowerCAmelCase_ = model_class(_a ) model.to(_a ) model.eval() with torch.no_grad(): lowerCAmelCase_ = model(**self._prepare_for_class(_a , _a ) ) lowerCAmelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowerCAmelCase_ = True lowerCAmelCase_ = model_class(_a ) model.to(_a ) model.eval() with torch.no_grad(): lowerCAmelCase_ = model(**self._prepare_for_class(_a , _a ) ) lowerCAmelCase_ = outputs.encoder_attentions self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) lowerCAmelCase_ = len(_a ) lowerCAmelCase_ = 7 if "last_hidden_state" in outputs: correct_outlen += 1 if "trend" in outputs: correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned if "loss" in outputs: correct_outlen += 1 if "params" in outputs: correct_outlen += 1 self.assertEqual(_a , _a ) # decoder attentions lowerCAmelCase_ = outputs.decoder_attentions self.assertIsInstance(_a , (list, tuple) ) self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # cross attentions lowerCAmelCase_ = outputs.cross_attentions self.assertIsInstance(_a , (list, tuple) ) self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # Check attention is always last and order is fine 
lowerCAmelCase_ = True lowerCAmelCase_ = True lowerCAmelCase_ = model_class(_a ) model.to(_a ) model.eval() with torch.no_grad(): lowerCAmelCase_ = model(**self._prepare_for_class(_a , _a ) ) self.assertEqual(out_len + 2 , len(_a ) ) lowerCAmelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) @is_flaky() def __a ( self ) -> Optional[Any]: super().test_retain_grad_hidden_states_attentions() def A(__a: Any="train-batch.pt" ): lowerCAmelCase_ = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=__a , repo_type="dataset" ) lowerCAmelCase_ = torch.load(__a , map_location=__a ) return batch @require_torch @slow class __magic_name__ (unittest.TestCase ): def __a ( self ) -> Tuple: lowerCAmelCase_ = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(_a ) lowerCAmelCase_ = prepare_batch() with torch.no_grad(): lowerCAmelCase_ = model( past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0] lowerCAmelCase_ = torch.Size( (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) ) self.assertEqual(output.shape , _a ) lowerCAmelCase_ = torch.tensor( [[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=_a ) self.assertTrue(torch.allclose(output[0, :3, :3] , _a , atol=_a ) ) def __a ( self ) -> int: lowerCAmelCase_ = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(_a ) lowerCAmelCase_ = prepare_batch("val-batch.pt" ) with torch.no_grad(): lowerCAmelCase_ = model( past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state lowerCAmelCase_ = torch.Size((64, model.config.context_length, model.config.d_model) ) self.assertEqual(output.shape , _a ) lowerCAmelCase_ = torch.tensor( [[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 1.2_9_7_0]] , device=_a ) self.assertTrue(torch.allclose(output[0, :3, :3] , _a , atol=_a ) ) def __a ( self ) -> Optional[int]: lowerCAmelCase_ = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(_a ) lowerCAmelCase_ = prepare_batch("val-batch.pt" ) with torch.no_grad(): lowerCAmelCase_ = model.generate( static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , ) lowerCAmelCase_ = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) ) self.assertEqual(outputs.sequences.shape , _a ) lowerCAmelCase_ = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] , device=_a ) lowerCAmelCase_ = outputs.sequences.mean(dim=1 ) self.assertTrue(torch.allclose(mean_prediction[0, -3:] , _a , rtol=1E-1 ) )
22
# Usage: # ./gen-card-facebook-wmt19.py import os from pathlib import Path def A(__a: Any , __a: Union[str, Any] , __a: List[str] ): lowerCAmelCase_ = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, oder?", } # BLUE scores as follows: # "pair": [fairseq, transformers] lowerCAmelCase_ = { "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"], "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"], "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"], "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"], } lowerCAmelCase_ = F"{src_lang}-{tgt_lang}" lowerCAmelCase_ = F"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. 
For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n" os.makedirs(__a , exist_ok=__a ) lowerCAmelCase_ = os.path.join(__a , "README.md" ) print(F"Generating {path}" ) with open(__a , "w" , encoding="utf-8" ) as f: f.write(__a ) # make sure we are under the root of the project lowerCamelCase__ = Path(__file__).resolve().parent.parent.parent lowerCamelCase__ = repo_dir / '''model_cards''' for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = model_name.split('''-''') lowerCamelCase__ = model_cards_dir / '''facebook''' / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { '''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''', # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class __magic_name__ (__lowercase ): lowerCamelCase__ = '''sew-d''' def __init__( self , _a=32 , _a=768 , _a=12 , _a=12 , _a=3072 , _a=2 , _a=512 , _a=256 , _a=True , _a=True , _a=("p2c", "c2p") , _a="layer_norm" , _a="gelu_python" , _a=0.1 , _a=0.1 , _a=0.1 , _a=0.0 , _a=0.1 , _a=0.0_2 , _a=1E-7 , _a=1E-5 , _a="group" , _a="gelu" , _a=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , _a=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _a=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _a=False , _a=128 , _a=16 , _a=True , _a=0.0_5 , _a=10 , _a=2 , _a=0.0 , _a=10 , _a=0 , _a="mean" , _a=False , _a=False , _a=256 , _a=0 , _a=1 , _a=2 , **_a , ) -> List[Any]: super().__init__(**_a , pad_token_id=_a , bos_token_id=_a , eos_token_id=_a ) lowerCAmelCase_ = hidden_size lowerCAmelCase_ = feat_extract_norm lowerCAmelCase_ = feat_extract_activation lowerCAmelCase_ = list(_a ) lowerCAmelCase_ = list(_a ) lowerCAmelCase_ = list(_a ) lowerCAmelCase_ = conv_bias lowerCAmelCase_ = num_conv_pos_embeddings lowerCAmelCase_ = num_conv_pos_embedding_groups lowerCAmelCase_ = len(self.conv_dim ) lowerCAmelCase_ = num_hidden_layers lowerCAmelCase_ = intermediate_size lowerCAmelCase_ = squeeze_factor lowerCAmelCase_ = max_position_embeddings lowerCAmelCase_ = position_buckets lowerCAmelCase_ = share_att_key lowerCAmelCase_ = relative_attention lowerCAmelCase_ = norm_rel_ebd lowerCAmelCase_ = list(_a ) lowerCAmelCase_ = hidden_act lowerCAmelCase_ = num_attention_heads lowerCAmelCase_ = hidden_dropout lowerCAmelCase_ = attention_dropout lowerCAmelCase_ = activation_dropout lowerCAmelCase_ = feat_proj_dropout lowerCAmelCase_ = final_dropout lowerCAmelCase_ = layer_norm_eps lowerCAmelCase_ = feature_layer_norm_eps lowerCAmelCase_ = initializer_range lowerCAmelCase_ = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect." "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`," f"but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)" f"= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`." ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowerCAmelCase_ = apply_spec_augment lowerCAmelCase_ = mask_time_prob lowerCAmelCase_ = mask_time_length lowerCAmelCase_ = mask_time_min_masks lowerCAmelCase_ = mask_feature_prob lowerCAmelCase_ = mask_feature_length lowerCAmelCase_ = mask_feature_min_masks # ctc loss lowerCAmelCase_ = ctc_loss_reduction lowerCAmelCase_ = ctc_zero_infinity # sequence classification lowerCAmelCase_ = use_weighted_layer_sum lowerCAmelCase_ = classifier_proj_size @property def __a ( self ) -> Dict: return functools.reduce(operator.mul , self.conv_stride , 1 )
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock"):
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split `x` into sentences, one sentence per line."""
    # Remove the Pegasus newline token; re.sub returns a new string, so the
    # result must be assigned back for the substitution to take effect.
    x = re.sub("<n>", "", x)
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
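# A minimal usage sketch for the splitter above (the sample text is
# illustrative; it assumes nltk's punkt data was fetched by the module-level
# download):
text = "Machine learning is great. <n>Isn't it?"
print(add_newline_to_end_of_each_sentence(text))
# Machine learning is great.
# Isn't it?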
import os from typing import Dict, List, Union import tensorflow as tf from keras_nlp.tokenizers import BytePairTokenizer from tensorflow_text import pad_model_inputs from .tokenization_gpta import GPTaTokenizer class __magic_name__ (tf.keras.layers.Layer ): def __init__( self , _a , _a , _a = None , _a = None ) -> Union[str, Any]: super().__init__() lowerCAmelCase_ = pad_token_id lowerCAmelCase_ = max_length lowerCAmelCase_ = vocab lowerCAmelCase_ = merges lowerCAmelCase_ = BytePairTokenizer(_a , _a , sequence_length=_a ) @classmethod def __a ( cls , _a , *_a , **_a ) -> List[Any]: lowerCAmelCase_ = [" ".join(_a ) for m in tokenizer.bpe_ranks.keys()] lowerCAmelCase_ = tokenizer.get_vocab() return cls(_a , _a , *_a , **_a ) @classmethod def __a ( cls , _a , *_a , **_a ) -> List[str]: lowerCAmelCase_ = GPTaTokenizer.from_pretrained(_a , *_a , **_a ) return cls.from_tokenizer(_a , *_a , **_a ) @classmethod def __a ( cls , _a ) -> Tuple: return cls(**_a ) def __a ( self ) -> List[str]: return { "vocab": self.vocab, "merges": self.merges, "max_length": self.max_length, "pad_token_id": self.pad_token_id, } def __a ( self , _a , _a = None ) -> List[str]: lowerCAmelCase_ = self.tf_tokenizer(_a ) lowerCAmelCase_ = tf.ones_like(_a ) if self.pad_token_id is not None: # pad the tokens up to max length lowerCAmelCase_ = max_length if max_length is not None else self.max_length if max_length is not None: lowerCAmelCase_ , lowerCAmelCase_ = pad_model_inputs( _a , max_seq_length=_a , pad_value=self.pad_token_id ) return {"attention_mask": attention_mask, "input_ids": input_ids}
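# This layer mirrors the graph-native GPT-2 tokenizer that ships in
# transformers as TFGPT2Tokenizer (the class and import names above are
# mangled). A brief usage sketch, assuming that upstream class is available:
import tensorflow as tf
from transformers import TFGPT2Tokenizer

tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")  # builds the BPE layer from GPT-2's vocab/merges
outputs = tf_tokenizer(tf.constant(["hello world", "a second, longer sentence"]))
print(outputs["input_ids"])       # token ids, computed inside the TF graph
print(outputs["attention_mask"])  # ones of the same shape as input_ids
# Because tokenization runs in-graph, the layer can be exported inside a
# SavedModel, so serving does not need a separate Python-side tokenizer.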
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) lowerCamelCase__ = { '''configuration_encodec''': [ '''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''EncodecConfig''', ], '''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ '''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''', '''EncodecModel''', '''EncodecPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import math


class Graph:
    def __init__(self, n=0):  # a graph with nodes 0, 1, ..., n-1
        self.n = n
        # adjacency matrix for edge weights
        self.w = [[math.inf for j in range(0, n)] for i in range(0, n)]
        # dp[i][j] stores the minimum distance found so far from i to j
        self.dp = [[math.inf for j in range(0, n)] for i in range(0, n)]

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        # classic O(n^3) relaxation: allow node k as an intermediate hop
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
import logging from transformers import PretrainedConfig lowerCamelCase__ = logging.getLogger(__name__) lowerCamelCase__ = { '''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''', } class __magic_name__ (__lowercase ): lowerCamelCase__ = '''bertabs''' def __init__( self , _a=30522 , _a=512 , _a=6 , _a=512 , _a=8 , _a=512 , _a=0.2 , _a=6 , _a=768 , _a=8 , _a=2048 , _a=0.2 , **_a , ) -> List[Any]: super().__init__(**_a ) lowerCAmelCase_ = vocab_size lowerCAmelCase_ = max_pos lowerCAmelCase_ = enc_layers lowerCAmelCase_ = enc_hidden_size lowerCAmelCase_ = enc_heads lowerCAmelCase_ = enc_ff_size lowerCAmelCase_ = enc_dropout lowerCAmelCase_ = dec_layers lowerCAmelCase_ = dec_hidden_size lowerCAmelCase_ = dec_heads lowerCAmelCase_ = dec_ff_size lowerCAmelCase_ = dec_dropout
from collections import OrderedDict from typing import Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...feature_extraction_utils import FeatureExtractionMixin from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType, logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { '''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''', # See all Perceiver models at https://huggingface.co/models?filter=perceiver } class __magic_name__ (__lowercase ): lowerCamelCase__ = '''perceiver''' def __init__( self , _a=256 , _a=1280 , _a=768 , _a=1 , _a=26 , _a=8 , _a=8 , _a=None , _a=None , _a="kv" , _a=1 , _a=1 , _a="gelu" , _a=0.1 , _a=0.0_2 , _a=1E-12 , _a=True , _a=262 , _a=2048 , _a=56 , _a=[368, 496] , _a=16 , _a=1920 , _a=16 , _a=[1, 16, 224, 224] , **_a , ) -> int: super().__init__(**_a ) lowerCAmelCase_ = num_latents lowerCAmelCase_ = d_latents lowerCAmelCase_ = d_model lowerCAmelCase_ = num_blocks lowerCAmelCase_ = num_self_attends_per_block lowerCAmelCase_ = num_self_attention_heads lowerCAmelCase_ = num_cross_attention_heads lowerCAmelCase_ = qk_channels lowerCAmelCase_ = v_channels lowerCAmelCase_ = cross_attention_shape_for_attention lowerCAmelCase_ = self_attention_widening_factor lowerCAmelCase_ = cross_attention_widening_factor lowerCAmelCase_ = hidden_act lowerCAmelCase_ = attention_probs_dropout_prob lowerCAmelCase_ = initializer_range lowerCAmelCase_ = layer_norm_eps lowerCAmelCase_ = use_query_residual # masked language modeling attributes lowerCAmelCase_ = vocab_size lowerCAmelCase_ = max_position_embeddings # image classification attributes lowerCAmelCase_ = image_size # flow attributes lowerCAmelCase_ = train_size # multimodal autoencoding attributes lowerCAmelCase_ = num_frames lowerCAmelCase_ = audio_samples_per_frame lowerCAmelCase_ = samples_per_patch lowerCAmelCase_ = output_shape class __magic_name__ (__lowercase ): @property def __a ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": lowerCAmelCase_ = {0: "batch", 1: "choice", 2: "sequence"} else: lowerCAmelCase_ = {0: "batch", 1: "sequence"} return OrderedDict( [ ("inputs", dynamic_axis), ("attention_mask", dynamic_axis), ] ) @property def __a ( self ) -> float: return 1E-4 def __a ( self , _a , _a = -1 , _a = -1 , _a = -1 , _a = False , _a = None , _a = 3 , _a = 40 , _a = 40 , ) -> Mapping[str, Any]: # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified if isinstance(_a , _a ): # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX lowerCAmelCase_ = compute_effective_axis_dimension( _a , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX lowerCAmelCase_ = preprocessor.num_special_tokens_to_add(_a ) lowerCAmelCase_ = compute_effective_axis_dimension( _a , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_a ) # Generate dummy inputs according to compute batch and sequence lowerCAmelCase_ = [" ".join(["a"] ) * seq_length] * batch_size lowerCAmelCase_ = dict(preprocessor(_a , return_tensors=_a ) ) lowerCAmelCase_ = inputs.pop("input_ids" ) return inputs elif isinstance(_a , _a ) and preprocessor.model_input_names[0] == "pixel_values": 
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX lowerCAmelCase_ = compute_effective_axis_dimension(_a , fixed_dimension=OnnxConfig.default_fixed_batch ) lowerCAmelCase_ = self._generate_dummy_images(_a , _a , _a , _a ) lowerCAmelCase_ = dict(preprocessor(images=_a , return_tensors=_a ) ) lowerCAmelCase_ = inputs.pop("pixel_values" ) return inputs else: raise ValueError( "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
import argparse import io import requests import torch from omegaconf import OmegaConf from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, create_vae_diffusers_config, renew_vae_attention_paths, renew_vae_resnet_paths, ) def A(__a: Tuple , __a: Union[str, Any] ): lowerCAmelCase_ = checkpoint lowerCAmelCase_ = {} lowerCAmelCase_ = vae_state_dict["encoder.conv_in.weight"] lowerCAmelCase_ = vae_state_dict["encoder.conv_in.bias"] lowerCAmelCase_ = vae_state_dict["encoder.conv_out.weight"] lowerCAmelCase_ = vae_state_dict["encoder.conv_out.bias"] lowerCAmelCase_ = vae_state_dict["encoder.norm_out.weight"] lowerCAmelCase_ = vae_state_dict["encoder.norm_out.bias"] lowerCAmelCase_ = vae_state_dict["decoder.conv_in.weight"] lowerCAmelCase_ = vae_state_dict["decoder.conv_in.bias"] lowerCAmelCase_ = vae_state_dict["decoder.conv_out.weight"] lowerCAmelCase_ = vae_state_dict["decoder.conv_out.bias"] lowerCAmelCase_ = vae_state_dict["decoder.norm_out.weight"] lowerCAmelCase_ = vae_state_dict["decoder.norm_out.bias"] lowerCAmelCase_ = vae_state_dict["quant_conv.weight"] lowerCAmelCase_ = vae_state_dict["quant_conv.bias"] lowerCAmelCase_ = vae_state_dict["post_quant_conv.weight"] lowerCAmelCase_ = vae_state_dict["post_quant_conv.bias"] # Retrieves the keys for the encoder down blocks only lowerCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} ) lowerCAmelCase_ = { layer_id: [key for key in vae_state_dict if F"down.{layer_id}" in key] for layer_id in range(__a ) } # Retrieves the keys for the decoder up blocks only lowerCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} ) lowerCAmelCase_ = { layer_id: [key for key in vae_state_dict if F"up.{layer_id}" in key] for layer_id in range(__a ) } for i in range(__a ): lowerCAmelCase_ = [key for key in down_blocks[i] if F"down.{i}" in key and F"down.{i}.downsample" not in key] if F"encoder.down.{i}.downsample.conv.weight" in vae_state_dict: lowerCAmelCase_ = vae_state_dict.pop( F"encoder.down.{i}.downsample.conv.weight" ) lowerCAmelCase_ = vae_state_dict.pop( F"encoder.down.{i}.downsample.conv.bias" ) lowerCAmelCase_ = renew_vae_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"down.{i}.block", "new": F"down_blocks.{i}.resnets"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) lowerCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.block" in key] lowerCAmelCase_ = 2 for i in range(1 , num_mid_res_blocks + 1 ): lowerCAmelCase_ = [key for key in mid_resnets if F"encoder.mid.block_{i}" in key] lowerCAmelCase_ = renew_vae_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) lowerCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.attn" in key] lowerCAmelCase_ = renew_vae_attention_paths(__a ) lowerCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) conv_attn_to_linear(__a ) for i in range(__a ): lowerCAmelCase_ = num_up_blocks - 1 - i lowerCAmelCase_ = [ key for key in up_blocks[block_id] if F"up.{block_id}" in key and F"up.{block_id}.upsample" not in key ] if F"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict: lowerCAmelCase_ = vae_state_dict[ 
F"decoder.up.{block_id}.upsample.conv.weight" ] lowerCAmelCase_ = vae_state_dict[ F"decoder.up.{block_id}.upsample.conv.bias" ] lowerCAmelCase_ = renew_vae_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"up.{block_id}.block", "new": F"up_blocks.{i}.resnets"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) lowerCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.block" in key] lowerCAmelCase_ = 2 for i in range(1 , num_mid_res_blocks + 1 ): lowerCAmelCase_ = [key for key in mid_resnets if F"decoder.mid.block_{i}" in key] lowerCAmelCase_ = renew_vae_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) lowerCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.attn" in key] lowerCAmelCase_ = renew_vae_attention_paths(__a ) lowerCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) conv_attn_to_linear(__a ) return new_checkpoint def A(__a: str , __a: str , ): # Only support V1 lowerCAmelCase_ = requests.get( " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" ) lowerCAmelCase_ = io.BytesIO(r.content ) lowerCAmelCase_ = OmegaConf.load(__a ) lowerCAmelCase_ = 512 lowerCAmelCase_ = "cuda" if torch.cuda.is_available() else "cpu" if checkpoint_path.endswith("safetensors" ): from safetensors import safe_open lowerCAmelCase_ = {} with safe_open(__a , framework="pt" , device="cpu" ) as f: for key in f.keys(): lowerCAmelCase_ = f.get_tensor(__a ) else: lowerCAmelCase_ = torch.load(__a , map_location=__a )["state_dict"] # Convert the VAE model. lowerCAmelCase_ = create_vae_diffusers_config(__a , image_size=__a ) lowerCAmelCase_ = custom_convert_ldm_vae_checkpoint(__a , __a ) lowerCAmelCase_ = AutoencoderKL(**__a ) vae.load_state_dict(__a ) vae.save_pretrained(__a ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''') parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''') lowerCamelCase__ = parser.parse_args() vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
values = {
    0: "0", 1: "1", 2: "2", 3: "3", 4: "4", 5: "5", 6: "6", 7: "7",
    8: "8", 9: "9", 10: "a", 11: "b", 12: "c", 13: "d", 14: "e", 15: "f",
}


def decimal_to_hexadecimal(decimal: float) -> str:
    """Convert a whole-valued decimal number to its hexadecimal representation."""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        # peel off the lowest hex digit and prepend it
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal


if __name__ == "__main__":
    import doctest

    doctest.testmod()
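# A quick worked example for the converter above; the input values are
# arbitrary, and Python's built-in hex() serves as a cross-check:
print(decimal_to_hexadecimal(47))    # 0x2f, since 47 = 2 * 16 + 15
print(decimal_to_hexadecimal(-256))  # -0x100
print(hex(47), hex(-256))            # the built-in agrees: 0x2f -0x100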
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    # every row and every column must be sorted in decreasing order
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Find the index of the first negative number in a decreasingly sorted array."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1

    # No negative numbers, so return the last index of the array + 1 (its length).
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negatives with one binary search per row, shrinking the bound as rows descend."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound

    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count negatives by scanning every value."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Count negatives by scanning each row only until its first negative value."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    """Benchmark the three counting implementations against the large grid."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    for test_grid in test_grids:
        validate_grid(test_grid)
    benchmark()
from queue import Queue
from typing import TYPE_CHECKING, Optional


if TYPE_CHECKING:
    from ..models.auto import AutoTokenizer


class BaseStreamer:
    def put(self, value):
        raise NotImplementedError()

    def end(self):
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decode the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and are handled
        # like all of the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)
            or (cp >= 0x20000 and cp <= 0x2A6DF)
            or (cp >= 0x2A700 and cp <= 0x2B73F)
            or (cp >= 0x2B740 and cp <= 0x2B81F)
            or (cp >= 0x2B820 and cp <= 0x2CEAF)
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)
        ):
            return True

        return False


class TextIteratorStreamer(TextStreamer):
    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
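# A usage sketch for the iterator variant: this is the standard transformers
# streaming pattern, where generate() runs in a worker thread while the main
# thread consumes decoded text as it becomes available (the model and prompt
# are placeholders).
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tokenizer(["An increasing sequence: one,"], return_tensors="pt")

streamer = TextIteratorStreamer(tokenizer)
thread = Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer, max_new_tokens=20))
thread.start()
for new_text in streamer:
    print(new_text, end="")
thread.join()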
import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging lowerCamelCase__ = logging.get_logger(__name__) def A(__a: Dict ): lowerCAmelCase_ = r"\w+[.]\d+" lowerCAmelCase_ = re.findall(__a , __a ) for pat in pats: lowerCAmelCase_ = key.replace(__a , "_".join(pat.split("." ) ) ) return key def A(__a: str , __a: Tuple , __a: List[Any] ): lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",) if ( any("norm" in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: lowerCAmelCase_ = pt_tuple_key[:-1] + ("embedding",) return renamed_pt_tuple_key, pt_tensor # conv layer lowerCAmelCase_ = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: lowerCAmelCase_ = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowerCAmelCase_ = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight": lowerCAmelCase_ = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowerCAmelCase_ = pt_tuple_key[:-1] + ("weight",) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias lowerCAmelCase_ = pt_tuple_key[:-1] + ("bias",) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def A(__a: Dict , __a: Any , __a: List[Any]=42 ): # Step 1: Convert pytorch tensor to numpy lowerCAmelCase_ = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params lowerCAmelCase_ = flax_model.init_weights(PRNGKey(__a ) ) lowerCAmelCase_ = flatten_dict(__a ) lowerCAmelCase_ = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCAmelCase_ = rename_key(__a ) lowerCAmelCase_ = tuple(renamed_pt_key.split("." ) ) # Correctly rename weight parameters lowerCAmelCase_ , lowerCAmelCase_ = rename_key_and_reshape_tensor(__a , __a , __a ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape " F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." ) # also add unexpected weight so that warning is thrown lowerCAmelCase_ = jnp.asarray(__a ) return unflatten_dict(__a )
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase__ = { '''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ '''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TimesformerModel''', '''TimesformerForVideoClassification''', '''TimesformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timesformer import ( TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimesformerForVideoClassification, TimesformerModel, TimesformerPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase__ = { '''configuration_time_series_transformer''': [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimeSeriesTransformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TimeSeriesTransformerForPrediction''', '''TimeSeriesTransformerModel''', '''TimeSeriesTransformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimeSeriesTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, TimeSeriesTransformerPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import copy import os from typing import TYPE_CHECKING, List, Union if TYPE_CHECKING: pass from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { '''kakaobrain/align-base''': '''https://huggingface.co/kakaobrain/align-base/resolve/main/config.json''', } class __magic_name__ (__lowercase ): lowerCamelCase__ = '''align_text_model''' def __init__( self , _a=30522 , _a=768 , _a=12 , _a=12 , _a=3072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.0_2 , _a=1E-12 , _a=0 , _a="absolute" , _a=True , **_a , ) -> List[str]: super().__init__(**_a ) lowerCAmelCase_ = vocab_size lowerCAmelCase_ = hidden_size lowerCAmelCase_ = num_hidden_layers lowerCAmelCase_ = num_attention_heads lowerCAmelCase_ = hidden_act lowerCAmelCase_ = intermediate_size lowerCAmelCase_ = hidden_dropout_prob lowerCAmelCase_ = attention_probs_dropout_prob lowerCAmelCase_ = max_position_embeddings lowerCAmelCase_ = type_vocab_size lowerCAmelCase_ = initializer_range lowerCAmelCase_ = layer_norm_eps lowerCAmelCase_ = position_embedding_type lowerCAmelCase_ = use_cache lowerCAmelCase_ = pad_token_id @classmethod def __a ( cls , _a , **_a ) -> "PretrainedConfig": cls._set_token_in_kwargs(_a ) lowerCAmelCase_ , lowerCAmelCase_ = cls.get_config_dict(_a , **_a ) # get the text config dict if we are loading from AlignConfig if config_dict.get("model_type" ) == "align": lowerCAmelCase_ = config_dict["text_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." 
) return cls.from_dict(_a , **_a ) class __magic_name__ (__lowercase ): lowerCamelCase__ = '''align_vision_model''' def __init__( self , _a = 3 , _a = 600 , _a = 2.0 , _a = 3.1 , _a = 8 , _a = [3, 3, 5, 3, 5, 5, 3] , _a = [32, 16, 24, 40, 80, 112, 192] , _a = [16, 24, 40, 80, 112, 192, 320] , _a = [] , _a = [1, 2, 2, 2, 1, 2, 1] , _a = [1, 2, 2, 3, 3, 4, 1] , _a = [1, 6, 6, 6, 6, 6, 6] , _a = 0.2_5 , _a = "swish" , _a = 2560 , _a = "mean" , _a = 0.0_2 , _a = 0.0_0_1 , _a = 0.9_9 , _a = 0.2 , **_a , ) -> int: super().__init__(**_a ) lowerCAmelCase_ = num_channels lowerCAmelCase_ = image_size lowerCAmelCase_ = width_coefficient lowerCAmelCase_ = depth_coefficient lowerCAmelCase_ = depth_divisor lowerCAmelCase_ = kernel_sizes lowerCAmelCase_ = in_channels lowerCAmelCase_ = out_channels lowerCAmelCase_ = depthwise_padding lowerCAmelCase_ = strides lowerCAmelCase_ = num_block_repeats lowerCAmelCase_ = expand_ratios lowerCAmelCase_ = squeeze_expansion_ratio lowerCAmelCase_ = hidden_act lowerCAmelCase_ = hidden_dim lowerCAmelCase_ = pooling_type lowerCAmelCase_ = initializer_range lowerCAmelCase_ = batch_norm_eps lowerCAmelCase_ = batch_norm_momentum lowerCAmelCase_ = drop_connect_rate lowerCAmelCase_ = sum(_a ) * 4 @classmethod def __a ( cls , _a , **_a ) -> "PretrainedConfig": cls._set_token_in_kwargs(_a ) lowerCAmelCase_ , lowerCAmelCase_ = cls.get_config_dict(_a , **_a ) # get the vision config dict if we are loading from AlignConfig if config_dict.get("model_type" ) == "align": lowerCAmelCase_ = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(_a , **_a ) class __magic_name__ (__lowercase ): lowerCamelCase__ = '''align''' lowerCamelCase__ = True def __init__( self , _a=None , _a=None , _a=640 , _a=1.0 , _a=0.0_2 , **_a , ) -> Any: super().__init__(**_a ) if text_config is None: lowerCAmelCase_ = {} logger.info("text_config is None. Initializing the AlignTextConfig with default values." ) if vision_config is None: lowerCAmelCase_ = {} logger.info("vision_config is None. Initializing the AlignVisionConfig with default values." ) lowerCAmelCase_ = AlignTextConfig(**_a ) lowerCAmelCase_ = AlignVisionConfig(**_a ) lowerCAmelCase_ = projection_dim lowerCAmelCase_ = temperature_init_value lowerCAmelCase_ = initializer_range @classmethod def __a ( cls , _a , _a , **_a ) -> Optional[int]: return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_a ) def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = copy.deepcopy(self.__dict__ ) lowerCAmelCase_ = self.text_config.to_dict() lowerCAmelCase_ = self.vision_config.to_dict() lowerCAmelCase_ = self.__class__.model_type return output
22
import math


def perfect_square(num: int) -> bool:
    """Check if a number is a perfect square via math.sqrt (float based, fast)."""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check if a number is a perfect square using binary search (integer exact)."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''label_embs_concat''': '''label_embeddings_concat''', '''mask_emb''': '''masked_spec_embed''', '''spk_proj''': '''speaker_proj''', } lowerCamelCase__ = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', '''label_embeddings_concat''', '''speaker_proj''', '''layer_norm_for_extract''', ] def A(__a: Tuple , __a: List[str] , __a: Any , __a: List[Any] , __a: List[Any] ): for attribute in key.split("." ): lowerCAmelCase_ = getattr(__a , __a ) if weight_type is not None: lowerCAmelCase_ = getattr(__a , __a ).shape else: lowerCAmelCase_ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" F" {value.shape} for {full_name}" ) if weight_type == "weight": lowerCAmelCase_ = value elif weight_type == "weight_g": lowerCAmelCase_ = value elif weight_type == "weight_v": lowerCAmelCase_ = value elif weight_type == "bias": lowerCAmelCase_ = value else: lowerCAmelCase_ = value logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." ) def A(__a: Any , __a: Any ): lowerCAmelCase_ = [] lowerCAmelCase_ = fairseq_model.state_dict() lowerCAmelCase_ = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): lowerCAmelCase_ = False if "conv_layers" in name: load_conv_layer( __a , __a , __a , __a , hf_model.config.feat_extract_norm == "group" , ) lowerCAmelCase_ = True else: for key, mapped_key in MAPPING.items(): lowerCAmelCase_ = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key): # special case since naming is very similar continue lowerCAmelCase_ = True if "*" in mapped_key: lowerCAmelCase_ = name.split(__a )[0].split("." 
)[-2] lowerCAmelCase_ = mapped_key.replace("*" , __a ) if "weight_g" in name: lowerCAmelCase_ = "weight_g" elif "weight_v" in name: lowerCAmelCase_ = "weight_v" elif "bias" in name: lowerCAmelCase_ = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj lowerCAmelCase_ = "weight" else: lowerCAmelCase_ = None set_recursively(__a , __a , __a , __a , __a ) continue if not is_used: unused_weights.append(__a ) logger.warning(F"Unused weights: {unused_weights}" ) def A(__a: Optional[int] , __a: Union[str, Any] , __a: str , __a: Tuple , __a: int ): lowerCAmelCase_ = full_name.split("conv_layers." )[-1] lowerCAmelCase_ = name.split("." ) lowerCAmelCase_ = int(items[0] ) lowerCAmelCase_ = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) lowerCAmelCase_ = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) lowerCAmelCase_ = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"{full_name} has size {value.shape}, but" F" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found." ) lowerCAmelCase_ = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"{full_name} has size {value.shape}, but" F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." ) lowerCAmelCase_ = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." 
) else: unused_weights.append(__a ) @torch.no_grad() def A(__a: Dict , __a: Dict , __a: List[Any]=None , __a: Optional[Any]=None , __a: Optional[int]=True ): if config_path is not None: lowerCAmelCase_ = UniSpeechSatConfig.from_pretrained(__a ) else: lowerCAmelCase_ = UniSpeechSatConfig() lowerCAmelCase_ = "" if is_finetuned: lowerCAmelCase_ = UniSpeechSatForCTC(__a ) else: lowerCAmelCase_ = UniSpeechSatForPreTraining(__a ) lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) lowerCAmelCase_ = model[0].eval() recursively_load_weights(__a , __a ) hf_wavavec.save_pretrained(__a ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) lowerCamelCase__ = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
import sys

from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
    pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
    pkgs_to_check_at_runtime.append("importlib_metadata")

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
import argparse import json import re from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileNetVaConfig, MobileNetVaForImageClassification, MobileNetVaImageProcessor, load_tf_weights_in_mobilenet_va, ) from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase__ = logging.get_logger(__name__) def A(__a: Tuple ): lowerCAmelCase_ = MobileNetVaConfig(layer_norm_eps=0.001 ) if "_quant" in model_name: raise ValueError("Quantized models are not supported." ) lowerCAmelCase_ = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$" , __a ) if matches: lowerCAmelCase_ = float(matches[1] ) lowerCAmelCase_ = int(matches[2] ) # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of # the usual 1000. The first class (index 0) is "background". lowerCAmelCase_ = 1001 lowerCAmelCase_ = "imagenet-1k-id2label.json" lowerCAmelCase_ = "huggingface/label-files" lowerCAmelCase_ = json.load(open(hf_hub_download(__a , __a , repo_type="dataset" ) , "r" ) ) lowerCAmelCase_ = {int(__a ) + 1: v for k, v in idalabel.items()} lowerCAmelCase_ = "background" lowerCAmelCase_ = idalabel lowerCAmelCase_ = {v: k for k, v in idalabel.items()} return config def A(): lowerCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg" lowerCAmelCase_ = Image.open(requests.get(__a , stream=__a ).raw ) return im @torch.no_grad() def A(__a: List[Any] , __a: Dict , __a: Dict , __a: int=False ): lowerCAmelCase_ = get_mobilenet_va_config(__a ) # Load 🤗 model lowerCAmelCase_ = MobileNetVaForImageClassification(__a ).eval() # Load weights from TensorFlow checkpoint load_tf_weights_in_mobilenet_va(__a , __a , __a ) # Check outputs on an image, prepared by MobileNetV1ImageProcessor lowerCAmelCase_ = MobileNetVaImageProcessor( crop_size={"width": config.image_size, "height": config.image_size} , size={"shortest_edge": config.image_size + 32} , ) lowerCAmelCase_ = image_processor(images=prepare_img() , return_tensors="pt" ) lowerCAmelCase_ = model(**__a ) lowerCAmelCase_ = outputs.logits assert logits.shape == (1, 1001) if model_name == "mobilenet_v1_1.0_224": lowerCAmelCase_ = torch.tensor([-4.1739, -1.1233, 3.1205] ) elif model_name == "mobilenet_v1_0.75_192": lowerCAmelCase_ = torch.tensor([-3.9440, -2.3141, -0.3333] ) else: lowerCAmelCase_ = None if expected_logits is not None: assert torch.allclose(logits[0, :3] , __a , atol=1E-4 ) Path(__a ).mkdir(exist_ok=__a ) print(F"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(__a ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(__a ) if push_to_hub: print("Pushing to the hub..." ) lowerCAmelCase_ = "google/" + model_name image_processor.push_to_hub(__a ) model.push_to_hub(__a ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''mobilenet_v1_1.0_224''', type=str, help='''Name of the MobileNetV1 model you\'d like to convert. 
Should in the form \'mobilenet_v1_<depth>_<size>\'.''', ) parser.add_argument( '''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) lowerCamelCase__ = parser.parse_args() convert_movilevit_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
import argparse import os from pathlib import Path import fairseq import torch from packaging import version from torch import nn from transformers import ( BartConfig, BartForConditionalGeneration, BartForSequenceClassification, BartModel, BartTokenizer, ) from transformers.utils import logging lowerCamelCase__ = ['''bart.large''', '''bart.large.mnli''', '''bart.large.cnn''', '''bart_xsum/model.pt'''] lowerCamelCase__ = {'''bart.large''': BartModel, '''bart.large.mnli''': BartForSequenceClassification} if version.parse(fairseq.__version__) < version.parse('''0.9.0'''): raise Exception('''requires fairseq >= 0.9.0''') logging.set_verbosity_info() lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = ''' Hello world! cécé herlolip''' lowerCamelCase__ = [ ('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''), ('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''), ('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''), ('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''), ] def A(__a: Any ): lowerCAmelCase_ = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "_float_tensor", ] for k in ignore_keys: state_dict.pop(__a , __a ) def A(__a: Optional[int] , __a: List[Any] , __a: Union[str, Any] ): lowerCAmelCase_ = dct.pop(__a ) lowerCAmelCase_ = val def A(__a: Tuple ): lowerCAmelCase_ = torch.load(__a , map_location="cpu" ) lowerCAmelCase_ = torch.hub.load("pytorch/fairseq" , "bart.large.cnn" ).eval() hub_interface.model.load_state_dict(sd["model"] ) return hub_interface def A(__a: List[str] ): lowerCAmelCase_ , lowerCAmelCase_ = emb.weight.shape lowerCAmelCase_ = nn.Linear(__a , __a , bias=__a ) lowerCAmelCase_ = emb.weight.data return lin_layer @torch.no_grad() def A(__a: Tuple , __a: Union[str, Any] , __a: str=None ): if not os.path.exists(__a ): lowerCAmelCase_ = torch.hub.load("pytorch/fairseq" , __a ).eval() else: lowerCAmelCase_ = load_xsum_checkpoint(__a ) bart.model.upgrade_state_dict(bart.model.state_dict() ) if hf_checkpoint_name is None: lowerCAmelCase_ = checkpoint_path.replace("." 
, "-" ) lowerCAmelCase_ = BartConfig.from_pretrained(__a ) lowerCAmelCase_ = bart.encode(__a ).unsqueeze(0 ) lowerCAmelCase_ = BartTokenizer.from_pretrained(__a ).encode(__a , return_tensors="pt" ).unsqueeze(0 ) if not torch.eq(__a , __a ).all(): raise ValueError( F"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}" ) if checkpoint_path == "bart.large.mnli": lowerCAmelCase_ = bart.state_dict() remove_ignore_keys_(__a ) lowerCAmelCase_ = state_dict["model.decoder.embed_tokens.weight"] for src, dest in mnli_rename_keys: rename_key(__a , __a , __a ) lowerCAmelCase_ = BartForSequenceClassification(__a ).eval() model.load_state_dict(__a ) lowerCAmelCase_ = bart.predict("mnli" , __a , return_logits=__a ) lowerCAmelCase_ = model(__a )[0] # logits else: # no classification heads to worry about lowerCAmelCase_ = bart.model.state_dict() remove_ignore_keys_(__a ) lowerCAmelCase_ = state_dict["decoder.embed_tokens.weight"] lowerCAmelCase_ = bart.extract_features(__a ) if hf_checkpoint_name == "facebook/bart-large": lowerCAmelCase_ = BartModel(__a ).eval() model.load_state_dict(__a ) lowerCAmelCase_ = model(__a ).model[0] else: lowerCAmelCase_ = BartForConditionalGeneration(__a ).eval() # an existing summarization ckpt model.model.load_state_dict(__a ) if hasattr(__a , "lm_head" ): lowerCAmelCase_ = make_linear_from_emb(model.model.shared ) lowerCAmelCase_ = model.model(__a )[0] # Check results if fairseq_output.shape != new_model_outputs.shape: raise ValueError( F"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}" ) if (fairseq_output != new_model_outputs).any().item(): raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`" ) Path(__a ).mkdir(exist_ok=__a ) model.save_pretrained(__a ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum''' ) lowerCamelCase__ = parser.parse_args() convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin, SchedulerOutput @dataclass class __magic_name__ (__lowercase ): lowerCamelCase__ = 42 lowerCamelCase__ = 42 class __magic_name__ (__lowercase , __lowercase ): lowerCamelCase__ = 1 @register_to_config def __init__( self , _a = 2000 , _a = 0.1_5 , _a = 0.0_1 , _a = 1_3_4_8.0 , _a = 1E-5 , _a = 1 , ) -> Tuple: # standard deviation of the initial noise distribution lowerCAmelCase_ = sigma_max # setable values lowerCAmelCase_ = None self.set_sigmas(_a , _a , _a , _a ) def __a ( self , _a , _a = None ) -> torch.FloatTensor: return sample def __a ( self , _a , _a = None , _a = None ) -> Any: lowerCAmelCase_ = sampling_eps if sampling_eps is not None else self.config.sampling_eps lowerCAmelCase_ = torch.linspace(1 , _a , _a , device=_a ) def __a ( self , _a , _a = None , _a = None , _a = None ) -> Optional[int]: lowerCAmelCase_ = sigma_min if sigma_min is not None else self.config.sigma_min lowerCAmelCase_ = sigma_max if sigma_max is not None else self.config.sigma_max lowerCAmelCase_ = sampling_eps if sampling_eps is not None else self.config.sampling_eps if self.timesteps is None: self.set_timesteps(_a , _a ) lowerCAmelCase_ = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps) lowerCAmelCase_ = torch.exp(torch.linspace(math.log(_a ) , math.log(_a ) , _a ) ) lowerCAmelCase_ = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] ) def __a ( self , _a , _a ) -> int: return torch.where( timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , ) def __a ( self , _a , _a , _a , _a = None , _a = True , ) -> Union[SdeVeOutput, Tuple]: if self.timesteps is None: raise ValueError( "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" ) lowerCAmelCase_ = timestep * torch.ones( sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0]) lowerCAmelCase_ = (timestep * (len(self.timesteps ) - 1)).long() # mps requires indices to be in the same device, so we use cpu as is the default with cuda lowerCAmelCase_ = timesteps.to(self.discrete_sigmas.device ) lowerCAmelCase_ = self.discrete_sigmas[timesteps].to(sample.device ) lowerCAmelCase_ = self.get_adjacent_sigma(_a , _a ).to(sample.device ) lowerCAmelCase_ = torch.zeros_like(_a ) lowerCAmelCase_ = (sigma**2 - adjacent_sigma**2) ** 0.5 # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x) # also equation 47 shows the analog from SDE models to ancestral sampling methods lowerCAmelCase_ = diffusion.flatten() while len(diffusion.shape ) < len(sample.shape ): lowerCAmelCase_ = diffusion.unsqueeze(-1 ) lowerCAmelCase_ = drift - diffusion**2 * model_output # equation 6: sample noise for the diffusion term of lowerCAmelCase_ = randn_tensor( sample.shape , layout=sample.layout , generator=_a , device=sample.device , dtype=sample.dtype ) lowerCAmelCase_ = sample - drift # subtract because `dt` is a small negative timestep # TODO is the variable diffusion the correct scaling term for the noise? 
lowerCAmelCase_ = prev_sample_mean + diffusion * noise # add impact of diffusion field g if not return_dict: return (prev_sample, prev_sample_mean) return SdeVeOutput(prev_sample=_a , prev_sample_mean=_a ) def __a ( self , _a , _a , _a = None , _a = True , ) -> Union[SchedulerOutput, Tuple]: if self.timesteps is None: raise ValueError( "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" ) # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z" # sample noise for correction lowerCAmelCase_ = randn_tensor(sample.shape , layout=sample.layout , generator=_a ).to(sample.device ) # compute step size from the model_output, the noise, and the snr lowerCAmelCase_ = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean() lowerCAmelCase_ = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean() lowerCAmelCase_ = (self.config.snr * noise_norm / grad_norm) ** 2 * 2 lowerCAmelCase_ = step_size * torch.ones(sample.shape[0] ).to(sample.device ) # self.repeat_scalar(step_size, sample.shape[0]) # compute corrected sample: model_output term and noise term lowerCAmelCase_ = step_size.flatten() while len(step_size.shape ) < len(sample.shape ): lowerCAmelCase_ = step_size.unsqueeze(-1 ) lowerCAmelCase_ = sample + step_size * model_output lowerCAmelCase_ = prev_sample_mean + ((step_size * 2) ** 0.5) * noise if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=_a ) def __a ( self , _a , _a , _a , ) -> torch.FloatTensor: # Make sure sigmas and timesteps have the same device and dtype as original_samples lowerCAmelCase_ = timesteps.to(original_samples.device ) lowerCAmelCase_ = self.discrete_sigmas.to(original_samples.device )[timesteps] lowerCAmelCase_ = ( noise * sigmas[:, None, None, None] if noise is not None else torch.randn_like(_a ) * sigmas[:, None, None, None] ) lowerCAmelCase_ = noise + original_samples return noisy_samples def __len__( self ) -> Union[str, Any]: return self.config.num_train_timesteps
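# The scheduler above is the variance-exploding SDE (SDE-VE) sampler: a full
# denoising pass alternates corrector steps (Langevin dynamics) with one
# predictor step (reverse-SDE) per noise level. A minimal sketch, assuming the
# public diffusers names (ScoreSdeVeScheduler with step_correct/step_pred, and
# UNet2DModel) that this name-mangled listing corresponds to; the toy UNet is
# untrained and the hyperparameters are arbitrary.
import torch
from diffusers import ScoreSdeVeScheduler, UNet2DModel

model = UNet2DModel(sample_size=32, in_channels=3, out_channels=3)
scheduler = ScoreSdeVeScheduler(num_train_timesteps=10, sigma_min=0.01, sigma_max=100.0)

scheduler.set_timesteps(num_inference_steps=10)
scheduler.set_sigmas(num_inference_steps=10)

# start from pure noise at the largest sigma
sample = torch.randn(1, 3, 32, 32) * scheduler.config.sigma_max
for i, t in enumerate(scheduler.timesteps):
    sigma_t = scheduler.sigmas[i] * torch.ones(sample.shape[0])
    # corrector: Langevin MCMC step(s) at the current noise level
    for _ in range(scheduler.config.correct_steps):
        score = model(sample, sigma_t).sample
        sample = scheduler.step_correct(score, sample).prev_sample
    # predictor: one reverse-SDE step toward the next (lower) noise level
    score = model(sample, sigma_t).sample
    sample = scheduler.step_pred(score, t, sample).prev_sample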
22
import os import unittest from transformers import MobileBertTokenizer, MobileBertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class __magic_name__ (__lowercase , unittest.TestCase ): lowerCamelCase__ = MobileBertTokenizer lowerCamelCase__ = MobileBertTokenizerFast lowerCamelCase__ = True lowerCamelCase__ = True lowerCamelCase__ = filter_non_english lowerCamelCase__ = '''google/mobilebert-uncased''' def __a ( self ) -> Optional[Any]: super().setUp() lowerCAmelCase_ = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) lowerCAmelCase_ = [ (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped for tokenizer_def in self.tokenizers_list ] def __a ( self , _a ) -> Any: lowerCAmelCase_ = "UNwant\u00E9d,running" lowerCAmelCase_ = "unwanted, running" return input_text, output_text def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = self.tokenizer_class(self.vocab_file ) lowerCAmelCase_ = tokenizer.tokenize("UNwant\u00E9d,running" ) self.assertListEqual(_a , ["un", "##want", "##ed", ",", "runn", "##ing"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [9, 6, 7, 12, 10, 11] ) def __a ( self ) -> Tuple: if not self.test_rust_tokenizer: return lowerCAmelCase_ = self.get_tokenizer() lowerCAmelCase_ = self.get_rust_tokenizer() lowerCAmelCase_ = "UNwant\u00E9d,running" lowerCAmelCase_ = tokenizer.tokenize(_a ) lowerCAmelCase_ = rust_tokenizer.tokenize(_a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = rust_tokenizer.encode(_a , add_special_tokens=_a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = self.get_rust_tokenizer() lowerCAmelCase_ = tokenizer.encode(_a ) lowerCAmelCase_ = rust_tokenizer.encode(_a ) self.assertListEqual(_a , _a ) # With lower casing lowerCAmelCase_ = self.get_tokenizer(do_lower_case=_a ) lowerCAmelCase_ = self.get_rust_tokenizer(do_lower_case=_a ) lowerCAmelCase_ = "UNwant\u00E9d,running" lowerCAmelCase_ = tokenizer.tokenize(_a ) lowerCAmelCase_ = rust_tokenizer.tokenize(_a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = rust_tokenizer.encode(_a , add_special_tokens=_a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = self.get_rust_tokenizer() lowerCAmelCase_ = tokenizer.encode(_a ) lowerCAmelCase_ = rust_tokenizer.encode(_a ) self.assertListEqual(_a , _a ) def __a ( self ) -> Any: lowerCAmelCase_ = BasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] ) def __a ( self ) -> Dict: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? 
" ) , ["hello", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def __a ( self ) -> List[Any]: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] ) def __a ( self ) -> str: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def __a ( self ) -> str: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def __a ( self ) -> str: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] ) def __a ( self ) -> List[str]: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] ) def __a ( self ) -> Any: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , never_split=["[UNK]"] ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] ) def __a ( self ) -> Any: lowerCAmelCase_ = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] lowerCAmelCase_ = {} for i, token in enumerate(_a ): lowerCAmelCase_ = i lowerCAmelCase_ = WordpieceTokenizer(vocab=_a , unk_token="[UNK]" ) self.assertListEqual(tokenizer.tokenize("" ) , [] ) self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] ) self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] ) def __a ( self ) -> Optional[int]: self.assertTrue(_is_whitespace(" " ) ) self.assertTrue(_is_whitespace("\t" ) ) self.assertTrue(_is_whitespace("\r" ) ) self.assertTrue(_is_whitespace("\n" ) ) self.assertTrue(_is_whitespace("\u00A0" ) ) self.assertFalse(_is_whitespace("A" ) ) self.assertFalse(_is_whitespace("-" ) ) def __a ( self ) -> List[str]: self.assertTrue(_is_control("\u0005" ) ) self.assertFalse(_is_control("A" ) ) self.assertFalse(_is_control(" " ) ) self.assertFalse(_is_control("\t" ) ) self.assertFalse(_is_control("\r" ) ) def __a ( self ) -> Dict: self.assertTrue(_is_punctuation("-" ) ) self.assertTrue(_is_punctuation("$" ) ) self.assertTrue(_is_punctuation("`" ) ) self.assertTrue(_is_punctuation("." 
) ) self.assertFalse(_is_punctuation("A" ) ) self.assertFalse(_is_punctuation(" " ) ) def __a ( self ) -> Any: lowerCAmelCase_ = self.get_tokenizer() lowerCAmelCase_ = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(_a ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) self.assertListEqual( [rust_tokenizer.tokenize(_a ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) @slow def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = self.tokenizer_class.from_pretrained("google/mobilebert-uncased" ) lowerCAmelCase_ = tokenizer.encode("sequence builders" , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer.encode("multi-sequence build" , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_a ) lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_a , _a ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def __a ( self ) -> Union[str, Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence." lowerCAmelCase_ = tokenizer_r.encode_plus( _a , return_attention_mask=_a , return_token_type_ids=_a , return_offsets_mapping=_a , add_special_tokens=_a , ) lowerCAmelCase_ = tokenizer_r.do_lower_case if hasattr(_a , "do_lower_case" ) else False lowerCAmelCase_ = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "A"), ((1, 2), ","), ((3, 5), "na"), ((5, 6), "##ï"), ((6, 8), "##ve"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "Allen"), ((21, 23), "##NL"), ((23, 24), "##P"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "a"), ((1, 2), ","), ((3, 8), "naive"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "allen"), ((21, 23), "##nl"), ((23, 24), "##p"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) ) self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] ) def __a ( self ) -> Optional[int]: lowerCAmelCase_ = ["的", "人", "有"] lowerCAmelCase_ = "".join(_a ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): lowerCAmelCase_ = True lowerCAmelCase_ = self.tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = tokenizer_p.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer_r.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer_r.convert_ids_to_tokens(_a ) lowerCAmelCase_ = tokenizer_p.convert_ids_to_tokens(_a ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(_a , _a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = False lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = self.tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = tokenizer_r.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer_p.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer_r.convert_ids_to_tokens(_a ) lowerCAmelCase_ = 
tokenizer_p.convert_ids_to_tokens(_a ) # it is expected that only the first Chinese character is not preceded by "##". lowerCAmelCase_ = [ f"##{token}" if idx != 0 else token for idx, token in enumerate(_a ) ] self.assertListEqual(_a , _a ) self.assertListEqual(_a , _a )
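The subword expectations these tests encode (`"unwanted"` becoming `["un", "##want", "##ed"]`, and a word with any unmatched piece collapsing to `"[UNK]"`) follow from WordPiece's greedy longest-match-first scan. Below is a minimal sketch of that algorithm, assuming a plain `set` vocabulary rather than the library's tokenizer class:

```python
def wordpiece_tokenize(word: str, vocab: set, unk_token: str = "[UNK]") -> list:
    # Greedy longest-match-first: at each position, take the longest vocab
    # entry that matches; continuation pieces are prefixed with "##".
    tokens, start = [], 0
    while start < len(word):
        end, cur_substr = len(word), None
        while start < end:
            substr = word[start:end]
            if start > 0:
                substr = "##" + substr
            if substr in vocab:
                cur_substr = substr
                break
            end -= 1
        if cur_substr is None:
            return [unk_token]  # any unmatched piece makes the whole word unknown
        tokens.append(cur_substr)
        start = end
    return tokens


vocab = {"un", "##want", "##ed", "wa", "runn", "##ing"}
assert wordpiece_tokenize("unwanted", vocab) == ["un", "##want", "##ed"]
assert wordpiece_tokenize("unwantedX", vocab) == ["[UNK]"]
```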
22
1
# Usage: # ./gen-card-facebook-wmt19.py import os from pathlib import Path def A(__a: Any , __a: Union[str, Any] , __a: List[str] ): lowerCAmelCase_ = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, oder?", } # BLUE scores as follows: # "pair": [fairseq, transformers] lowerCAmelCase_ = { "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"], "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"], "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"], "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"], } lowerCAmelCase_ = F"{src_lang}-{tgt_lang}" lowerCAmelCase_ = F"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. 
For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n" os.makedirs(__a , exist_ok=__a ) lowerCAmelCase_ = os.path.join(__a , "README.md" ) print(F"Generating {path}" ) with open(__a , "w" , encoding="utf-8" ) as f: f.write(__a ) # make sure we are under the root of the project lowerCamelCase__ = Path(__file__).resolve().parent.parent.parent lowerCamelCase__ = repo_dir / '''model_cards''' for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = model_name.split('''-''') lowerCamelCase__ = model_cards_dir / '''facebook''' / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
22
import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
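A quick sanity check of the pieces above on a small bound, run in the same module as the definitions: the primes below 10 are 2, 3, 5 and 7, which sum to 17.

```python
assert list(takewhile(lambda x: x < 10, prime_generator())) == [2, 3, 5, 7]
assert solution(10) == 2 + 3 + 5 + 7 == 17
```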
22
1
from math import factorial


def solution(n: int = 20) -> int:
    # The number of lattice paths through an n x n grid is the central binomial
    # coefficient C(2n, n): the middle entry of row 2n of Pascal's triangle.
    n = 2 * n
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
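Since the formula reduces to the central binomial coefficient C(2n, n), `math.comb` provides an exact integer cross-check that sidesteps the float division above; for n = 20 both give 137846528820.

```python
from math import comb

assert solution(20) == comb(40, 20) == 137846528820
```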
22
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { '''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''', '''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''', '''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''', '''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''', # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2 } class __magic_name__ (__lowercase ): lowerCamelCase__ = '''mobilenet_v2''' def __init__( self , _a=3 , _a=224 , _a=1.0 , _a=8 , _a=8 , _a=6 , _a=32 , _a=True , _a=True , _a="relu6" , _a=True , _a=0.8 , _a=0.0_2 , _a=0.0_0_1 , _a=255 , **_a , ) -> Dict: super().__init__(**_a ) if depth_multiplier <= 0: raise ValueError("depth_multiplier must be greater than zero." ) lowerCAmelCase_ = num_channels lowerCAmelCase_ = image_size lowerCAmelCase_ = depth_multiplier lowerCAmelCase_ = depth_divisible_by lowerCAmelCase_ = min_depth lowerCAmelCase_ = expand_ratio lowerCAmelCase_ = output_stride lowerCAmelCase_ = first_layer_is_expansion lowerCAmelCase_ = finegrained_output lowerCAmelCase_ = hidden_act lowerCAmelCase_ = tf_padding lowerCAmelCase_ = classifier_dropout_prob lowerCAmelCase_ = initializer_range lowerCAmelCase_ = layer_norm_eps lowerCAmelCase_ = semantic_loss_ignore_index class __magic_name__ (__lowercase ): lowerCamelCase__ = version.parse('''1.11''' ) @property def __a ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict([("pixel_values", {0: "batch"})] ) @property def __a ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "image-classification": return OrderedDict([("logits", {0: "batch"})] ) else: return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] ) @property def __a ( self ) -> float: return 1E-4
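The `depth_multiplier`, `depth_divisible_by` and `min_depth` options interact through the channel-rounding rule used throughout MobileNet implementations: scaled channel counts are rounded to a multiple of the divisor without dropping more than 10% below the scaled value. The helper below is a sketch of that standard rule; it is not part of the config class itself:

```python
def make_divisible(value: float, divisor: int = 8, min_value=None) -> int:
    # Round `value` to the nearest multiple of `divisor`, bounded below by `min_value`.
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    if new_value < 0.9 * value:  # never drop more than 10% below the target
        new_value += divisor
    return int(new_value)


# e.g. a 32-channel layer under depth_multiplier=1.4: 32 * 1.4 = 44.8 -> 48
assert make_divisible(32 * 1.4, divisor=8) == 48
```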
22
1
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { '''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''', } class __magic_name__ (__lowercase ): lowerCamelCase__ = '''layoutlmv3''' def __init__( self , _a=50265 , _a=768 , _a=12 , _a=12 , _a=3072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.0_2 , _a=1E-5 , _a=1 , _a=0 , _a=2 , _a=1024 , _a=128 , _a=128 , _a=True , _a=32 , _a=128 , _a=64 , _a=256 , _a=True , _a=True , _a=True , _a=224 , _a=3 , _a=16 , _a=None , **_a , ) -> Dict: super().__init__( vocab_size=_a , hidden_size=_a , num_hidden_layers=_a , num_attention_heads=_a , intermediate_size=_a , hidden_act=_a , hidden_dropout_prob=_a , attention_probs_dropout_prob=_a , max_position_embeddings=_a , type_vocab_size=_a , initializer_range=_a , layer_norm_eps=_a , pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a , ) lowerCAmelCase_ = max_ad_position_embeddings lowerCAmelCase_ = coordinate_size lowerCAmelCase_ = shape_size lowerCAmelCase_ = has_relative_attention_bias lowerCAmelCase_ = rel_pos_bins lowerCAmelCase_ = max_rel_pos lowerCAmelCase_ = has_spatial_attention_bias lowerCAmelCase_ = rel_ad_pos_bins lowerCAmelCase_ = max_rel_ad_pos lowerCAmelCase_ = text_embed lowerCAmelCase_ = visual_embed lowerCAmelCase_ = input_size lowerCAmelCase_ = num_channels lowerCAmelCase_ = patch_size lowerCAmelCase_ = classifier_dropout class __magic_name__ (__lowercase ): lowerCamelCase__ = version.parse('''1.12''' ) @property def __a ( self ) -> Mapping[str, Mapping[int, str]]: # The order of inputs is different for question answering and sequence classification if self.task in ["question-answering", "sequence-classification"]: return OrderedDict( [ ("input_ids", {0: "batch", 1: "sequence"}), ("attention_mask", {0: "batch", 1: "sequence"}), ("bbox", {0: "batch", 1: "sequence"}), ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) else: return OrderedDict( [ ("input_ids", {0: "batch", 1: "sequence"}), ("bbox", {0: "batch", 1: "sequence"}), ("attention_mask", {0: "batch", 1: "sequence"}), ("pixel_values", {0: "batch", 1: "num_channels"}), ] ) @property def __a ( self ) -> float: return 1E-5 @property def __a ( self ) -> int: return 12 def __a ( self , _a , _a = -1 , _a = -1 , _a = False , _a = None , _a = 3 , _a = 40 , _a = 40 , ) -> Mapping[str, Any]: setattr(processor.image_processor , "apply_ocr" , _a ) # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX lowerCAmelCase_ = compute_effective_axis_dimension( _a , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX lowerCAmelCase_ = processor.tokenizer.num_special_tokens_to_add(_a ) lowerCAmelCase_ = compute_effective_axis_dimension( _a , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_a ) # Generate dummy inputs according to compute batch and sequence lowerCAmelCase_ = [[" ".join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size # Generate dummy 
bounding boxes lowerCAmelCase_ = [[[48, 84, 73, 128]]] * batch_size # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) lowerCAmelCase_ = self._generate_dummy_images(_a , _a , _a , _a ) lowerCAmelCase_ = dict( processor( _a , text=_a , boxes=_a , return_tensors=_a , ) ) return inputs
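The dummy-input generation above relies on `compute_effective_axis_dimension` to turn dynamic axes (encoded as -1) into small fixed sizes for ONNX tracing, with the sequence axis reserving room for special tokens. The sketch below shows the assumed behavior; treat the exact semantics as an assumption rather than the library's verbatim implementation:

```python
def compute_effective_axis_dimension(dimension: int, fixed_dimension: int, num_token_to_add: int = 0) -> int:
    # A dynamic axis arrives as -1 (or 0): fall back to the default fixed size,
    # leaving room for the special tokens the tokenizer will insert.
    if dimension <= 0:
        dimension = fixed_dimension - num_token_to_add
    return dimension


# A dynamic sequence axis with [CLS]/[SEP] to add and a default fixed size of 8
# yields 6 slots for real tokens.
assert compute_effective_axis_dimension(-1, fixed_dimension=8, num_token_to_add=2) == 6
```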
22
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set:
    """Iterative depth-first search: returns every vertex reachable from ``start``."""
    explored, stack = {start}, [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop the last element instead of the first one
        # 2) add adjacent elements to the stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
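Because `explored` is a set, the function reports reachability rather than visit order; from `"A"` every vertex of the sample graph is reachable.

```python
assert depth_first_search(G, "A") == {"A", "B", "C", "D", "E", "F", "G"}
assert depth_first_search({"X": []}, "X") == {"X"}  # an isolated vertex reaches only itself
```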
22
1
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCamelCase__ = logging.get_logger(__name__) class __magic_name__ (SCREAMING_SNAKE_CASE_ ): lowerCamelCase__ = ['''pixel_values'''] def __init__( self , _a = True , _a = None , _a = 0.9 , _a = PILImageResampling.BICUBIC , _a = True , _a = None , _a = 1 / 255 , _a = True , _a = True , _a = None , _a = None , **_a , ) -> None: super().__init__(**__a ) lowerCAmelCase_ = size if size is not None else {'shortest_edge': 224} lowerCAmelCase_ = get_size_dict(__a , default_to_square=__a ) lowerCAmelCase_ = crop_size if crop_size is not None else {'height': 224, 'width': 224} lowerCAmelCase_ = get_size_dict(__a , param_name="crop_size" ) lowerCAmelCase_ = do_resize lowerCAmelCase_ = size lowerCAmelCase_ = crop_pct lowerCAmelCase_ = resample lowerCAmelCase_ = do_center_crop lowerCAmelCase_ = crop_size lowerCAmelCase_ = do_rescale lowerCAmelCase_ = rescale_factor lowerCAmelCase_ = do_normalize lowerCAmelCase_ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN lowerCAmelCase_ = image_std if image_std is not None else IMAGENET_DEFAULT_STD def __a ( self , _a , _a , _a = None , _a = PILImageResampling.BICUBIC , _a = None , **_a , ) -> np.ndarray: lowerCAmelCase_ = get_size_dict(__a , default_to_square=__a ) if "shortest_edge" not in size and ("height" not in size or "width" not in size): raise ValueError(f"size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}" ) if crop_pct is not None: if "shortest_edge" in size: lowerCAmelCase_ = int(size["shortest_edge"] / crop_pct ) elif "height" in size and "width" in size: if size["height"] == size["width"]: lowerCAmelCase_ = int(size["height"] / crop_pct ) else: lowerCAmelCase_ = (int(size["height"] / crop_pct ), int(size["width"] / crop_pct )) else: raise ValueError("Invalid size for resize: {}".format(__a ) ) lowerCAmelCase_ = get_resize_output_image_size(__a , size=__a , default_to_square=__a ) else: if "shortest_edge" in size: lowerCAmelCase_ = get_resize_output_image_size(__a , size=size["shortest_edge"] , default_to_square=__a ) elif "height" in size and "width" in size: lowerCAmelCase_ = (size['height'], size['width']) else: raise ValueError("Invalid size for resize: {}".format(__a ) ) return resize(__a , size=__a , resample=__a , data_format=__a , **__a ) def __a ( self , _a , _a , _a = None , **_a , ) -> np.ndarray: lowerCAmelCase_ = get_size_dict(__a ) if "height" not in size or "width" not in size: raise ValueError(f"size must contain \'height\' and \'width\' as keys. 
Got {size.keys()}" ) return center_crop(__a , size=(size["height"], size["width"]) , data_format=__a , **__a ) def __a ( self , _a , _a , _a = None , **_a , ) -> Any: return rescale(__a , scale=__a , data_format=__a , **__a ) def __a ( self , _a , _a , _a , _a = None , **_a , ) -> np.ndarray: return normalize(__a , mean=__a , std=__a , data_format=__a , **__a ) def __a ( self , _a , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ) -> PIL.Image.Image: lowerCAmelCase_ = do_resize if do_resize is not None else self.do_resize lowerCAmelCase_ = crop_pct if crop_pct is not None else self.crop_pct lowerCAmelCase_ = resample if resample is not None else self.resample lowerCAmelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop lowerCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale lowerCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize lowerCAmelCase_ = image_mean if image_mean is not None else self.image_mean lowerCAmelCase_ = image_std if image_std is not None else self.image_std lowerCAmelCase_ = size if size is not None else self.size lowerCAmelCase_ = get_size_dict(__a , default_to_square=__a ) lowerCAmelCase_ = crop_size if crop_size is not None else self.crop_size lowerCAmelCase_ = get_size_dict(__a , param_name="crop_size" ) lowerCAmelCase_ = make_list_of_images(__a ) if not valid_images(__a ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_center_crop and crop_pct is None: raise ValueError("Crop_pct must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. lowerCAmelCase_ = [to_numpy_array(__a ) for image in images] if do_resize: lowerCAmelCase_ = [self.resize(image=__a , size=__a , crop_pct=__a , resample=__a ) for image in images] if do_center_crop: lowerCAmelCase_ = [self.center_crop(image=__a , size=__a ) for image in images] if do_rescale: lowerCAmelCase_ = [self.rescale(image=__a , scale=__a ) for image in images] if do_normalize: lowerCAmelCase_ = [self.normalize(image=__a , mean=__a , std=__a ) for image in images] lowerCAmelCase_ = [to_channel_dimension_format(__a , __a ) for image in images] lowerCAmelCase_ = {'pixel_values': images} return BatchFeature(data=__a , tensor_type=__a )
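The `crop_pct` branch above implements the common "resize larger, then center-crop" evaluation recipe: with the defaults (`shortest_edge=224`, `crop_pct=0.9`) the short side is first resized to `int(224 / 0.9) = 248` pixels, and a 224x224 center crop is then taken. A minimal numeric illustration of that computation (not library code):

```python
shortest_edge, crop_pct = 224, 0.9

resize_target = int(shortest_edge / crop_pct)  # short side after the initial resize
assert resize_target == 248  # 224 / 0.9 = 248.88..., truncated to 248

crop_size = (224, 224)  # the final center crop, matching the default crop_size
```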
350
def pancake_sort(arr: list) -> list:
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in the unsorted prefix arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi, flipping the maximum to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the whole prefix of length cur, flipping the maximum into place
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
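Each pass flips the current maximum to the front and then flips it into its final slot, so sorting takes at most 2(n - 1) reversals; a quick check:

```python
assert pancake_sort([3, 1, 2]) == [1, 2, 3]
assert pancake_sort([10, -2, 7, 7, 0]) == [-2, 0, 7, 7, 10]
```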
22
0