Dataset schema (column, type, and the value or string-length range shown by the preview):

| Column | Type | Min | Max |
| --- | --- | --- | --- |
| `code` | string (length) | 82 | 54.1k |
| `code_codestyle` | int64 | 0 | 699 |
| `style_context` | string (length) | 111 | 35.6k |
| `style_context_codestyle` | int64 | 0 | 699 |
| `label` | int64 | 0 | 1 |
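The rows below follow this schema: each example pairs a `code` source file with a `style_context` source file, their integer style ids, and a binary `label`. As a minimal sketch of how such a split could be loaded and inspected with the `datasets` library — the Hub id `user/code-style-pairs` is a placeholder, not the dataset's actual name:

```python
from datasets import load_dataset

# Placeholder Hub id -- substitute the dataset's real repository name.
ds = load_dataset("user/code-style-pairs", split="train")

for row in ds.select(range(3)):
    # Each `code` / `style_context` field holds an entire source file as a single string.
    print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
    print(row["code"][:80])
```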
Row 1

code:

```python
'''simple docstring'''
import numpy as np


def __a(SCREAMING_SNAKE_CASE_ : np.array ):
    '''simple docstring'''
    return 1 / (1 + np.exp(-vector ))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
```

code_codestyle: 18

style_context:

```python
'''simple docstring'''
from __future__ import annotations


def __a(SCREAMING_SNAKE_CASE_ : list ):
    '''simple docstring'''
    if not nums:
        raise ValueError("List is empty" )
    return sum(SCREAMING_SNAKE_CASE_ ) / len(SCREAMING_SNAKE_CASE_ )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
```

style_context_codestyle: 18

label: 1
Row 2

code:

```python
'''simple docstring'''
import faiss  # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy  # noqa: F401 # Here to have a nice missing dependency error message early on
import requests  # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn  # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm  # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve  # From: mauve-text

import datasets


_SCREAMING_SNAKE_CASE = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"

_SCREAMING_SNAKE_CASE = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"

_SCREAMING_SNAKE_CASE = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n    predictions: list of generated text to score. Each predictions\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\nOptional Args:\n    num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n    pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n    kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n    kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n    kmeans_max_iter: maximum number of k-means iterations. Default 500\n    featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n    device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n    max_text_length: maximum number of tokens to consider. Default 1024\n    divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n    mauve_scaling_factor: \"c\" from the paper. Default 5.\n    verbose: If True (default), print running time updates\n    seed: random seed to initialize k-means cluster assignments.\nReturns:\n    mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n    frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n    divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n    p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n    q_hist: same as above, but with q_text.\nExamples:\n\n    >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n    >>> import datasets\n    >>> mauve = datasets.load_metric('mauve')\n    >>> predictions = [\"hello there\", \"general kenobi\"]\n    >>> references = [\"hello there\", \"general kenobi\"]\n    >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n    >>> print(out.mauve) # doctest: +SKIP\n    1.0\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
    def _snake_case ( self ) -> Union[str, Any]:
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            homepage="https://github.com/krishnap25/mauve" ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string" , id="sequence" ),
                    "references": datasets.Value("string" , id="sequence" ),
                }
            ) ,
            codebase_urls=["https://github.com/krishnap25/mauve"] ,
            reference_urls=[
                "https://arxiv.org/abs/2102.01454",
                "https://github.com/krishnap25/mauve",
            ] ,
        )

    def _snake_case (
        self ,
        _lowerCAmelCase ,
        _lowerCAmelCase ,
        _lowerCAmelCase=None ,
        _lowerCAmelCase=None ,
        _lowerCAmelCase=None ,
        _lowerCAmelCase=None ,
        _lowerCAmelCase="auto" ,
        _lowerCAmelCase=-1 ,
        _lowerCAmelCase=0.9 ,
        _lowerCAmelCase=5 ,
        _lowerCAmelCase=500 ,
        _lowerCAmelCase="gpt2-large" ,
        _lowerCAmelCase=-1 ,
        _lowerCAmelCase=1024 ,
        _lowerCAmelCase=25 ,
        _lowerCAmelCase=5 ,
        _lowerCAmelCase=True ,
        _lowerCAmelCase=25 ,
    ) -> Optional[int]:
        _lowerCAmelCase = compute_mauve(
            p_text=_lowerCAmelCase ,
            q_text=_lowerCAmelCase ,
            p_features=_lowerCAmelCase ,
            q_features=_lowerCAmelCase ,
            p_tokens=_lowerCAmelCase ,
            q_tokens=_lowerCAmelCase ,
            num_buckets=_lowerCAmelCase ,
            pca_max_data=_lowerCAmelCase ,
            kmeans_explained_var=_lowerCAmelCase ,
            kmeans_num_redo=_lowerCAmelCase ,
            kmeans_max_iter=_lowerCAmelCase ,
            featurize_model_name=_lowerCAmelCase ,
            device_id=_lowerCAmelCase ,
            max_text_length=_lowerCAmelCase ,
            divergence_curve_discretization_size=_lowerCAmelCase ,
            mauve_scaling_factor=_lowerCAmelCase ,
            verbose=_lowerCAmelCase ,
            seed=_lowerCAmelCase ,
        )
        return out
```

code_codestyle: 18

style_context:

```python
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread

from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers import AutoModelForCausalLM


@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
    def _snake_case ( self ) -> Union[str, Any]:
        _lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        _lowerCAmelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_lowerCAmelCase )
        _lowerCAmelCase = -1
        _lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCAmelCase )
        _lowerCAmelCase = model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase )
        _lowerCAmelCase = tokenizer.decode(greedy_ids[0] )

        with CaptureStdout() as cs:
            _lowerCAmelCase = TextStreamer(_lowerCAmelCase )
            model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase , streamer=_lowerCAmelCase )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        _lowerCAmelCase = cs.out[:-1]

        self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )

    def _snake_case ( self ) -> Union[str, Any]:
        _lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        _lowerCAmelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_lowerCAmelCase )
        _lowerCAmelCase = -1
        _lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCAmelCase )
        _lowerCAmelCase = model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase )
        _lowerCAmelCase = tokenizer.decode(greedy_ids[0] )

        _lowerCAmelCase = TextIteratorStreamer(_lowerCAmelCase )
        _lowerCAmelCase = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        _lowerCAmelCase = Thread(target=model.generate , kwargs=_lowerCAmelCase )
        thread.start()
        _lowerCAmelCase = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )

    def _snake_case ( self ) -> List[str]:
        _lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        _lowerCAmelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_lowerCAmelCase )
        _lowerCAmelCase = -1
        _lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCAmelCase )
        _lowerCAmelCase = model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase )
        _lowerCAmelCase = greedy_ids[:, input_ids.shape[1] :]
        _lowerCAmelCase = tokenizer.decode(new_greedy_ids[0] )

        with CaptureStdout() as cs:
            _lowerCAmelCase = TextStreamer(_lowerCAmelCase , skip_prompt=_lowerCAmelCase )
            model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase , streamer=_lowerCAmelCase )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        _lowerCAmelCase = cs.out[:-1]

        self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )

    def _snake_case ( self ) -> Dict:
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        _lowerCAmelCase = AutoTokenizer.from_pretrained("distilgpt2" )
        _lowerCAmelCase = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(_lowerCAmelCase )
        _lowerCAmelCase = -1
        _lowerCAmelCase = torch.ones((1, 5) , device=_lowerCAmelCase ).long() * model.config.bos_token_id

        with CaptureStdout() as cs:
            _lowerCAmelCase = TextStreamer(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
            model.generate(_lowerCAmelCase , max_new_tokens=1 , do_sample=_lowerCAmelCase , streamer=_lowerCAmelCase )

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        _lowerCAmelCase = cs.out[:-1]  # Remove the final "\n"
        _lowerCAmelCase = tokenizer(_lowerCAmelCase , return_tensors="pt" )
        self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )

    def _snake_case ( self ) -> Union[str, Any]:
        _lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        _lowerCAmelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_lowerCAmelCase )
        _lowerCAmelCase = -1
        _lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCAmelCase )

        _lowerCAmelCase = TextIteratorStreamer(_lowerCAmelCase , timeout=0.001 )
        _lowerCAmelCase = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        _lowerCAmelCase = Thread(target=model.generate , kwargs=_lowerCAmelCase )
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(_lowerCAmelCase ):
            _lowerCAmelCase = ""
            for new_text in streamer:
                streamer_text += new_text
```

style_context_codestyle: 18

label: 1
Row 3

code:

```python
'''simple docstring'''
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging


_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)

_SCREAMING_SNAKE_CASE = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}


class lowerCAmelCase_ ( __magic_name__ ):
    __lowerCamelCase : List[str] = "umt5"
    __lowerCamelCase : List[Any] = ["past_key_values"]

    def __init__(
        self ,
        _lowerCAmelCase=250112 ,
        _lowerCAmelCase=512 ,
        _lowerCAmelCase=64 ,
        _lowerCAmelCase=1024 ,
        _lowerCAmelCase=8 ,
        _lowerCAmelCase=None ,
        _lowerCAmelCase=6 ,
        _lowerCAmelCase=32 ,
        _lowerCAmelCase=128 ,
        _lowerCAmelCase=0.1 ,
        _lowerCAmelCase=1E-6 ,
        _lowerCAmelCase=1.0 ,
        _lowerCAmelCase="gated-gelu" ,
        _lowerCAmelCase=True ,
        _lowerCAmelCase=True ,
        _lowerCAmelCase="T5Tokenizer" ,
        _lowerCAmelCase=True ,
        _lowerCAmelCase=0 ,
        _lowerCAmelCase=1 ,
        _lowerCAmelCase=0 ,
        **_lowerCAmelCase ,
    ) -> Dict:
        super().__init__(
            is_encoder_decoder=_lowerCAmelCase ,
            tokenizer_class=_lowerCAmelCase ,
            tie_word_embeddings=_lowerCAmelCase ,
            pad_token_id=_lowerCAmelCase ,
            eos_token_id=_lowerCAmelCase ,
            decoder_start_token_id=_lowerCAmelCase ,
            **_lowerCAmelCase ,
        )
        _lowerCAmelCase = vocab_size
        _lowerCAmelCase = d_model
        _lowerCAmelCase = d_kv
        _lowerCAmelCase = d_ff
        _lowerCAmelCase = num_layers
        _lowerCAmelCase = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        _lowerCAmelCase = num_heads
        _lowerCAmelCase = relative_attention_num_buckets
        _lowerCAmelCase = relative_attention_max_distance
        _lowerCAmelCase = dropout_rate
        _lowerCAmelCase = layer_norm_epsilon
        _lowerCAmelCase = initializer_factor
        _lowerCAmelCase = feed_forward_proj
        _lowerCAmelCase = use_cache

        _lowerCAmelCase = self.feed_forward_proj.split("-" )
        _lowerCAmelCase = act_info[-1]
        _lowerCAmelCase = act_info[0] == "gated"

        if len(_lowerCAmelCase ) > 1 and act_info[0] != "gated" or len(_lowerCAmelCase ) > 2:
            raise ValueError(
                f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == "gated-gelu":
            _lowerCAmelCase = "gelu_new"

    @property
    def _snake_case ( self ) -> Optional[Any]:
        return self.d_model

    @property
    def _snake_case ( self ) -> Tuple:
        return self.num_heads

    @property
    def _snake_case ( self ) -> str:
        return self.num_layers


class lowerCAmelCase_ ( __magic_name__ ):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
        _lowerCAmelCase = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            _lowerCAmelCase = "past_encoder_sequence + sequence"
            _lowerCAmelCase = {0: "batch"}
            _lowerCAmelCase = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            _lowerCAmelCase = {0: "batch", 1: "decoder_sequence"}
            _lowerCAmelCase = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(_lowerCAmelCase , direction="inputs" )

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def _snake_case ( self ) -> int:
        return 13

    @property
    def _snake_case ( self ) -> float:
        return 5E-4
```

code_codestyle: 18

style_context:

```python
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging


_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)

_SCREAMING_SNAKE_CASE = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}


class lowerCAmelCase_ ( __magic_name__ ):
    __lowerCamelCase : Union[str, Any] = "blenderbot-small"
    __lowerCamelCase : Optional[Any] = ["past_key_values"]
    __lowerCamelCase : str = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self ,
        _lowerCAmelCase=50265 ,
        _lowerCAmelCase=512 ,
        _lowerCAmelCase=8 ,
        _lowerCAmelCase=2048 ,
        _lowerCAmelCase=16 ,
        _lowerCAmelCase=8 ,
        _lowerCAmelCase=2048 ,
        _lowerCAmelCase=16 ,
        _lowerCAmelCase=0.0 ,
        _lowerCAmelCase=0.0 ,
        _lowerCAmelCase=True ,
        _lowerCAmelCase=True ,
        _lowerCAmelCase="gelu" ,
        _lowerCAmelCase=512 ,
        _lowerCAmelCase=0.1 ,
        _lowerCAmelCase=0.0 ,
        _lowerCAmelCase=0.0 ,
        _lowerCAmelCase=0.02 ,
        _lowerCAmelCase=1 ,
        _lowerCAmelCase=False ,
        _lowerCAmelCase=0 ,
        _lowerCAmelCase=1 ,
        _lowerCAmelCase=2 ,
        _lowerCAmelCase=2 ,
        **_lowerCAmelCase ,
    ) -> Dict:
        _lowerCAmelCase = vocab_size
        _lowerCAmelCase = max_position_embeddings
        _lowerCAmelCase = d_model
        _lowerCAmelCase = encoder_ffn_dim
        _lowerCAmelCase = encoder_layers
        _lowerCAmelCase = encoder_attention_heads
        _lowerCAmelCase = decoder_ffn_dim
        _lowerCAmelCase = decoder_layers
        _lowerCAmelCase = decoder_attention_heads
        _lowerCAmelCase = dropout
        _lowerCAmelCase = attention_dropout
        _lowerCAmelCase = activation_dropout
        _lowerCAmelCase = activation_function
        _lowerCAmelCase = init_std
        _lowerCAmelCase = encoder_layerdrop
        _lowerCAmelCase = decoder_layerdrop
        _lowerCAmelCase = use_cache
        _lowerCAmelCase = encoder_layers
        _lowerCAmelCase = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=_lowerCAmelCase ,
            bos_token_id=_lowerCAmelCase ,
            eos_token_id=_lowerCAmelCase ,
            is_encoder_decoder=_lowerCAmelCase ,
            decoder_start_token_id=_lowerCAmelCase ,
            forced_eos_token_id=_lowerCAmelCase ,
            **_lowerCAmelCase ,
        )


class lowerCAmelCase_ ( __magic_name__ ):
    @property
    def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            _lowerCAmelCase = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                _lowerCAmelCase = {0: "batch"}
                _lowerCAmelCase = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                _lowerCAmelCase = {0: "batch", 1: "decoder_sequence"}
                _lowerCAmelCase = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(_lowerCAmelCase , direction="inputs" )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            _lowerCAmelCase = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                _lowerCAmelCase , _lowerCAmelCase = self.num_layers
                for i in range(_lowerCAmelCase ):
                    _lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"}
                    _lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"}
        else:
            _lowerCAmelCase = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            _lowerCAmelCase = super().outputs
        else:
            _lowerCAmelCase = super(_lowerCAmelCase , self ).outputs
            if self.use_past:
                _lowerCAmelCase , _lowerCAmelCase = self.num_layers
                for i in range(_lowerCAmelCase ):
                    _lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"}
                    _lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _snake_case (
        self ,
        _lowerCAmelCase ,
        _lowerCAmelCase = -1 ,
        _lowerCAmelCase = -1 ,
        _lowerCAmelCase = False ,
        _lowerCAmelCase = None ,
    ) -> Mapping[str, Any]:
        _lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )

        # Generate decoder inputs
        _lowerCAmelCase = seq_length if not self.use_past else 1
        _lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
        _lowerCAmelCase = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
        _lowerCAmelCase = dict(**_lowerCAmelCase , **_lowerCAmelCase )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch
            _lowerCAmelCase , _lowerCAmelCase = common_inputs["input_ids"].shape
            _lowerCAmelCase = common_inputs["decoder_input_ids"].shape[1]
            _lowerCAmelCase , _lowerCAmelCase = self.num_attention_heads
            _lowerCAmelCase = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            _lowerCAmelCase = decoder_seq_length + 3
            _lowerCAmelCase = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            _lowerCAmelCase = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(_lowerCAmelCase , _lowerCAmelCase )] , dim=1 )

            _lowerCAmelCase = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            _lowerCAmelCase , _lowerCAmelCase = self.num_layers
            _lowerCAmelCase = min(_lowerCAmelCase , _lowerCAmelCase )
            _lowerCAmelCase = max(_lowerCAmelCase , _lowerCAmelCase ) - min_num_layers
            _lowerCAmelCase = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(_lowerCAmelCase ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(_lowerCAmelCase ),
                        torch.zeros(_lowerCAmelCase ),
                        torch.zeros(_lowerCAmelCase ),
                        torch.zeros(_lowerCAmelCase ),
                    )
                )
            # TODO: test this.
            _lowerCAmelCase = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(_lowerCAmelCase , _lowerCAmelCase ):
                common_inputs["past_key_values"].append((torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) )
        return common_inputs

    def _snake_case (
        self ,
        _lowerCAmelCase ,
        _lowerCAmelCase = -1 ,
        _lowerCAmelCase = -1 ,
        _lowerCAmelCase = False ,
        _lowerCAmelCase = None ,
    ) -> Mapping[str, Any]:
        _lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch
            _lowerCAmelCase , _lowerCAmelCase = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            _lowerCAmelCase = seqlen + 2
            _lowerCAmelCase , _lowerCAmelCase = self.num_layers
            _lowerCAmelCase , _lowerCAmelCase = self.num_attention_heads
            _lowerCAmelCase = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            _lowerCAmelCase = common_inputs["attention_mask"].dtype
            _lowerCAmelCase = torch.cat(
                [common_inputs["attention_mask"], torch.ones(_lowerCAmelCase , _lowerCAmelCase , dtype=_lowerCAmelCase )] , dim=1 )
            _lowerCAmelCase = [
                (torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) for _ in range(_lowerCAmelCase )
            ]
        return common_inputs

    def _snake_case (
        self ,
        _lowerCAmelCase ,
        _lowerCAmelCase = -1 ,
        _lowerCAmelCase = -1 ,
        _lowerCAmelCase = False ,
        _lowerCAmelCase = None ,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        _lowerCAmelCase = compute_effective_axis_dimension(
            _lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        _lowerCAmelCase = tokenizer.num_special_tokens_to_add(_lowerCAmelCase )
        _lowerCAmelCase = compute_effective_axis_dimension(
            _lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowerCAmelCase )

        # Generate dummy inputs according to compute batch and sequence
        _lowerCAmelCase = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
        _lowerCAmelCase = dict(tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase ) )
        return common_inputs

    def _snake_case (
        self ,
        _lowerCAmelCase ,
        _lowerCAmelCase = -1 ,
        _lowerCAmelCase = -1 ,
        _lowerCAmelCase = False ,
        _lowerCAmelCase = None ,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            _lowerCAmelCase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                _lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase )
        elif self.task == "causal-lm":
            _lowerCAmelCase = self._generate_dummy_inputs_for_causal_lm(
                _lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase )
        else:
            _lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                _lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase )

        return common_inputs

    def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
        if self.task in ["default", "seq2seq-lm"]:
            _lowerCAmelCase = super()._flatten_past_key_values_(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
        else:
            _lowerCAmelCase = super(_lowerCAmelCase , self )._flatten_past_key_values_(
                _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
```

style_context_codestyle: 18

label: 1
Row 4

code:

```python
'''simple docstring'''
import inspect
import unittest

import numpy as np

from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel

    if is_vision_available():
        from transformers import MaskFormerImageProcessor

if is_vision_available():
    from PIL import Image


class lowerCAmelCase_ :
    def __init__(
        self ,
        _lowerCAmelCase ,
        _lowerCAmelCase=2 ,
        _lowerCAmelCase=True ,
        _lowerCAmelCase=False ,
        _lowerCAmelCase=10 ,
        _lowerCAmelCase=3 ,
        _lowerCAmelCase=32 * 4 ,
        _lowerCAmelCase=32 * 6 ,
        _lowerCAmelCase=4 ,
        _lowerCAmelCase=32 ,
    ) -> Tuple:
        _lowerCAmelCase = parent
        _lowerCAmelCase = batch_size
        _lowerCAmelCase = is_training
        _lowerCAmelCase = use_auxiliary_loss
        _lowerCAmelCase = num_queries
        _lowerCAmelCase = num_channels
        _lowerCAmelCase = min_size
        _lowerCAmelCase = max_size
        _lowerCAmelCase = num_labels
        _lowerCAmelCase = mask_feature_size

    def _snake_case ( self ) -> int:
        _lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            _lowerCAmelCase )
        _lowerCAmelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_lowerCAmelCase )
        _lowerCAmelCase = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_lowerCAmelCase ) > 0.5
        ).float()
        _lowerCAmelCase = (torch.rand((self.batch_size, self.num_labels) , device=_lowerCAmelCase ) > 0.5).long()
        _lowerCAmelCase = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def _snake_case ( self ) -> Optional[Any]:
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1] ,
            ) ,
            decoder_config=DetrConfig(
                decoder_ffn_dim=128 ,
                num_queries=self.num_queries ,
                decoder_attention_heads=2 ,
                d_model=self.mask_feature_size ,
            ) ,
            mask_feature_size=self.mask_feature_size ,
            fpn_feature_size=self.mask_feature_size ,
            num_channels=self.num_channels ,
            num_labels=self.num_labels ,
        )

    def _snake_case ( self ) -> int:
        _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = self.prepare_config_and_inputs()
        _lowerCAmelCase = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
        _lowerCAmelCase = output.encoder_hidden_states
        _lowerCAmelCase = output.pixel_decoder_hidden_states
        _lowerCAmelCase = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(_lowerCAmelCase ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(_lowerCAmelCase ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(_lowerCAmelCase ) , config.decoder_config.decoder_layers )

    def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> List[str]:
        with torch.no_grad():
            _lowerCAmelCase = MaskFormerModel(config=_lowerCAmelCase )
            model.to(_lowerCAmelCase )
            model.eval()
            _lowerCAmelCase = model(pixel_values=_lowerCAmelCase , pixel_mask=_lowerCAmelCase )
            _lowerCAmelCase = model(_lowerCAmelCase , output_hidden_states=_lowerCAmelCase )
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape ,
            (self.batch_size, self.num_queries, self.mask_feature_size) ,
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )

        if output_hidden_states:
            self.check_output_hidden_state(_lowerCAmelCase , _lowerCAmelCase )

    def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
        _lowerCAmelCase = MaskFormerForInstanceSegmentation(config=_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.eval()

        def comm_check_on_output(_lowerCAmelCase ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape ,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )

        with torch.no_grad():
            _lowerCAmelCase = model(pixel_values=_lowerCAmelCase , pixel_mask=_lowerCAmelCase )
            _lowerCAmelCase = model(_lowerCAmelCase )
            comm_check_on_output(_lowerCAmelCase )
            _lowerCAmelCase = model(
                pixel_values=_lowerCAmelCase , pixel_mask=_lowerCAmelCase , mask_labels=_lowerCAmelCase , class_labels=_lowerCAmelCase )
            comm_check_on_output(_lowerCAmelCase )

        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )


@require_torch
class lowerCAmelCase_ ( __magic_name__ ,__magic_name__ ,unittest.TestCase ):
    __lowerCamelCase : List[str] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    __lowerCamelCase : List[str] = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    __lowerCamelCase : Any = False
    __lowerCamelCase : List[str] = False
    __lowerCamelCase : Union[str, Any] = False
    __lowerCamelCase : int = False

    def _snake_case ( self ) -> Any:
        _lowerCAmelCase = MaskFormerModelTester(self )
        _lowerCAmelCase = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase )

    def _snake_case ( self ) -> Tuple:
        self.config_tester.run_common_tests()

    def _snake_case ( self ) -> int:
        _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(_lowerCAmelCase , **_lowerCAmelCase , output_hidden_states=_lowerCAmelCase )

    def _snake_case ( self ) -> Dict:
        _lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*_lowerCAmelCase )

    @unittest.skip(reason="MaskFormer does not use inputs_embeds" )
    def _snake_case ( self ) -> List[Any]:
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" )
    def _snake_case ( self ) -> Any:
        pass

    @unittest.skip(reason="MaskFormer is not a generative model" )
    def _snake_case ( self ) -> Tuple:
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings" )
    def _snake_case ( self ) -> str:
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
    def _snake_case ( self ) -> Union[str, Any]:
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def _snake_case ( self ) -> Optional[int]:
        pass

    def _snake_case ( self ) -> Dict:
        _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            _lowerCAmelCase = model_class(_lowerCAmelCase )
            _lowerCAmelCase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _lowerCAmelCase = [*signature.parameters.keys()]
            _lowerCAmelCase = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , _lowerCAmelCase )

    @slow
    def _snake_case ( self ) -> Dict:
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            _lowerCAmelCase = MaskFormerModel.from_pretrained(_lowerCAmelCase )
            self.assertIsNotNone(_lowerCAmelCase )

    def _snake_case ( self ) -> List[str]:
        _lowerCAmelCase = (self.model_tester.min_size,) * 2
        _lowerCAmelCase = {
            "pixel_values": torch.randn((2, 3, *size) , device=_lowerCAmelCase ),
            "mask_labels": torch.randn((2, 10, *size) , device=_lowerCAmelCase ),
            "class_labels": torch.zeros(2 , 10 , device=_lowerCAmelCase ).long(),
        }

        _lowerCAmelCase = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(_lowerCAmelCase )
        _lowerCAmelCase = model(**_lowerCAmelCase )
        self.assertTrue(outputs.loss is not None )

    def _snake_case ( self ) -> List[Any]:
        _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(_lowerCAmelCase , **_lowerCAmelCase , output_hidden_states=_lowerCAmelCase )

    def _snake_case ( self ) -> Optional[int]:
        _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            _lowerCAmelCase = model_class(_lowerCAmelCase ).to(_lowerCAmelCase )
            _lowerCAmelCase = model(**_lowerCAmelCase , output_attentions=_lowerCAmelCase )
            self.assertTrue(outputs.attentions is not None )

    def _snake_case ( self ) -> Optional[Any]:
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        _lowerCAmelCase = self.all_model_classes[1]
        _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs()

        _lowerCAmelCase = model_class(_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.train()

        _lowerCAmelCase = model(_lowerCAmelCase , mask_labels=_lowerCAmelCase , class_labels=_lowerCAmelCase ).loss
        loss.backward()

    def _snake_case ( self ) -> Any:
        # only MaskFormerForInstanceSegmentation has the loss
        _lowerCAmelCase = self.all_model_classes[1]
        _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        _lowerCAmelCase = True
        _lowerCAmelCase = True

        _lowerCAmelCase = model_class(_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.train()

        _lowerCAmelCase = model(_lowerCAmelCase , mask_labels=_lowerCAmelCase , class_labels=_lowerCAmelCase )

        _lowerCAmelCase = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        _lowerCAmelCase = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        _lowerCAmelCase = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        _lowerCAmelCase = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=_lowerCAmelCase )

        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )


_SCREAMING_SNAKE_CASE = 1e-4


def __a():
    '''simple docstring'''
    _lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image


@require_vision
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
    @cached_property
    def _snake_case ( self ) -> Tuple:
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" )
            if is_vision_available()
            else None
        )

    def _snake_case ( self ) -> Optional[Any]:
        _lowerCAmelCase = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(_lowerCAmelCase )
        _lowerCAmelCase = self.default_image_processor
        _lowerCAmelCase = prepare_img()
        _lowerCAmelCase = image_processor(_lowerCAmelCase , return_tensors="pt" ).to(_lowerCAmelCase )
        _lowerCAmelCase = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(_lowerCAmelCase , (1, 3, 800, 1088) )

        with torch.no_grad():
            _lowerCAmelCase = model(**_lowerCAmelCase )

        _lowerCAmelCase = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(_lowerCAmelCase )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )

        _lowerCAmelCase = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(_lowerCAmelCase )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )

        _lowerCAmelCase = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(_lowerCAmelCase )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )

    def _snake_case ( self ) -> Dict:
        _lowerCAmelCase = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
            .to(_lowerCAmelCase )
            .eval()
        )
        _lowerCAmelCase = self.default_image_processor
        _lowerCAmelCase = prepare_img()
        _lowerCAmelCase = image_processor(_lowerCAmelCase , return_tensors="pt" ).to(_lowerCAmelCase )
        _lowerCAmelCase = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(_lowerCAmelCase , (1, 3, 800, 1088) )

        with torch.no_grad():
            _lowerCAmelCase = model(**_lowerCAmelCase )

        # masks_queries_logits
        _lowerCAmelCase = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape ,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ,
        )
        _lowerCAmelCase = [
            [-1.3737124, -1.7724937, -1.9364233],
            [-1.5977281, -1.9867939, -2.1523695],
            [-1.5795398, -1.9269832, -2.093942],
        ]
        _lowerCAmelCase = torch.tensor(_lowerCAmelCase ).to(_lowerCAmelCase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )

        # class_queries_logits
        _lowerCAmelCase = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        _lowerCAmelCase = torch.tensor(
            [
                [1.6512E00, -5.2572E00, -3.3519E00],
                [3.6169E-02, -5.9025E00, -2.9313E00],
                [1.0766E-04, -7.7630E00, -5.1263E00],
            ]
        ).to(_lowerCAmelCase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )

    def _snake_case ( self ) -> Dict:
        _lowerCAmelCase = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" )
            .to(_lowerCAmelCase )
            .eval()
        )
        _lowerCAmelCase = self.default_image_processor
        _lowerCAmelCase = prepare_img()
        _lowerCAmelCase = image_processor(_lowerCAmelCase , return_tensors="pt" ).to(_lowerCAmelCase )
        _lowerCAmelCase = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(_lowerCAmelCase , (1, 3, 800, 1088) )

        with torch.no_grad():
            _lowerCAmelCase = model(**_lowerCAmelCase )

        # masks_queries_logits
        _lowerCAmelCase = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape ,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ,
        )
        _lowerCAmelCase = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        _lowerCAmelCase = torch.tensor(_lowerCAmelCase ).to(_lowerCAmelCase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )

        # class_queries_logits
        _lowerCAmelCase = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        _lowerCAmelCase = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(_lowerCAmelCase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )

    def _snake_case ( self ) -> Tuple:
        _lowerCAmelCase = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
            .to(_lowerCAmelCase )
            .eval()
        )
        _lowerCAmelCase = self.default_image_processor
        _lowerCAmelCase = image_processor(
            [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,
            segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,
            return_tensors="pt" ,
        )
        _lowerCAmelCase = inputs["pixel_values"].to(_lowerCAmelCase )
        _lowerCAmelCase = [el.to(_lowerCAmelCase ) for el in inputs["mask_labels"]]
        _lowerCAmelCase = [el.to(_lowerCAmelCase ) for el in inputs["class_labels"]]

        with torch.no_grad():
            _lowerCAmelCase = model(**_lowerCAmelCase )

        self.assertTrue(outputs.loss is not None )
```

code_codestyle: 18

style_context:

```python
'''simple docstring'''
import re
import string

import numpy as np

import datasets


_SCREAMING_SNAKE_CASE = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"

_SCREAMING_SNAKE_CASE = "\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n    >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results[\"exact_match\"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n    >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results[\"exact_match\"], 1))\n    50.0\n\n\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n    >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results[\"exact_match\"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n    >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results[\"exact_match\"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n    >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results[\"exact_match\"], 1))\n    33.3\n\n"

_SCREAMING_SNAKE_CASE = "\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
    def _snake_case ( self ) -> List[str]:
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string" , id="sequence" ),
                    "references": datasets.Value("string" , id="sequence" ),
                }
            ) ,
            reference_urls=[] ,
        )

    def _snake_case (
        self ,
        _lowerCAmelCase ,
        _lowerCAmelCase ,
        _lowerCAmelCase=None ,
        _lowerCAmelCase=False ,
        _lowerCAmelCase=False ,
        _lowerCAmelCase=False ,
    ) -> str:
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                _lowerCAmelCase = np.array([re.sub(_lowerCAmelCase , "" , _lowerCAmelCase ) for x in predictions] )
                _lowerCAmelCase = np.array([re.sub(_lowerCAmelCase , "" , _lowerCAmelCase ) for x in references] )
        else:
            _lowerCAmelCase = np.asarray(_lowerCAmelCase )
            _lowerCAmelCase = np.asarray(_lowerCAmelCase )

        if ignore_case:
            _lowerCAmelCase = np.char.lower(_lowerCAmelCase )
            _lowerCAmelCase = np.char.lower(_lowerCAmelCase )

        if ignore_punctuation:
            _lowerCAmelCase = string.punctuation.maketrans("" , "" , string.punctuation )
            _lowerCAmelCase = np.char.translate(_lowerCAmelCase , table=_lowerCAmelCase )
            _lowerCAmelCase = np.char.translate(_lowerCAmelCase , table=_lowerCAmelCase )

        if ignore_numbers:
            _lowerCAmelCase = string.digits.maketrans("" , "" , string.digits )
            _lowerCAmelCase = np.char.translate(_lowerCAmelCase , table=_lowerCAmelCase )
            _lowerCAmelCase = np.char.translate(_lowerCAmelCase , table=_lowerCAmelCase )

        _lowerCAmelCase = predictions == references

        return {"exact_match": np.mean(_lowerCAmelCase ) * 100}
```

style_context_codestyle: 18

label: 1
Row 5

code:

```python
'''simple docstring'''
from sklearn.metrics import fa_score

import datasets


_SCREAMING_SNAKE_CASE = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"

_SCREAMING_SNAKE_CASE = "\nArgs:\n    predictions (`list` of `int`): Predicted labels.\n    references (`list` of `int`): Ground truth labels.\n    labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n    pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n    average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n        - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n        - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n        - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n        - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n        - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n    sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n    f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n    Example 1-A simple binary example\n        >>> f1_metric = datasets.load_metric(\"f1\")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n        >>> print(results)\n        {'f1': 0.5}\n\n    Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n        >>> f1_metric = datasets.load_metric(\"f1\")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n        >>> print(round(results['f1'], 2))\n        0.67\n\n    Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n        >>> f1_metric = datasets.load_metric(\"f1\")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n        >>> print(round(results['f1'], 2))\n        0.35\n\n    Example 4-A multiclass example, with different values for the `average` input.\n        >>> predictions = [0, 2, 1, 0, 0, 1]\n        >>> references = [0, 1, 2, 0, 1, 2]\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n        >>> print(round(results['f1'], 2))\n        0.27\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n        >>> print(round(results['f1'], 2))\n        0.33\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n        >>> print(round(results['f1'], 2))\n        0.27\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n        >>> print(results)\n        {'f1': array([0.8, 0. , 0. ])}\n"

_SCREAMING_SNAKE_CASE = "\n@article{scikit-learn,\n    title={Scikit-learn: Machine Learning in {P}ython},\n    author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n        and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n        and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n        Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n    journal={Journal of Machine Learning Research},\n    volume={12},\n    pages={2825--2830},\n    year={2011}\n}\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
    def _snake_case ( self ) -> Dict:
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32" ) ),
                    "references": datasets.Sequence(datasets.Value("int32" ) ),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32" ),
                    "references": datasets.Value("int32" ),
                }
            ) ,
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"] ,
        )

    def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=1 , _lowerCAmelCase="binary" , _lowerCAmelCase=None ) -> Optional[Any]:
        _lowerCAmelCase = fa_score(
            _lowerCAmelCase , _lowerCAmelCase , labels=_lowerCAmelCase , pos_label=_lowerCAmelCase , average=_lowerCAmelCase , sample_weight=_lowerCAmelCase )
        return {"f1": float(_lowerCAmelCase ) if score.size == 1 else score}
```

code_codestyle: 18

style_context:

```python
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_yolos import YolosImageProcessor


_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)


class lowerCAmelCase_ ( __magic_name__ ):
    def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead." ,
            _lowerCAmelCase ,
        )
        super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
```

style_context_codestyle: 18

label: 1
Row 6

code:

```python
'''simple docstring'''
def __a(SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ):
    '''simple docstring'''
    _lowerCAmelCase = 1  # To kept the Calculated Value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        _lowerCAmelCase = n - k
    # Calculate C(n,k)
    for i in range(SCREAMING_SNAKE_CASE_ ):
        result *= n - i
        result //= i + 1
    return result


def __a(SCREAMING_SNAKE_CASE_ : int ):
    '''simple docstring'''
    return binomial_coefficient(2 * node_count , SCREAMING_SNAKE_CASE_ ) // (node_count + 1)


def __a(SCREAMING_SNAKE_CASE_ : int ):
    '''simple docstring'''
    if n < 0:
        raise ValueError("factorial() not defined for negative values" )
    _lowerCAmelCase = 1
    for i in range(1 , n + 1 ):
        result *= i
    return result


def __a(SCREAMING_SNAKE_CASE_ : int ):
    '''simple docstring'''
    return catalan_number(SCREAMING_SNAKE_CASE_ ) * factorial(SCREAMING_SNAKE_CASE_ )


if __name__ == "__main__":
    _SCREAMING_SNAKE_CASE = int(input("Enter the number of nodes: ").strip() or 0)
    if node_count <= 0:
        raise ValueError("We need some nodes to work with.")
    print(
        f'''Given {node_count} nodes, there are {binary_tree_count(node_count)} '''
        f'''binary trees and {catalan_number(node_count)} binary search trees.'''
    )
```

code_codestyle: 18

style_context:

```python
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)

_SCREAMING_SNAKE_CASE = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}


class lowerCAmelCase_ ( __magic_name__ ):
    __lowerCamelCase : Any = "falcon"
    __lowerCamelCase : List[str] = ["past_key_values"]

    def __init__(
        self ,
        _lowerCAmelCase=65024 ,
        _lowerCAmelCase=4544 ,
        _lowerCAmelCase=32 ,
        _lowerCAmelCase=71 ,
        _lowerCAmelCase=1E-5 ,
        _lowerCAmelCase=0.02 ,
        _lowerCAmelCase=True ,
        _lowerCAmelCase=0.0 ,
        _lowerCAmelCase=0.0 ,
        _lowerCAmelCase=None ,
        _lowerCAmelCase=False ,
        _lowerCAmelCase=False ,
        _lowerCAmelCase=True ,
        _lowerCAmelCase=True ,
        _lowerCAmelCase=False ,
        _lowerCAmelCase=11 ,
        _lowerCAmelCase=11 ,
        **_lowerCAmelCase ,
    ) -> Union[str, Any]:
        _lowerCAmelCase = vocab_size
        # Backward compatibility with n_embed kwarg
        _lowerCAmelCase = kwargs.pop("n_embed" , _lowerCAmelCase )
        _lowerCAmelCase = hidden_size if n_embed is None else n_embed
        _lowerCAmelCase = num_hidden_layers
        _lowerCAmelCase = num_attention_heads
        _lowerCAmelCase = layer_norm_epsilon
        _lowerCAmelCase = initializer_range
        _lowerCAmelCase = use_cache
        _lowerCAmelCase = hidden_dropout
        _lowerCAmelCase = attention_dropout

        _lowerCAmelCase = bos_token_id
        _lowerCAmelCase = eos_token_id
        _lowerCAmelCase = num_attention_heads if num_kv_heads is None else num_kv_heads
        _lowerCAmelCase = alibi
        _lowerCAmelCase = new_decoder_architecture
        _lowerCAmelCase = multi_query  # Ignored when new_decoder_architecture is True
        _lowerCAmelCase = parallel_attn
        _lowerCAmelCase = bias

        super().__init__(bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )

    @property
    def _snake_case ( self ) -> Optional[Any]:
        return self.hidden_size // self.num_attention_heads

    @property
    def _snake_case ( self ) -> Optional[Any]:
        return not self.alibi
```

style_context_codestyle: 18

label: 1
'''simple docstring''' from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel
18
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { "facebook/deit-base-distilled-patch16-224": ( "https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json" ), # See all DeiT models at https://huggingface.co/models?filter=deit } class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : Optional[int] = "deit" def __init__( self , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3072 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-12 , _lowerCAmelCase=224 , _lowerCAmelCase=16 , _lowerCAmelCase=3 , _lowerCAmelCase=True , _lowerCAmelCase=16 , **_lowerCAmelCase , ) -> Dict: super().__init__(**_lowerCAmelCase ) _lowerCAmelCase = hidden_size _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = num_attention_heads _lowerCAmelCase = intermediate_size _lowerCAmelCase = hidden_act _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = initializer_range _lowerCAmelCase = layer_norm_eps _lowerCAmelCase = image_size _lowerCAmelCase = patch_size _lowerCAmelCase = num_channels _lowerCAmelCase = qkv_bias _lowerCAmelCase = encoder_stride class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : List[str] = version.parse("1.11" ) @property def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def _snake_case ( self ) -> float: return 1E-4
18
1
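The UnCLIP import block above guards an optional backend with a sentinel exception and falls back to dummy objects. A standalone sketch of the same pattern, assuming nothing beyond the standard library (the exception and the placeholder class are re-declared here purely for illustration):

import importlib.util

class OptionalDependencyNotAvailable(Exception):
    pass

def backend_available(name: str) -> bool:
    # A module counts as "available" if Python can find a spec for it
    # without actually importing it.
    return importlib.util.find_spec(name) is not None

try:
    if not backend_available("torch"):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Placeholder so `from package import UnCLIPPipeline` still succeeds;
    # using it fails with a readable error instead of an obscure ImportError.
    class UnCLIPPipeline:
        def __init__(self, *args, **kwargs):
            raise ImportError("UnCLIPPipeline requires torch, which is not installed.")
else:
    pass  # the real `from .pipeline_unclip import UnCLIPPipeline` would go here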
'''simple docstring''' import os import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AlbertTokenizer, AutoTokenizer, BertTokenizer, BertTokenizerFast, GPTaTokenizerFast, is_tokenizers_available, ) from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers from transformers.tokenization_utils import Trie sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class lowerCAmelCase_ ( unittest.TestCase ): def _snake_case ( self ) -> List[Any]: # A mock response for an HTTP head request to emulate server down _lowerCAmelCase = mock.Mock() _lowerCAmelCase = 500 _lowerCAmelCase = {} _lowerCAmelCase = HTTPError _lowerCAmelCase = {} # Download this model to make sure it's in the cache. _lowerCAmelCase = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("requests.Session.request" , return_value=_lowerCAmelCase ) as mock_head: _lowerCAmelCase = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" ) # This checks that we did call the fake head request mock_head.assert_called() @require_tokenizers def _snake_case ( self ) -> Union[str, Any]: # A mock response for an HTTP head request to emulate server down _lowerCAmelCase = mock.Mock() _lowerCAmelCase = 500 _lowerCAmelCase = {} _lowerCAmelCase = HTTPError _lowerCAmelCase = {} # Download this model to make sure it's in the cache. _lowerCAmelCase = GPTaTokenizerFast.from_pretrained("gpt2" ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("requests.Session.request" , return_value=_lowerCAmelCase ) as mock_head: _lowerCAmelCase = GPTaTokenizerFast.from_pretrained("gpt2" ) # This checks that we did call the fake head request mock_head.assert_called() def _snake_case ( self ) -> str: # This test is for deprecated behavior and can be removed in v5 try: _lowerCAmelCase = tempfile.mktemp() with open(_lowerCAmelCase , "wb" ) as f: http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" , _lowerCAmelCase ) _lowerCAmelCase = AlbertTokenizer.from_pretrained(_lowerCAmelCase ) finally: os.remove(_lowerCAmelCase ) # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in # the current folder and have the right name. if os.path.isfile("tokenizer.json" ): # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it. return try: with open("tokenizer.json" , "wb" ) as f: http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json" , _lowerCAmelCase ) _lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) # The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000 self.assertEqual(tokenizer.vocab_size , 1000 ) # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file. 
finally: os.remove("tokenizer.json" ) def _snake_case ( self ) -> Tuple: # This test is for deprecated behavior and can be removed in v5 _lowerCAmelCase = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" ) @is_staging_test class lowerCAmelCase_ ( unittest.TestCase ): __lowerCamelCase : Tuple = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"] @classmethod def _snake_case ( cls ) -> Dict: _lowerCAmelCase = TOKEN HfFolder.save_token(_lowerCAmelCase ) @classmethod def _snake_case ( cls ) -> Any: try: delete_repo(token=cls._token , repo_id="test-tokenizer" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="valid_org/test-tokenizer-org" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="test-dynamic-tokenizer" ) except HTTPError: pass def _snake_case ( self ) -> Tuple: with tempfile.TemporaryDirectory() as tmp_dir: _lowerCAmelCase = os.path.join(_lowerCAmelCase , "vocab.txt" ) with open(_lowerCAmelCase , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) _lowerCAmelCase = BertTokenizer(_lowerCAmelCase ) tokenizer.push_to_hub("test-tokenizer" , use_auth_token=self._token ) _lowerCAmelCase = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) # Reset repo delete_repo(token=self._token , repo_id="test-tokenizer" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_lowerCAmelCase , repo_id="test-tokenizer" , push_to_hub=_lowerCAmelCase , use_auth_token=self._token ) _lowerCAmelCase = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) def _snake_case ( self ) -> Any: with tempfile.TemporaryDirectory() as tmp_dir: _lowerCAmelCase = os.path.join(_lowerCAmelCase , "vocab.txt" ) with open(_lowerCAmelCase , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) _lowerCAmelCase = BertTokenizer(_lowerCAmelCase ) tokenizer.push_to_hub("valid_org/test-tokenizer-org" , use_auth_token=self._token ) _lowerCAmelCase = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) # Reset repo delete_repo(token=self._token , repo_id="valid_org/test-tokenizer-org" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained( _lowerCAmelCase , repo_id="valid_org/test-tokenizer-org" , push_to_hub=_lowerCAmelCase , use_auth_token=self._token ) _lowerCAmelCase = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) @require_tokenizers def _snake_case ( self ) -> int: CustomTokenizer.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: _lowerCAmelCase = os.path.join(_lowerCAmelCase , "vocab.txt" ) with open(_lowerCAmelCase , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) _lowerCAmelCase = CustomTokenizer(_lowerCAmelCase ) # No fast custom tokenizer tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token ) _lowerCAmelCase = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=_lowerCAmelCase ) # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic 
module self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" ) # Fast and slow custom tokenizer CustomTokenizerFast.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: _lowerCAmelCase = os.path.join(_lowerCAmelCase , "vocab.txt" ) with open(_lowerCAmelCase , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) _lowerCAmelCase = BertTokenizerFast.from_pretrained(_lowerCAmelCase ) bert_tokenizer.save_pretrained(_lowerCAmelCase ) _lowerCAmelCase = CustomTokenizerFast.from_pretrained(_lowerCAmelCase ) tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token ) _lowerCAmelCase = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=_lowerCAmelCase ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizerFast" ) _lowerCAmelCase = AutoTokenizer.from_pretrained( f'''{USER}/test-dynamic-tokenizer''' , use_fast=_lowerCAmelCase , trust_remote_code=_lowerCAmelCase ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" ) class lowerCAmelCase_ ( unittest.TestCase ): def _snake_case ( self ) -> str: _lowerCAmelCase = Trie() trie.add("Hello 友達" ) self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} ) trie.add("Hello" ) trie.data self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} ) def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = Trie() self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS] This is a extra_id_100"] ) trie.add("[CLS]" ) trie.add("extra_id_1" ) trie.add("extra_id_100" ) self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS]", " This is a ", "extra_id_100"] ) def _snake_case ( self ) -> List[str]: _lowerCAmelCase = Trie() trie.add("A" ) self.assertEqual(trie.split("ABC" ) , ["A", "BC"] ) self.assertEqual(trie.split("BCA" ) , ["BC", "A"] ) def _snake_case ( self ) -> Any: _lowerCAmelCase = Trie() trie.add("TOKEN]" ) trie.add("[SPECIAL_TOKEN]" ) self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] ) def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = Trie() trie.add("A" ) trie.add("P" ) trie.add("[SPECIAL_TOKEN]" ) self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] ) def _snake_case ( self ) -> List[str]: _lowerCAmelCase = Trie() trie.add("AB" ) trie.add("B" ) trie.add("C" ) self.assertEqual(trie.split("ABC" ) , ["AB", "C"] ) def _snake_case ( self ) -> List[Any]: _lowerCAmelCase = Trie() trie.add("ABC" ) trie.add("B" ) trie.add("CD" ) self.assertEqual(trie.split("ABCD" ) , ["ABC", "D"] ) def _snake_case ( self ) -> Any: # Even if the offsets are wrong, we necessarily output correct string # parts. _lowerCAmelCase = Trie() _lowerCAmelCase = trie.cut_text("ABC" , [0, 0, 2, 1, 2, 3] ) self.assertEqual(_lowerCAmelCase , ["AB", "C"] )
18
'''simple docstring''' from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _SCREAMING_SNAKE_CASE = { "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"], "feature_extraction_mctct": ["MCTCTFeatureExtractor"], "processing_mctct": ["MCTCTProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST", "MCTCTForCTC", "MCTCTModel", "MCTCTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
18
1
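The Trie tests in the snippet above pin down how added tokens split a string. Below is a simplified greedy longest-leftmost-match sketch that reproduces the splits those tests expect; the library's real Trie resolves overlapping matches with a more careful state machine, so treat this as an approximation:

class SimpleTrie:
    def __init__(self):
        self.data = {}

    def add(self, word: str) -> None:
        node = self.data
        for ch in word:
            node = node.setdefault(ch, {})
        node[""] = 1  # end-of-word marker, same convention as the tests above

    def split(self, text: str) -> list:
        # Scan left to right; at each position keep the longest token that matches.
        cuts, i = [0], 0
        while i < len(text):
            node, j, end = self.data, i, None
            while j < len(text) and text[j] in node:
                node = node[text[j]]
                j += 1
                if "" in node:
                    end = j  # longest match seen so far from position i
            if end is None:
                i += 1
            else:
                cuts += [i, end]
                i = end
        cuts.append(len(text))
        return [text[a:b] for a, b in zip(cuts, cuts[1:]) if a != b]

trie = SimpleTrie()
for token in ("[CLS]", "extra_id_1", "extra_id_100"):
    trie.add(token)
assert trie.split("[CLS] This is a extra_id_100") == ["[CLS]", " This is a ", "extra_id_100"]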
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { "facebook/deit-base-distilled-patch16-224": ( "https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json" ), # See all DeiT models at https://huggingface.co/models?filter=deit } class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : Optional[int] = "deit" def __init__( self , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3072 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-12 , _lowerCAmelCase=224 , _lowerCAmelCase=16 , _lowerCAmelCase=3 , _lowerCAmelCase=True , _lowerCAmelCase=16 , **_lowerCAmelCase , ) -> Dict: super().__init__(**_lowerCAmelCase ) _lowerCAmelCase = hidden_size _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = num_attention_heads _lowerCAmelCase = intermediate_size _lowerCAmelCase = hidden_act _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = initializer_range _lowerCAmelCase = layer_norm_eps _lowerCAmelCase = image_size _lowerCAmelCase = patch_size _lowerCAmelCase = num_channels _lowerCAmelCase = qkv_bias _lowerCAmelCase = encoder_stride class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : List[str] = version.parse("1.11" ) @property def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def _snake_case ( self ) -> float: return 1E-4
18
'''simple docstring''' from typing import Dict, Iterable, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : int = ["pixel_values"] def __init__( self , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = PILImageResampling.BICUBIC , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = True , _lowerCAmelCase = 1 / 255 , _lowerCAmelCase = True , _lowerCAmelCase = IMAGENET_DEFAULT_MEAN , _lowerCAmelCase = IMAGENET_DEFAULT_STD , **_lowerCAmelCase , ) -> None: super().__init__(**_lowerCAmelCase ) _lowerCAmelCase = size if size is not None else {"shortest_edge": 224} _lowerCAmelCase = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase ) _lowerCAmelCase = crop_size if crop_size is not None else {"height": 224, "width": 224} _lowerCAmelCase = get_size_dict(_lowerCAmelCase , param_name="crop_size" ) _lowerCAmelCase = do_resize _lowerCAmelCase = size _lowerCAmelCase = resample _lowerCAmelCase = do_center_crop _lowerCAmelCase = crop_size _lowerCAmelCase = do_rescale _lowerCAmelCase = rescale_factor _lowerCAmelCase = do_normalize _lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN _lowerCAmelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = PILImageResampling.BICUBIC , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> np.ndarray: _lowerCAmelCase = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase ) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: _lowerCAmelCase = int((256 / 224) * size["shortest_edge"] ) _lowerCAmelCase = get_resize_output_image_size(_lowerCAmelCase , size=_lowerCAmelCase , default_to_square=_lowerCAmelCase ) _lowerCAmelCase = {"height": output_size[0], "width": output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( f'''Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}''' ) return resize( _lowerCAmelCase , size=(size_dict["height"], size_dict["width"]) , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> np.ndarray: _lowerCAmelCase = get_size_dict(_lowerCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''Size dict must have keys \'height\' and \'width\'. 
Got {size.keys()}''' ) return center_crop(_lowerCAmelCase , size=(size["height"], size["width"]) , data_format=_lowerCAmelCase , **_lowerCAmelCase ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> np.ndarray: return rescale(_lowerCAmelCase , scale=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> np.ndarray: return normalize(_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = ChannelDimension.FIRST , **_lowerCAmelCase , ) -> BatchFeature: _lowerCAmelCase = do_resize if do_resize is not None else self.do_resize _lowerCAmelCase = resample if resample is not None else self.resample _lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop _lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale _lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor _lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize _lowerCAmelCase = image_mean if image_mean is not None else self.image_mean _lowerCAmelCase = image_std if image_std is not None else self.image_std _lowerCAmelCase = size if size is not None else self.size _lowerCAmelCase = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase ) _lowerCAmelCase = crop_size if crop_size is not None else self.crop_size _lowerCAmelCase = get_size_dict(_lowerCAmelCase , param_name="crop_size" ) _lowerCAmelCase = make_list_of_images(_lowerCAmelCase ) if not valid_images(_lowerCAmelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. _lowerCAmelCase = [to_numpy_array(_lowerCAmelCase ) for image in images] if do_resize: _lowerCAmelCase = [self.resize(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) for image in images] if do_center_crop: _lowerCAmelCase = [self.center_crop(_lowerCAmelCase , _lowerCAmelCase ) for image in images] if do_rescale: _lowerCAmelCase = [self.rescale(_lowerCAmelCase , _lowerCAmelCase ) for image in images] if do_normalize: _lowerCAmelCase = [self.normalize(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) for image in images] _lowerCAmelCase = [to_channel_dimension_format(_lowerCAmelCase , _lowerCAmelCase ) for image in images] _lowerCAmelCase = {"pixel_values": images} return BatchFeature(data=_lowerCAmelCase , tensor_type=_lowerCAmelCase )
18
1
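The resize method in the image processor above first inflates the requested shortest edge by 256/224, then rescales so the image's short side hits that target while preserving aspect ratio. A small sketch of just that size computation (pure arithmetic; the library helper it stands in for handles clamping and rounding details differently, so this is an approximation):

def shortest_edge_output_size(height: int, width: int, shortest_edge: int = 224) -> tuple:
    # Mirror of the processor's `int((256 / 224) * size["shortest_edge"])` step.
    target = int((256 / 224) * shortest_edge)
    scale = target / min(height, width)
    return round(height * scale), round(width * scale)

# A 480x640 input resizes so its short side is 256; the 224x224 center crop follows.
print(shortest_edge_output_size(480, 640))  # (256, 341)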
'''simple docstring''' from __future__ import annotations import numpy as np from numpy import floataa from numpy.typing import NDArray def __a(SCREAMING_SNAKE_CASE_ : NDArray[floataa] , SCREAMING_SNAKE_CASE_ : NDArray[floataa] , SCREAMING_SNAKE_CASE_ : list[int] , SCREAMING_SNAKE_CASE_ : int , ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase = coefficient_matrix.shape _lowerCAmelCase , _lowerCAmelCase = constant_matrix.shape if rowsa != colsa: _lowerCAmelCase = F'''Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}''' raise ValueError(SCREAMING_SNAKE_CASE_ ) if colsa != 1: _lowerCAmelCase = F'''Constant matrix must be nx1 but received {rowsa}x{colsa}''' raise ValueError(SCREAMING_SNAKE_CASE_ ) if rowsa != rowsa: _lowerCAmelCase = ( "Coefficient and constant matrices dimensions must be nxn and nx1 but " F'''received {rowsa}x{colsa} and {rowsa}x{colsa}''' ) raise ValueError(SCREAMING_SNAKE_CASE_ ) if len(SCREAMING_SNAKE_CASE_ ) != rowsa: _lowerCAmelCase = ( "Number of initial values must be equal to number of rows in coefficient " F'''matrix but received {len(SCREAMING_SNAKE_CASE_ )} and {rowsa}''' ) raise ValueError(SCREAMING_SNAKE_CASE_ ) if iterations <= 0: raise ValueError("Iterations must be at least 1" ) _lowerCAmelCase = np.concatenate( (coefficient_matrix, constant_matrix) , axis=1 ) _lowerCAmelCase , _lowerCAmelCase = table.shape strictly_diagonally_dominant(SCREAMING_SNAKE_CASE_ ) # Iterates the whole matrix for given number of times for _ in range(SCREAMING_SNAKE_CASE_ ): _lowerCAmelCase = [] for row in range(SCREAMING_SNAKE_CASE_ ): _lowerCAmelCase = 0 for col in range(SCREAMING_SNAKE_CASE_ ): if col == row: _lowerCAmelCase = table[row][col] elif col == cols - 1: _lowerCAmelCase = table[row][col] else: temp += (-1) * table[row][col] * init_val[col] _lowerCAmelCase = (temp + val) / denom new_val.append(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = new_val return [float(SCREAMING_SNAKE_CASE_ ) for i in new_val] def __a(SCREAMING_SNAKE_CASE_ : NDArray[floataa] ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase = table.shape _lowerCAmelCase = True for i in range(0 , SCREAMING_SNAKE_CASE_ ): _lowerCAmelCase = 0 for j in range(0 , cols - 1 ): if i == j: continue else: total += table[i][j] if table[i][i] <= total: raise ValueError("Coefficient matrix is not strictly diagonally dominant" ) return is_diagonally_dominant # Test Cases if __name__ == "__main__": import doctest doctest.testmod()
18
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json", # See all Donut models at https://huggingface.co/models?filter=donut-swin } class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : Union[str, Any] = "donut-swin" __lowerCamelCase : int = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self , _lowerCAmelCase=224 , _lowerCAmelCase=4 , _lowerCAmelCase=3 , _lowerCAmelCase=96 , _lowerCAmelCase=[2, 2, 6, 2] , _lowerCAmelCase=[3, 6, 12, 24] , _lowerCAmelCase=7 , _lowerCAmelCase=4.0 , _lowerCAmelCase=True , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.1 , _lowerCAmelCase="gelu" , _lowerCAmelCase=False , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-5 , **_lowerCAmelCase , ) -> Optional[Any]: super().__init__(**_lowerCAmelCase ) _lowerCAmelCase = image_size _lowerCAmelCase = patch_size _lowerCAmelCase = num_channels _lowerCAmelCase = embed_dim _lowerCAmelCase = depths _lowerCAmelCase = len(_lowerCAmelCase ) _lowerCAmelCase = num_heads _lowerCAmelCase = window_size _lowerCAmelCase = mlp_ratio _lowerCAmelCase = qkv_bias _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = drop_path_rate _lowerCAmelCase = hidden_act _lowerCAmelCase = use_absolute_embeddings _lowerCAmelCase = layer_norm_eps _lowerCAmelCase = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _lowerCAmelCase = int(embed_dim * 2 ** (len(_lowerCAmelCase ) - 1) )
18
1
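To see the Jacobi iteration from the snippet above converge, here is a compact vectorized sketch on a strictly diagonally dominant system, checked against a direct solve (the matrix and vector are made up for the demo):

import numpy as np

A = np.array([[4.0, 1.0, 2.0],
              [3.0, 5.0, 1.0],
              [1.0, 1.0, 3.0]])
b = np.array([4.0, 7.0, 3.0])

x = np.zeros(3)
for _ in range(100):
    # x_i <- (b_i - sum_{j != i} A_ij x_j) / A_ii, using only the previous iterate.
    x = (b - (A @ x - np.diag(A) * x)) / np.diag(A)

print(x)  # ~ [0.5, 1.0, 0.5]
assert np.allclose(x, np.linalg.solve(A, b))

Strict diagonal dominance (|A_ii| greater than the sum of the other |A_ij| in each row) is exactly what the snippet's strictly_diagonally_dominant check enforces, and it guarantees this iteration converges.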
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) _SCREAMING_SNAKE_CASE = { "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"], "convert_funnel_original_tf_checkpoint_to_pytorch": [], "tokenization_funnel": ["FunnelTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = ["FunnelTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST", "FunnelBaseModel", "FunnelForMaskedLM", "FunnelForMultipleChoice", "FunnelForPreTraining", "FunnelForQuestionAnswering", "FunnelForSequenceClassification", "FunnelForTokenClassification", "FunnelModel", "FunnelPreTrainedModel", "load_tf_weights_in_funnel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST", "TFFunnelBaseModel", "TFFunnelForMaskedLM", "TFFunnelForMultipleChoice", "TFFunnelForPreTraining", "TFFunnelForQuestionAnswering", "TFFunnelForSequenceClassification", "TFFunnelForTokenClassification", "TFFunnelModel", "TFFunnelPreTrainedModel", ] if TYPE_CHECKING: from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig from .tokenization_funnel import FunnelTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_funnel_fast import FunnelTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_funnel import ( FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, FunnelBaseModel, FunnelForMaskedLM, FunnelForMultipleChoice, FunnelForPreTraining, FunnelForQuestionAnswering, FunnelForSequenceClassification, FunnelForTokenClassification, FunnelModel, FunnelPreTrainedModel, load_tf_weights_in_funnel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_funnel import ( TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, TFFunnelPreTrainedModel, ) else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
18
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { "microsoft/swinv2-tiny-patch4-window8-256": ( "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json" ), } class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : Union[str, Any] = "swinv2" __lowerCamelCase : int = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self , _lowerCAmelCase=224 , _lowerCAmelCase=4 , _lowerCAmelCase=3 , _lowerCAmelCase=96 , _lowerCAmelCase=[2, 2, 6, 2] , _lowerCAmelCase=[3, 6, 12, 24] , _lowerCAmelCase=7 , _lowerCAmelCase=4.0 , _lowerCAmelCase=True , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.1 , _lowerCAmelCase="gelu" , _lowerCAmelCase=False , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-5 , _lowerCAmelCase=32 , **_lowerCAmelCase , ) -> Tuple: super().__init__(**_lowerCAmelCase ) _lowerCAmelCase = image_size _lowerCAmelCase = patch_size _lowerCAmelCase = num_channels _lowerCAmelCase = embed_dim _lowerCAmelCase = depths _lowerCAmelCase = len(_lowerCAmelCase ) _lowerCAmelCase = num_heads _lowerCAmelCase = window_size _lowerCAmelCase = mlp_ratio _lowerCAmelCase = qkv_bias _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = drop_path_rate _lowerCAmelCase = hidden_act _lowerCAmelCase = use_absolute_embeddings _lowerCAmelCase = layer_norm_eps _lowerCAmelCase = initializer_range _lowerCAmelCase = encoder_stride # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _lowerCAmelCase = int(embed_dim * 2 ** (len(_lowerCAmelCase ) - 1) ) _lowerCAmelCase = (0, 0, 0, 0)
18
1
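The last assignment in the Swinv2 config above derives the final channel width from embed_dim and the number of stages, since each stage transition doubles the width. The arithmetic, spelled out:

def final_hidden_size(embed_dim: int, depths: list) -> int:
    # Width doubles at every stage transition, so the last of len(depths)
    # stages is embed_dim * 2 ** (len(depths) - 1) channels wide.
    return int(embed_dim * 2 ** (len(depths) - 1))

print(final_hidden_size(96, [2, 2, 6, 2]))  # 768, matching the defaults above
print([96 * 2 ** i for i in range(4)])      # per-stage widths: [96, 192, 384, 768]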
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json", } class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : str = "gpt_bigcode" __lowerCamelCase : Optional[int] = ["past_key_values"] __lowerCamelCase : List[str] = { "hidden_size": "n_embd", "max_position_embeddings": "n_positions", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , _lowerCAmelCase=50257 , _lowerCAmelCase=1024 , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=None , _lowerCAmelCase="gelu_pytorch_tanh" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=1E-5 , _lowerCAmelCase=0.02 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=50256 , _lowerCAmelCase=50256 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , **_lowerCAmelCase , ) -> List[Any]: _lowerCAmelCase = vocab_size _lowerCAmelCase = n_positions _lowerCAmelCase = n_embd _lowerCAmelCase = n_layer _lowerCAmelCase = n_head _lowerCAmelCase = n_inner _lowerCAmelCase = activation_function _lowerCAmelCase = resid_pdrop _lowerCAmelCase = embd_pdrop _lowerCAmelCase = attn_pdrop _lowerCAmelCase = layer_norm_epsilon _lowerCAmelCase = initializer_range _lowerCAmelCase = scale_attn_weights _lowerCAmelCase = use_cache _lowerCAmelCase = attention_softmax_in_fpaa _lowerCAmelCase = scale_attention_softmax_in_fpaa _lowerCAmelCase = multi_query _lowerCAmelCase = bos_token_id _lowerCAmelCase = eos_token_id super().__init__(bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
18
'''simple docstring''' import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class lowerCAmelCase_ ( __magic_name__ ,__magic_name__ ,unittest.TestCase ): __lowerCamelCase : Optional[Any] = AutoencoderKL __lowerCamelCase : List[Any] = "sample" __lowerCamelCase : Tuple = 1e-2 @property def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = 4 _lowerCAmelCase = 3 _lowerCAmelCase = (32, 32) _lowerCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(_lowerCAmelCase ) return {"sample": image} @property def _snake_case ( self ) -> Any: return (3, 32, 32) @property def _snake_case ( self ) -> List[Any]: return (3, 32, 32) def _snake_case ( self ) -> str: _lowerCAmelCase = { "block_out_channels": [32, 64], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 4, } _lowerCAmelCase = self.dummy_input return init_dict, inputs_dict def _snake_case ( self ) -> Optional[int]: pass def _snake_case ( self ) -> Any: pass @unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" ) def _snake_case ( self ) -> str: # enable deterministic behavior for gradient checkpointing _lowerCAmelCase , _lowerCAmelCase = self.prepare_init_args_and_inputs_for_common() _lowerCAmelCase = self.model_class(**_lowerCAmelCase ) model.to(_lowerCAmelCase ) assert not model.is_gradient_checkpointing and model.training _lowerCAmelCase = model(**_lowerCAmelCase ).sample # run the backwards pass on the model. For the backwards pass, for simplicity, # we don't compute a real loss and instead backprop on the mean difference to random labels model.zero_grad() _lowerCAmelCase = torch.randn_like(_lowerCAmelCase ) _lowerCAmelCase = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing _lowerCAmelCase = self.model_class(**_lowerCAmelCase ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(_lowerCAmelCase ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training _lowerCAmelCase = model_a(**_lowerCAmelCase ).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() _lowerCAmelCase = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1E-5 ) _lowerCAmelCase = dict(model.named_parameters() ) _lowerCAmelCase = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) ) def _snake_case ( self ) -> Optional[Any]: _lowerCAmelCase , _lowerCAmelCase = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) self.assertEqual(len(loading_info["missing_keys"] ) , 0 ) model.to(_lowerCAmelCase ) _lowerCAmelCase = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def _snake_case ( self ) -> Dict: _lowerCAmelCase = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" ) _lowerCAmelCase = model.to(_lowerCAmelCase ) model.eval() if torch_device == "mps": _lowerCAmelCase = torch.manual_seed(0 ) else: _lowerCAmelCase = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 ) _lowerCAmelCase = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) _lowerCAmelCase = image.to(_lowerCAmelCase ) with torch.no_grad(): _lowerCAmelCase = model(_lowerCAmelCase , sample_posterior=_lowerCAmelCase , generator=_lowerCAmelCase ).sample _lowerCAmelCase = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. if torch_device == "mps": _lowerCAmelCase = torch.tensor( [ -4.0078E-01, -3.8323E-04, -1.2681E-01, -1.1462E-01, 2.0095E-01, 1.0893E-01, -8.8247E-02, -3.0361E-01, -9.8644E-03, ] ) elif torch_device == "cpu": _lowerCAmelCase = torch.tensor( [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] ) else: _lowerCAmelCase = torch.tensor( [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] ) self.assertTrue(torch_all_close(_lowerCAmelCase , _lowerCAmelCase , rtol=1E-2 ) ) @slow class lowerCAmelCase_ ( unittest.TestCase ): def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]: return f'''gaussian_noise_s={seed}_shape={'_'.join([str(_lowerCAmelCase ) for s in shape] )}.npy''' def _snake_case ( self ) -> List[str]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self , _lowerCAmelCase=0 , _lowerCAmelCase=(4, 3, 512, 512) , _lowerCAmelCase=False ) -> Tuple: _lowerCAmelCase = torch.floataa if fpaa else torch.floataa _lowerCAmelCase = torch.from_numpy(load_hf_numpy(self.get_file_format(_lowerCAmelCase , _lowerCAmelCase ) ) ).to(_lowerCAmelCase ).to(_lowerCAmelCase ) return image def _snake_case ( self , _lowerCAmelCase="CompVis/stable-diffusion-v1-4" , _lowerCAmelCase=False ) -> Tuple: _lowerCAmelCase = "fp16" if fpaa else None _lowerCAmelCase = torch.floataa if fpaa else torch.floataa _lowerCAmelCase = AutoencoderKL.from_pretrained( _lowerCAmelCase , subfolder="vae" , torch_dtype=_lowerCAmelCase , revision=_lowerCAmelCase , ) model.to(_lowerCAmelCase ).eval() return model def _snake_case ( self , _lowerCAmelCase=0 ) -> str: if torch_device == "mps": return torch.manual_seed(_lowerCAmelCase ) return 
torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase ) @parameterized.expand( [ # fmt: off [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]], [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]], # fmt: on ] ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict: _lowerCAmelCase = self.get_sd_vae_model() _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase ) _lowerCAmelCase = self.get_generator(_lowerCAmelCase ) with torch.no_grad(): _lowerCAmelCase = model(_lowerCAmelCase , generator=_lowerCAmelCase , sample_posterior=_lowerCAmelCase ).sample assert sample.shape == image.shape _lowerCAmelCase = sample[-1, -2:, -2:, :2].flatten().float().cpu() _lowerCAmelCase = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=3E-3 ) @parameterized.expand( [ # fmt: off [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]], [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]], # fmt: on ] ) @require_torch_gpu def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: _lowerCAmelCase = self.get_sd_vae_model(fpaa=_lowerCAmelCase ) _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , fpaa=_lowerCAmelCase ) _lowerCAmelCase = self.get_generator(_lowerCAmelCase ) with torch.no_grad(): _lowerCAmelCase = model(_lowerCAmelCase , generator=_lowerCAmelCase , sample_posterior=_lowerCAmelCase ).sample assert sample.shape == image.shape _lowerCAmelCase = sample[-1, -2:, :2, -2:].flatten().float().cpu() _lowerCAmelCase = torch.tensor(_lowerCAmelCase ) assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1E-2 ) @parameterized.expand( [ # fmt: off [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]], [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]], # fmt: on ] ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Tuple: _lowerCAmelCase = self.get_sd_vae_model() _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase ) with torch.no_grad(): _lowerCAmelCase = model(_lowerCAmelCase ).sample assert sample.shape == image.shape _lowerCAmelCase = sample[-1, -2:, -2:, :2].flatten().float().cpu() _lowerCAmelCase = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=3E-3 ) @parameterized.expand( [ # fmt: off [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]], [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]], # fmt: on ] ) @require_torch_gpu def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: _lowerCAmelCase = self.get_sd_vae_model() _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , shape=(3, 4, 64, 64) ) with torch.no_grad(): _lowerCAmelCase = model.decode(_lowerCAmelCase ).sample assert list(sample.shape ) == [3, 3, 512, 512] _lowerCAmelCase = sample[-1, -2:, :2, -2:].flatten().cpu() _lowerCAmelCase = torch.tensor(_lowerCAmelCase ) assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) @parameterized.expand( [ # fmt: 
off [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]], [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]], # fmt: on ] ) @require_torch_gpu def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> str: _lowerCAmelCase = self.get_sd_vae_model(fpaa=_lowerCAmelCase ) _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , shape=(3, 4, 64, 64) , fpaa=_lowerCAmelCase ) with torch.no_grad(): _lowerCAmelCase = model.decode(_lowerCAmelCase ).sample assert list(sample.shape ) == [3, 3, 512, 512] _lowerCAmelCase = sample[-1, -2:, :2, -2:].flatten().float().cpu() _lowerCAmelCase = torch.tensor(_lowerCAmelCase ) assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=5E-3 ) @parameterized.expand([(13,), (16,), (27,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." ) def _snake_case ( self , _lowerCAmelCase ) -> List[str]: _lowerCAmelCase = self.get_sd_vae_model(fpaa=_lowerCAmelCase ) _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , shape=(3, 4, 64, 64) , fpaa=_lowerCAmelCase ) with torch.no_grad(): _lowerCAmelCase = model.decode(_lowerCAmelCase ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): _lowerCAmelCase = model.decode(_lowerCAmelCase ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1E-1 ) @parameterized.expand([(13,), (16,), (37,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." ) def _snake_case ( self , _lowerCAmelCase ) -> Any: _lowerCAmelCase = self.get_sd_vae_model() _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , shape=(3, 4, 64, 64) ) with torch.no_grad(): _lowerCAmelCase = model.decode(_lowerCAmelCase ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): _lowerCAmelCase = model.decode(_lowerCAmelCase ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1E-2 ) @parameterized.expand( [ # fmt: off [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]], [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]], # fmt: on ] ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> int: _lowerCAmelCase = self.get_sd_vae_model() _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase ) _lowerCAmelCase = self.get_generator(_lowerCAmelCase ) with torch.no_grad(): _lowerCAmelCase = model.encode(_lowerCAmelCase ).latent_dist _lowerCAmelCase = dist.sample(generator=_lowerCAmelCase ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] _lowerCAmelCase = sample[0, -1, -3:, -3:].flatten().cpu() _lowerCAmelCase = torch.tensor(_lowerCAmelCase ) _lowerCAmelCase = 3E-3 if torch_device != "mps" else 1E-2 assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=_lowerCAmelCase )
18
1
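The gradient-checkpointing test in the snippet above asserts that recomputing activations in the backward pass leaves losses and gradients numerically unchanged. The same equivalence on a toy network — a minimal sketch, assuming a recent PyTorch where torch.utils.checkpoint.checkpoint accepts use_reentrant:

import torch
from torch.utils.checkpoint import checkpoint

torch.manual_seed(0)
net = torch.nn.Sequential(torch.nn.Linear(8, 32), torch.nn.GELU(), torch.nn.Linear(32, 8))
x = torch.randn(4, 8)

# Plain forward/backward.
net(x).sum().backward()
grads = [p.grad.clone() for p in net.parameters()]
net.zero_grad()

# Checkpointed forward/backward: activations are recomputed instead of stored.
checkpoint(net, x, use_reentrant=False).sum().backward()
for g, p in zip(grads, net.parameters()):
    assert torch.allclose(g, p.grad, atol=1e-6)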
'''simple docstring''' from ..utils import DummyObject, requires_backends class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : str = ["sentencepiece"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: requires_backends(self , ["sentencepiece"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Union[str, Any] = ["sentencepiece"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(self , ["sentencepiece"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Optional[Any] = ["sentencepiece"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(self , ["sentencepiece"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Optional[Any] = ["sentencepiece"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(self , ["sentencepiece"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[str] = ["sentencepiece"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(self , ["sentencepiece"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Optional[int] = ["sentencepiece"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(self , ["sentencepiece"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : str = ["sentencepiece"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(self , ["sentencepiece"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Tuple = ["sentencepiece"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: requires_backends(self , ["sentencepiece"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : str = ["sentencepiece"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: requires_backends(self , ["sentencepiece"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[str] = ["sentencepiece"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: requires_backends(self , ["sentencepiece"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Tuple = ["sentencepiece"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(self , ["sentencepiece"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : int = ["sentencepiece"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(self , ["sentencepiece"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : int = ["sentencepiece"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(self , ["sentencepiece"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Optional[Any] = ["sentencepiece"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: requires_backends(self , ["sentencepiece"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Union[str, Any] = ["sentencepiece"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: requires_backends(self , ["sentencepiece"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Tuple = ["sentencepiece"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(self , ["sentencepiece"] 
) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Optional[int] = ["sentencepiece"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: requires_backends(self , ["sentencepiece"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[str] = ["sentencepiece"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(self , ["sentencepiece"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Union[str, Any] = ["sentencepiece"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(self , ["sentencepiece"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Tuple = ["sentencepiece"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: requires_backends(self , ["sentencepiece"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Tuple = ["sentencepiece"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: requires_backends(self , ["sentencepiece"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Union[str, Any] = ["sentencepiece"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: requires_backends(self , ["sentencepiece"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Optional[Any] = ["sentencepiece"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: requires_backends(self , ["sentencepiece"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Optional[Any] = ["sentencepiece"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(self , ["sentencepiece"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[str] = ["sentencepiece"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(self , ["sentencepiece"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Union[str, Any] = ["sentencepiece"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: requires_backends(self , ["sentencepiece"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Tuple = ["sentencepiece"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(self , ["sentencepiece"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Tuple = ["sentencepiece"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(self , ["sentencepiece"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : str = ["sentencepiece"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]: requires_backends(self , ["sentencepiece"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Tuple = ["sentencepiece"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: requires_backends(self , ["sentencepiece"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Union[str, Any] = ["sentencepiece"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(self , ["sentencepiece"] )
18
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json", } class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : str = "gpt_bigcode" __lowerCamelCase : Optional[int] = ["past_key_values"] __lowerCamelCase : List[str] = { "hidden_size": "n_embd", "max_position_embeddings": "n_positions", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , _lowerCAmelCase=50257 , _lowerCAmelCase=1024 , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=None , _lowerCAmelCase="gelu_pytorch_tanh" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=1E-5 , _lowerCAmelCase=0.02 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=50256 , _lowerCAmelCase=50256 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , **_lowerCAmelCase , ) -> List[Any]: _lowerCAmelCase = vocab_size _lowerCAmelCase = n_positions _lowerCAmelCase = n_embd _lowerCAmelCase = n_layer _lowerCAmelCase = n_head _lowerCAmelCase = n_inner _lowerCAmelCase = activation_function _lowerCAmelCase = resid_pdrop _lowerCAmelCase = embd_pdrop _lowerCAmelCase = attn_pdrop _lowerCAmelCase = layer_norm_epsilon _lowerCAmelCase = initializer_range _lowerCAmelCase = scale_attn_weights _lowerCAmelCase = use_cache _lowerCAmelCase = attention_softmax_in_fpaa _lowerCAmelCase = scale_attention_softmax_in_fpaa _lowerCAmelCase = multi_query _lowerCAmelCase = bos_token_id _lowerCAmelCase = eos_token_id super().__init__(bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
18
1
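Every dummy class in the snippet above relies on the same two ingredients: a metaclass that intercepts instantiation and a _backends list naming what is missing. A standalone sketch of the mechanism (DummyObject is re-declared here, and FakeSentencePieceTokenizer is an illustrative name, not a real transformers class):

class DummyObject(type):
    # type.__call__ normally constructs the instance; overriding it means any
    # attempt to instantiate a class built with this metaclass raises instead.
    def __call__(cls, *args, **kwargs):
        backends = ", ".join(cls._backends)
        raise ImportError(f"{cls.__name__} requires the {backends} backend(s), which are not installed.")

class FakeSentencePieceTokenizer(metaclass=DummyObject):
    _backends = ["sentencepiece"]

try:
    FakeSentencePieceTokenizer()
except ImportError as err:
    print(err)  # FakeSentencePieceTokenizer requires the sentencepiece backend(s), ...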
'''simple docstring''' from __future__ import annotations import unittest from transformers import EsmConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers.models.esm.modeling_tf_esm import ( TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, TFEsmModel, ) class lowerCAmelCase_ : def __init__( self , _lowerCAmelCase , ) -> Optional[Any]: _lowerCAmelCase = parent _lowerCAmelCase = 13 _lowerCAmelCase = 7 _lowerCAmelCase = True _lowerCAmelCase = True _lowerCAmelCase = True _lowerCAmelCase = 99 _lowerCAmelCase = 32 _lowerCAmelCase = 2 _lowerCAmelCase = 4 _lowerCAmelCase = 37 _lowerCAmelCase = "gelu" _lowerCAmelCase = 0.1 _lowerCAmelCase = 0.1 _lowerCAmelCase = 512 _lowerCAmelCase = 16 _lowerCAmelCase = 2 _lowerCAmelCase = 0.02 _lowerCAmelCase = 3 _lowerCAmelCase = 4 _lowerCAmelCase = None def _snake_case ( self ) -> Dict: _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowerCAmelCase = None if self.use_input_mask: _lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = None if self.use_labels: _lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _lowerCAmelCase = EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _snake_case ( self ) -> str: ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) = self.prepare_config_and_inputs() _lowerCAmelCase = True _lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Any: _lowerCAmelCase = TFEsmModel(config=_lowerCAmelCase ) _lowerCAmelCase = {"input_ids": input_ids, "attention_mask": input_mask} _lowerCAmelCase = model(_lowerCAmelCase ) _lowerCAmelCase = [input_ids, input_mask] _lowerCAmelCase = model(_lowerCAmelCase ) _lowerCAmelCase = model(_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , 
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> Union[str, Any]: _lowerCAmelCase = True _lowerCAmelCase = TFEsmModel(config=_lowerCAmelCase ) _lowerCAmelCase = { "input_ids": input_ids, "attention_mask": input_mask, "encoder_hidden_states": encoder_hidden_states, "encoder_attention_mask": encoder_attention_mask, } _lowerCAmelCase = model(_lowerCAmelCase ) _lowerCAmelCase = [input_ids, input_mask] _lowerCAmelCase = model(_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase ) # Also check the case where encoder outputs are not passed _lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[str]: _lowerCAmelCase = TFEsmForMaskedLM(config=_lowerCAmelCase ) _lowerCAmelCase = model([input_ids, input_mask] ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> str: _lowerCAmelCase = self.num_labels _lowerCAmelCase = TFEsmForTokenClassification(config=_lowerCAmelCase ) _lowerCAmelCase = {"input_ids": input_ids, "attention_mask": input_mask} _lowerCAmelCase = model(_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _snake_case ( self ) -> List[str]: _lowerCAmelCase = self.prepare_config_and_inputs() ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) = config_and_inputs _lowerCAmelCase = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class lowerCAmelCase_ ( __magic_name__ ,__magic_name__ ,unittest.TestCase ): __lowerCamelCase : Tuple = ( ( TFEsmModel, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, ) if is_tf_available() else () ) __lowerCamelCase : Any = ( { "feature-extraction": TFEsmModel, "fill-mask": TFEsmForMaskedLM, "text-classification": TFEsmForSequenceClassification, "token-classification": TFEsmForTokenClassification, "zero-shot": TFEsmForSequenceClassification, } if is_tf_available() else {} ) __lowerCamelCase : List[Any] = False __lowerCamelCase : int = False def _snake_case ( self ) -> List[Any]: _lowerCAmelCase = TFEsmModelTester(self ) _lowerCAmelCase = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=37 ) def _snake_case ( self ) -> List[str]: self.config_tester.run_common_tests() def _snake_case ( self ) -> int: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def _snake_case ( self ) -> int: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*_lowerCAmelCase ) def _snake_case ( self ) -> int: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_lowerCAmelCase ) def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_lowerCAmelCase ) @slow def _snake_case ( self ) -> Optional[int]: for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase 
= TFEsmModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) @unittest.skip("Protein models do not support embedding resizing." ) def _snake_case ( self ) -> Dict: pass @unittest.skip("Protein models do not support embedding resizing." ) def _snake_case ( self ) -> Dict: pass def _snake_case ( self ) -> Tuple: _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase = model_class(_lowerCAmelCase ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class is TFEsmForMaskedLM: # Output embedding test differs from the main test because they're a matrix, not a layer _lowerCAmelCase = model.get_bias() assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) for k, v in name.items(): assert isinstance(_lowerCAmelCase , tf.Variable ) else: _lowerCAmelCase = model.get_output_embeddings() assert x is None _lowerCAmelCase = model.get_bias() assert name is None @require_tf class lowerCAmelCase_ ( unittest.TestCase ): @slow def _snake_case ( self ) -> Optional[int]: _lowerCAmelCase = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" ) _lowerCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] ) _lowerCAmelCase = model(_lowerCAmelCase )[0] _lowerCAmelCase = [1, 6, 33] self.assertEqual(list(output.numpy().shape ) , _lowerCAmelCase ) # compare the actual values for a slice. _lowerCAmelCase = tf.constant( [ [ [8.921518, -10.589814, -6.4671307], [-6.3967156, -13.911377, -1.1211915], [-7.781247, -13.951557, -3.740592], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) ) @slow def _snake_case ( self ) -> List[Any]: _lowerCAmelCase = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" ) _lowerCAmelCase = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) _lowerCAmelCase = model(_lowerCAmelCase )[0] # compare the actual values for a slice. _lowerCAmelCase = tf.constant( [ [ [0.14443092, 0.54125327, 0.3247739], [0.30340484, 0.00526676, 0.31077722], [0.32278043, -0.24987096, 0.3414628], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
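# The slow integration check above, restated as a standalone sketch. It
# assumes TensorFlow and `transformers` are installed; the
# facebook/esm2_t6_8M_UR50D checkpoint is downloaded on first use.
import tensorflow as tf
from transformers import TFEsmForMaskedLM

model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
logits = model(input_ids)[0]
print(logits.shape)  # (1, 6, 33): batch, sequence length, ESM vocabulary size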
18
'''simple docstring''' import math from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json", # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio } class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : List[Any] = "data2vec-audio" def __init__( self , _lowerCAmelCase=32 , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3072 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-5 , _lowerCAmelCase="gelu" , _lowerCAmelCase=(512, 512, 512, 512, 512, 512, 512) , _lowerCAmelCase=(5, 2, 2, 2, 2, 2, 2) , _lowerCAmelCase=(10, 3, 3, 3, 3, 2, 2) , _lowerCAmelCase=False , _lowerCAmelCase=16 , _lowerCAmelCase=19 , _lowerCAmelCase=5 , _lowerCAmelCase=0.05 , _lowerCAmelCase=10 , _lowerCAmelCase=2 , _lowerCAmelCase=0.0 , _lowerCAmelCase=10 , _lowerCAmelCase=0 , _lowerCAmelCase="sum" , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=256 , _lowerCAmelCase=(512, 512, 512, 512, 1500) , _lowerCAmelCase=(5, 3, 3, 1, 1) , _lowerCAmelCase=(1, 2, 3, 1, 1) , _lowerCAmelCase=512 , _lowerCAmelCase=0 , _lowerCAmelCase=1 , _lowerCAmelCase=2 , _lowerCAmelCase=False , _lowerCAmelCase=3 , _lowerCAmelCase=2 , _lowerCAmelCase=3 , _lowerCAmelCase=None , **_lowerCAmelCase , ) -> Dict: super().__init__(**_lowerCAmelCase , pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase ) _lowerCAmelCase = hidden_size _lowerCAmelCase = feat_extract_activation _lowerCAmelCase = list(_lowerCAmelCase ) _lowerCAmelCase = list(_lowerCAmelCase ) _lowerCAmelCase = list(_lowerCAmelCase ) _lowerCAmelCase = conv_bias _lowerCAmelCase = num_conv_pos_embeddings _lowerCAmelCase = num_conv_pos_embedding_groups _lowerCAmelCase = conv_pos_kernel_size _lowerCAmelCase = len(self.conv_dim ) _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = intermediate_size _lowerCAmelCase = hidden_act _lowerCAmelCase = num_attention_heads _lowerCAmelCase = hidden_dropout _lowerCAmelCase = attention_dropout _lowerCAmelCase = activation_dropout _lowerCAmelCase = feat_proj_dropout _lowerCAmelCase = final_dropout _lowerCAmelCase = layerdrop _lowerCAmelCase = layer_norm_eps _lowerCAmelCase = initializer_range _lowerCAmelCase = vocab_size _lowerCAmelCase = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==" " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =" f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _lowerCAmelCase = mask_time_prob _lowerCAmelCase = mask_time_length _lowerCAmelCase = mask_time_min_masks _lowerCAmelCase = mask_feature_prob _lowerCAmelCase = mask_feature_length _lowerCAmelCase = mask_feature_min_masks # ctc loss _lowerCAmelCase = ctc_loss_reduction _lowerCAmelCase = ctc_zero_infinity # adapter _lowerCAmelCase = add_adapter _lowerCAmelCase = adapter_kernel_size _lowerCAmelCase = adapter_stride _lowerCAmelCase = num_adapter_layers _lowerCAmelCase = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. _lowerCAmelCase = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. _lowerCAmelCase = list(_lowerCAmelCase ) _lowerCAmelCase = list(_lowerCAmelCase ) _lowerCAmelCase = list(_lowerCAmelCase ) _lowerCAmelCase = xvector_output_dim @property def _snake_case ( self ) -> str: return math.prod(self.conv_stride )
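# The closing property above (`math.prod(self.conv_stride)`) is the overall
# downsampling factor of the feature encoder; with the default strides from
# the signature it reduces 16 kHz audio by a factor of 320:
import math

conv_stride = (5, 2, 2, 2, 2, 2, 2)  # defaults from the __init__ above
print(math.prod(conv_stride))  # 320 input samples per encoder frame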
18
1
'''simple docstring''' from sklearn.metrics import mean_squared_error import datasets _SCREAMING_SNAKE_CASE = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n" _SCREAMING_SNAKE_CASE = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n" _SCREAMING_SNAKE_CASE = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. 
])}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): def _snake_case ( self ) -> Dict: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[ "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html" ] , ) def _snake_case ( self ) -> Tuple: if self.config_name == "multilist": return { "predictions": datasets.Sequence(datasets.Value("float" ) ), "references": datasets.Sequence(datasets.Value("float" ) ), } else: return { "predictions": datasets.Value("float" ), "references": datasets.Value("float" ), } def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase="uniform_average" , _lowerCAmelCase=True ) -> Union[str, Any]: _lowerCAmelCase = mean_squared_error( _lowerCAmelCase , _lowerCAmelCase , sample_weight=_lowerCAmelCase , multioutput=_lowerCAmelCase , squared=_lowerCAmelCase ) return {"mse": mse}
18
'''simple docstring''' import torch from diffusers import DDPMParallelScheduler from .test_schedulers import SchedulerCommonTest class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : Any = (DDPMParallelScheduler,) def _snake_case ( self , **_lowerCAmelCase ) -> int: _lowerCAmelCase = { "num_train_timesteps": 1000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "variance_type": "fixed_small", "clip_sample": True, } config.update(**_lowerCAmelCase ) return config def _snake_case ( self ) -> List[Any]: for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=_lowerCAmelCase ) def _snake_case ( self ) -> List[Any]: for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=_lowerCAmelCase , beta_end=_lowerCAmelCase ) def _snake_case ( self ) -> Any: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_lowerCAmelCase ) def _snake_case ( self ) -> Optional[Any]: for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=_lowerCAmelCase ) def _snake_case ( self ) -> Optional[int]: for clip_sample in [True, False]: self.check_over_configs(clip_sample=_lowerCAmelCase ) def _snake_case ( self ) -> List[str]: self.check_over_configs(thresholding=_lowerCAmelCase ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=_lowerCAmelCase , prediction_type=_lowerCAmelCase , sample_max_value=_lowerCAmelCase , ) def _snake_case ( self ) -> int: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=_lowerCAmelCase ) def _snake_case ( self ) -> Dict: for t in [0, 500, 999]: self.check_over_forward(time_step=_lowerCAmelCase ) def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config() _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5 def _snake_case ( self ) -> Tuple: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config() _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) _lowerCAmelCase = len(_lowerCAmelCase ) _lowerCAmelCase = self.dummy_model() _lowerCAmelCase = self.dummy_sample_deter _lowerCAmelCase = self.dummy_sample_deter + 0.1 _lowerCAmelCase = self.dummy_sample_deter - 0.1 _lowerCAmelCase = samplea.shape[0] _lowerCAmelCase = torch.stack([samplea, samplea, samplea] , dim=0 ) _lowerCAmelCase = torch.arange(_lowerCAmelCase )[0:3, None].repeat(1 , _lowerCAmelCase ) _lowerCAmelCase = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) _lowerCAmelCase = scheduler.batch_step_no_noise(_lowerCAmelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) ) _lowerCAmelCase = torch.sum(torch.abs(_lowerCAmelCase ) ) _lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_sum.item() - 1153.1833 ) < 1E-2 assert abs(result_mean.item() - 0.5005 ) < 1E-3 def _snake_case ( self ) -> Dict: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config() _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) _lowerCAmelCase = len(_lowerCAmelCase ) _lowerCAmelCase = self.dummy_model() _lowerCAmelCase = self.dummy_sample_deter 
_lowerCAmelCase = torch.manual_seed(0 ) for t in reversed(range(_lowerCAmelCase ) ): # 1. predict noise residual _lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase ) # 2. predict previous mean of sample x_t-1 _lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample _lowerCAmelCase = pred_prev_sample _lowerCAmelCase = torch.sum(torch.abs(_lowerCAmelCase ) ) _lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_sum.item() - 258.9606 ) < 1E-2 assert abs(result_mean.item() - 0.3372 ) < 1E-3 def _snake_case ( self ) -> Optional[Any]: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config(prediction_type="v_prediction" ) _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) _lowerCAmelCase = len(_lowerCAmelCase ) _lowerCAmelCase = self.dummy_model() _lowerCAmelCase = self.dummy_sample_deter _lowerCAmelCase = torch.manual_seed(0 ) for t in reversed(range(_lowerCAmelCase ) ): # 1. predict noise residual _lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase ) # 2. predict previous mean of sample x_t-1 _lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample _lowerCAmelCase = pred_prev_sample _lowerCAmelCase = torch.sum(torch.abs(_lowerCAmelCase ) ) _lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_sum.item() - 202.0296 ) < 1E-2 assert abs(result_mean.item() - 0.2631 ) < 1E-3 def _snake_case ( self ) -> Dict: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config() _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) _lowerCAmelCase = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=_lowerCAmelCase ) _lowerCAmelCase = scheduler.timesteps for i, timestep in enumerate(_lowerCAmelCase ): if i == len(_lowerCAmelCase ) - 1: _lowerCAmelCase = -1 else: _lowerCAmelCase = timesteps[i + 1] _lowerCAmelCase = scheduler.previous_timestep(_lowerCAmelCase ) _lowerCAmelCase = prev_t.item() self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) def _snake_case ( self ) -> Any: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config() _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) _lowerCAmelCase = [100, 87, 50, 51, 0] with self.assertRaises(_lowerCAmelCase , msg="`custom_timesteps` must be in descending order." ): scheduler.set_timesteps(timesteps=_lowerCAmelCase ) def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config() _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) _lowerCAmelCase = [100, 87, 50, 1, 0] _lowerCAmelCase = len(_lowerCAmelCase ) with self.assertRaises(_lowerCAmelCase , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ): scheduler.set_timesteps(num_inference_steps=_lowerCAmelCase , timesteps=_lowerCAmelCase ) def _snake_case ( self ) -> Optional[int]: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config() _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) _lowerCAmelCase = [scheduler.config.num_train_timesteps] with self.assertRaises( _lowerCAmelCase , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ): scheduler.set_timesteps(timesteps=_lowerCAmelCase )
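# Sketch of the custom-timesteps behaviour exercised by the tests above,
# outside the unittest harness. Assumes a `diffusers` release that ships
# DDPMParallelScheduler with custom-timestep support, as the tests require.
from diffusers import DDPMParallelScheduler

scheduler = DDPMParallelScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])  # must be strictly descending
print(scheduler.timesteps)  # tensor([100,  87,  50,   1,   0])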
18
1
'''simple docstring''' import logging import os import threading import time try: import warnings except ImportError: _SCREAMING_SNAKE_CASE = None try: import msvcrt except ImportError: _SCREAMING_SNAKE_CASE = None try: import fcntl except ImportError: _SCREAMING_SNAKE_CASE = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: _SCREAMING_SNAKE_CASE = OSError # Data # ------------------------------------------------ _SCREAMING_SNAKE_CASE = [ "Timeout", "BaseFileLock", "WindowsFileLock", "UnixFileLock", "SoftFileLock", "FileLock", ] _SCREAMING_SNAKE_CASE = "3.0.12" _SCREAMING_SNAKE_CASE = None def __a(): '''simple docstring''' global _logger _lowerCAmelCase = _logger or logging.getLogger(__name__ ) return _logger class lowerCAmelCase_ ( __magic_name__ ): def __init__( self , _lowerCAmelCase ) -> Any: _lowerCAmelCase = lock_file return None def __str__( self ) -> Union[str, Any]: _lowerCAmelCase = f'''The file lock \'{self.lock_file}\' could not be acquired.''' return temp class lowerCAmelCase_ : def __init__( self , _lowerCAmelCase ) -> Optional[Any]: _lowerCAmelCase = lock return None def __enter__( self ) -> Union[str, Any]: return self.lock def __exit__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Tuple: self.lock.release() return None class lowerCAmelCase_ : def __init__( self , _lowerCAmelCase , _lowerCAmelCase=-1 , _lowerCAmelCase=None ) -> List[str]: _lowerCAmelCase = max_filename_length if max_filename_length is not None else 255 # Hash the filename if it's too long _lowerCAmelCase = self.hash_filename_if_too_long(_lowerCAmelCase , _lowerCAmelCase ) # The path to the lock file. _lowerCAmelCase = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. _lowerCAmelCase = None # The default timeout value. _lowerCAmelCase = timeout # We use this lock primarily for the lock counter. _lowerCAmelCase = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. _lowerCAmelCase = 0 return None @property def _snake_case ( self ) -> Tuple: return self._lock_file @property def _snake_case ( self ) -> List[Any]: return self._timeout @timeout.setter def _snake_case ( self , _lowerCAmelCase ) -> Tuple: _lowerCAmelCase = float(_lowerCAmelCase ) return None def _snake_case ( self ) -> Dict: raise NotImplementedError() def _snake_case ( self ) -> Dict: raise NotImplementedError() @property def _snake_case ( self ) -> List[str]: return self._lock_file_fd is not None def _snake_case ( self , _lowerCAmelCase=None , _lowerCAmelCase=0.05 ) -> int: # Use the default timeout, if no timeout is provided. if timeout is None: _lowerCAmelCase = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
with self._thread_lock: self._lock_counter += 1 _lowerCAmelCase = id(self ) _lowerCAmelCase = self._lock_file _lowerCAmelCase = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(f'''Attempting to acquire lock {lock_id} on {lock_filename}''' ) self._acquire() if self.is_locked: logger().debug(f'''Lock {lock_id} acquired on {lock_filename}''' ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(f'''Timeout on acquiring lock {lock_id} on {lock_filename}''' ) raise Timeout(self._lock_file ) else: logger().debug( f'''Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...''' ) time.sleep(_lowerCAmelCase ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: _lowerCAmelCase = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def _snake_case ( self , _lowerCAmelCase=False ) -> int: with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: _lowerCAmelCase = id(self ) _lowerCAmelCase = self._lock_file logger().debug(f'''Attempting to release lock {lock_id} on {lock_filename}''' ) self._release() _lowerCAmelCase = 0 logger().debug(f'''Lock {lock_id} released on {lock_filename}''' ) return None def __enter__( self ) -> List[str]: self.acquire() return self def __exit__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Tuple: self.release() return None def __del__( self ) -> int: self.release(force=_lowerCAmelCase ) return None def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> str: _lowerCAmelCase = os.path.basename(_lowerCAmelCase ) if len(_lowerCAmelCase ) > max_length and max_length > 0: _lowerCAmelCase = os.path.dirname(_lowerCAmelCase ) _lowerCAmelCase = str(hash(_lowerCAmelCase ) ) _lowerCAmelCase = filename[: max_length - len(_lowerCAmelCase ) - 8] + "..." + hashed_filename + ".lock" return os.path.join(_lowerCAmelCase , _lowerCAmelCase ) else: return path class lowerCAmelCase_ ( __magic_name__ ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase=-1 , _lowerCAmelCase=None ) -> int: from .file_utils import relative_to_absolute_path super().__init__(_lowerCAmelCase , timeout=_lowerCAmelCase , max_filename_length=_lowerCAmelCase ) _lowerCAmelCase = "\\\\?\\" + relative_to_absolute_path(self.lock_file ) def _snake_case ( self ) -> Tuple: _lowerCAmelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: _lowerCAmelCase = os.open(self._lock_file , _lowerCAmelCase ) except OSError: pass else: try: msvcrt.locking(_lowerCAmelCase , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(_lowerCAmelCase ) else: _lowerCAmelCase = fd return None def _snake_case ( self ) -> Optional[int]: _lowerCAmelCase = self._lock_file_fd _lowerCAmelCase = None msvcrt.locking(_lowerCAmelCase , msvcrt.LK_UNLCK , 1 ) os.close(_lowerCAmelCase ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class lowerCAmelCase_ ( __magic_name__ ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase=-1 , _lowerCAmelCase=None ) -> Any: _lowerCAmelCase = os.statvfs(os.path.dirname(_lowerCAmelCase ) ).f_namemax super().__init__(_lowerCAmelCase , timeout=_lowerCAmelCase , max_filename_length=_lowerCAmelCase ) def _snake_case ( self ) -> Any: _lowerCAmelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC _lowerCAmelCase = os.open(self._lock_file , _lowerCAmelCase ) try: fcntl.flock(_lowerCAmelCase , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(_lowerCAmelCase ) else: _lowerCAmelCase = fd return None def _snake_case ( self ) -> int: # Do not remove the lockfile: # # https://github.com/benediktschmitt/py-filelock/issues/31 # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition _lowerCAmelCase = self._lock_file_fd _lowerCAmelCase = None fcntl.flock(_lowerCAmelCase , fcntl.LOCK_UN ) os.close(_lowerCAmelCase ) return None class lowerCAmelCase_ ( __magic_name__ ): def _snake_case ( self ) -> int: _lowerCAmelCase = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: _lowerCAmelCase = os.open(self._lock_file , _lowerCAmelCase ) except OSError: pass else: _lowerCAmelCase = fd return None def _snake_case ( self ) -> str: os.close(self._lock_file_fd ) _lowerCAmelCase = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None _SCREAMING_SNAKE_CASE = None if msvcrt: _SCREAMING_SNAKE_CASE = WindowsFileLock elif fcntl: _SCREAMING_SNAKE_CASE = UnixFileLock else: _SCREAMING_SNAKE_CASE = SoftFileLock if warnings is not None: warnings.warn("only soft file lock is available")
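# Minimal usage sketch for the FileLock alias selected above (Windows, Unix,
# or soft lock depending on what the platform provides); the file names are
# illustrative.
lock = FileLock("resource.txt.lock", timeout=5)
with lock:
    # Exclusive section: a second acquirer blocks for up to 5 seconds and
    # then raises Timeout.
    with open("resource.txt", "a") as f:
        f.write("hello\n")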
18
'''simple docstring''' import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = {"vocab_file": "spiece.model"} _SCREAMING_SNAKE_CASE = { "vocab_file": { "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model", } } class lowerCAmelCase_ ( __magic_name__ ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<sep>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase="<cls>" , _lowerCAmelCase="<mask>" , _lowerCAmelCase=["<eop>", "<eod>"] , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> None: _lowerCAmelCase = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else mask_token _lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=_lowerCAmelCase , remove_space=_lowerCAmelCase , keep_accents=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , ) _lowerCAmelCase = 3 _lowerCAmelCase = do_lower_case _lowerCAmelCase = remove_space _lowerCAmelCase = keep_accents _lowerCAmelCase = vocab_file _lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_lowerCAmelCase ) try: import jieba except ModuleNotFoundError as error: raise error.__class__( "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. " "See https://pypi.org/project/jieba/ for installation." 
) _lowerCAmelCase = jieba _lowerCAmelCase = str.maketrans(" \n" , "\u2582\u2583" ) @property # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size def _snake_case ( self ) -> Optional[int]: return len(self.sp_model ) def _snake_case ( self ) -> Optional[int]: _lowerCAmelCase = {self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> Tuple: _lowerCAmelCase = self.__dict__.copy() _lowerCAmelCase = None return state def __setstate__( self , _lowerCAmelCase ) -> Dict: _lowerCAmelCase = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): _lowerCAmelCase = {} _lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _snake_case ( self , _lowerCAmelCase ) -> str: if self.remove_space: _lowerCAmelCase = " ".join(inputs.strip().split() ) else: _lowerCAmelCase = inputs _lowerCAmelCase = outputs.replace("``" , "\"" ).replace("''" , "\"" ) if not self.keep_accents: _lowerCAmelCase = unicodedata.normalize("NFKD" , _lowerCAmelCase ) _lowerCAmelCase = "".join([c for c in outputs if not unicodedata.combining(_lowerCAmelCase )] ) if self.do_lower_case: _lowerCAmelCase = outputs.lower() return outputs def _snake_case ( self , _lowerCAmelCase ) -> List[str]: _lowerCAmelCase = self.preprocess_text(_lowerCAmelCase ) _lowerCAmelCase = self.sp_model.encode(_lowerCAmelCase , out_type=_lowerCAmelCase ) _lowerCAmelCase = [] for piece in pieces: if len(_lowerCAmelCase ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit(): _lowerCAmelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(_lowerCAmelCase , "" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: _lowerCAmelCase = cur_pieces[1:] else: _lowerCAmelCase = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(_lowerCAmelCase ) else: new_pieces.append(_lowerCAmelCase ) return new_pieces def _snake_case ( self , _lowerCAmelCase ) -> str: return self.sp_model.PieceToId(_lowerCAmelCase ) def _snake_case ( self , _lowerCAmelCase ) -> Optional[Any]: return self.sp_model.IdToPiece(_lowerCAmelCase ) def _snake_case ( self , _lowerCAmelCase ) -> Optional[Any]: _lowerCAmelCase = "".join(_lowerCAmelCase ).replace(_lowerCAmelCase , " " ).strip() return out_string def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> List[int]: _lowerCAmelCase = [self.sep_token_id] _lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase ) if token_ids_a is not None: return ([0] * len(_lowerCAmelCase )) + [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] return ([0] * len(_lowerCAmelCase )) + [1, 1] def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> List[int]: _lowerCAmelCase = [self.sep_token_id] _lowerCAmelCase = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> Tuple[str]: if not os.path.isdir(_lowerCAmelCase ): 
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _lowerCAmelCase = os.path.join( _lowerCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_lowerCAmelCase , "wb" ) as fi: _lowerCAmelCase = self.sp_model.serialized_model_proto() fi.write(_lowerCAmelCase ) return (out_vocab_file,) def _snake_case ( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: _lowerCAmelCase = super()._decode(*_lowerCAmelCase , **_lowerCAmelCase ) _lowerCAmelCase = text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" ) return text
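# Usage sketch assuming the un-obfuscated `transformers.CpmTokenizer`
# (requires the `sentencepiece` and `jieba` dependencies mentioned above):
from transformers import CpmTokenizer

tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
ids = tokenizer.encode("机器学习很有趣")  # jieba pre-segments, then SentencePiece encodes
print(tokenizer.decode(ids))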
18
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { "microsoft/trocr-base-handwritten": ( "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json" ), # See all TrOCR models at https://huggingface.co/models?filter=trocr } class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : List[Any] = "trocr" __lowerCamelCase : Optional[int] = ["past_key_values"] __lowerCamelCase : List[str] = { "num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model", "num_hidden_layers": "decoder_layers", } def __init__( self , _lowerCAmelCase=50265 , _lowerCAmelCase=1024 , _lowerCAmelCase=12 , _lowerCAmelCase=16 , _lowerCAmelCase=4096 , _lowerCAmelCase="gelu" , _lowerCAmelCase=512 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=0.0 , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=1 , _lowerCAmelCase=0 , _lowerCAmelCase=2 , **_lowerCAmelCase , ) -> Any: _lowerCAmelCase = vocab_size _lowerCAmelCase = d_model _lowerCAmelCase = decoder_layers _lowerCAmelCase = decoder_attention_heads _lowerCAmelCase = decoder_ffn_dim _lowerCAmelCase = activation_function _lowerCAmelCase = max_position_embeddings _lowerCAmelCase = dropout _lowerCAmelCase = attention_dropout _lowerCAmelCase = activation_dropout _lowerCAmelCase = init_std _lowerCAmelCase = decoder_layerdrop _lowerCAmelCase = use_cache _lowerCAmelCase = scale_embedding _lowerCAmelCase = use_learned_position_embeddings _lowerCAmelCase = layernorm_embedding super().__init__( pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , decoder_start_token_id=_lowerCAmelCase , **_lowerCAmelCase , )
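# Usage sketch assuming the un-obfuscated `transformers.TrOCRConfig`. In
# practice this decoder config is paired with a vision encoder inside a
# VisionEncoderDecoderModel; here we only exercise the attribute aliases:
from transformers import TrOCRConfig

config = TrOCRConfig(d_model=256, decoder_layers=6, decoder_attention_heads=8)
print(config.hidden_size)          # 256, alias of d_model
print(config.num_attention_heads)  # 8, alias of decoder_attention_heads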
18
'''simple docstring''' from sklearn.metrics import mean_squared_error import datasets _SCREAMING_SNAKE_CASE = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n" _SCREAMING_SNAKE_CASE = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n" _SCREAMING_SNAKE_CASE = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. 
])}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): def _snake_case ( self ) -> Dict: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[ "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html" ] , ) def _snake_case ( self ) -> Tuple: if self.config_name == "multilist": return { "predictions": datasets.Sequence(datasets.Value("float" ) ), "references": datasets.Sequence(datasets.Value("float" ) ), } else: return { "predictions": datasets.Value("float" ), "references": datasets.Value("float" ), } def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase="uniform_average" , _lowerCAmelCase=True ) -> Union[str, Any]: _lowerCAmelCase = mean_squared_error( _lowerCAmelCase , _lowerCAmelCase , sample_weight=_lowerCAmelCase , multioutput=_lowerCAmelCase , squared=_lowerCAmelCase ) return {"mse": mse}
18
1
'''simple docstring'''
class lowerCAmelCase_:
    def __init__(self, _lowerCAmelCase) -> None:
        self.n = _lowerCAmelCase
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, _lowerCAmelCase):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = _lowerCAmelCase
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
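# Usage sketch for the circular queue above (method names as restored there):
q = lowerCAmelCase_(3)
q.enqueue("a").enqueue("b").enqueue("c")  # enqueue returns self, so calls chain
print(q.dequeue())        # a  -- front advances modulo capacity
q.enqueue("d")            # reuses the slot freed by the dequeue
print(len(q), q.first())  # 3 b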
18
'''simple docstring'''
def __a(numa: int, numb: int) -> bool:
    '''simple docstring'''
    return numa ^ numb < 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
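# Why the XOR test works: the sign bit of `numa ^ numb` is set exactly when
# the operands' sign bits differ, so the result is negative iff the signs differ.
print((-5 ^ 3) < 0)   # True  (different signs)
print((5 ^ 3) < 0)    # False (both positive)
print((-5 ^ -3) < 0)  # False (both negative)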
18
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, is_vision_available, ) _SCREAMING_SNAKE_CASE = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = ["ViTFeatureExtractor"] _SCREAMING_SNAKE_CASE = ["ViTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ "VIT_PRETRAINED_MODEL_ARCHIVE_LIST", "ViTForImageClassification", "ViTForMaskedImageModeling", "ViTModel", "ViTPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ "TFViTForImageClassification", "TFViTModel", "TFViTPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ "FlaxViTForImageClassification", "FlaxViTModel", "FlaxViTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_vit import ViTFeatureExtractor from .image_processing_vit import ViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit import ( VIT_PRETRAINED_MODEL_ARCHIVE_LIST, ViTForImageClassification, ViTForMaskedImageModeling, ViTModel, ViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
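# The _LazyModule wiring above keeps importing the package cheap: heavy
# submodules load only on first attribute access. A hedged sketch against
# an installed `transformers`:
from transformers.models import vit  # fast, nothing heavy imported yet

config_cls = vit.ViTConfig           # first access triggers the real import
print(config_cls.model_type)         # "vit"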
18
'''simple docstring'''
from __future__ import annotations


def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    '''simple docstring'''
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    print(p_series(nth_term, power))
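# Quick check of the generator above: the first term prints as "1" and
# later terms as "1 / n**p" strings.
print(p_series(3, 2))  # ['1', '1 / 4', '1 / 9']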
18
1
'''simple docstring''' # Usage: # ./gen-card-allenai-wmt16.py import os from pathlib import Path def __a(SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any] ): '''simple docstring''' _lowerCAmelCase = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, nicht wahr?", } # BLUE scores as follows: # "pair": [fairseq, transformers] _lowerCAmelCase = { "wmt16-en-de-dist-12-1": [28.3, 27.52], "wmt16-en-de-dist-6-1": [27.4, 27.11], "wmt16-en-de-12-1": [26.9, 25.75], } _lowerCAmelCase = F'''{src_lang}-{tgt_lang}''' _lowerCAmelCase = F''' --- language: - {src_lang} - {tgt_lang} thumbnail: tags: - translation - wmt16 - allenai license: apache-2.0 datasets: - wmt16 metrics: - bleu --- # FSMT ## Model description This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}. For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369). All 3 models are available: * [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1) * [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1) * [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1) ## Intended uses & limitations #### How to use ```python from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "allenai/{model_name}" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) input = "{texts[src_lang]}" input_ids = tokenizer.encode(input, return_tensors="pt") outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) print(decoded) # {texts[tgt_lang]} ``` #### Limitations and bias ## Training data Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369). ## Eval results Here are the BLEU scores: model | fairseq | transformers -------|---------|---------- {model_name} | {scores[model_name][0]} | {scores[model_name][1]} The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs. 
The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=5 mkdir -p $DATA_DIR sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` ## Data Sources - [training, etc.](http://www.statmt.org/wmt16/) - [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372) ### BibTeX entry and citation info ``` @misc{{kasai2020deep, title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}}, author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}}, year={{2020}}, eprint={{2006.10369}}, archivePrefix={{arXiv}}, primaryClass={{cs.CL}} }} ``` ''' model_card_dir.mkdir(parents=SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , "README.md" ) print(F'''Generating {path}''' ) with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" ) as f: f.write(SCREAMING_SNAKE_CASE_ ) # make sure we are under the root of the project _SCREAMING_SNAKE_CASE = Path(__file__).resolve().parent.parent.parent _SCREAMING_SNAKE_CASE = repo_dir / "model_cards" for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]: _SCREAMING_SNAKE_CASE = model_cards_dir / "allenai" / model_name write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
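# One detail worth noting in the template above: inside an f-string, literal
# braces must be doubled, which is why the BibTeX entry is written with
# `{{`/`}}` while `{pair}` and friends are substituted. A tiny demonstration:
pair = "en-de"
print(f"export PAIR={pair}; cite as @misc{{kasai2020deep, ...}}")
# -> export PAIR=en-de; cite as @misc{kasai2020deep, ...}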
18
'''simple docstring''' from ..utils import DummyObject, requires_backends class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : int = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : int = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Dict = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : int = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Optional[int] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: 
requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Union[str, Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Optional[int] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Union[str, Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(cls , ["torch"] ) def __a(*SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : int ): '''simple docstring''' requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] ) def __a(*SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : str ): '''simple docstring''' requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] ) def __a(*SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : List[Any] ): '''simple docstring''' requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] ) def __a(*SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : List[str] ): '''simple docstring''' requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] ) def __a(*SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : List[str] ): '''simple docstring''' requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] ) def __a(*SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : str ): '''simple docstring''' requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] ) def __a(*SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Dict ): '''simple docstring''' requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Dict = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Union[str, Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(cls , ["torch"] ) class 
lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Optional[Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Any = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Optional[int] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[str] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Any = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[str] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , 
**_lowerCAmelCase ) -> Any: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[str] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Optional[int] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Union[str, Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Optional[int] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : str = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Tuple = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Union[str, Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(cls , ["torch"] ) 
@classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[str] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Any = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Union[str, Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Union[str, Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : int = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Tuple = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : str = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Dict = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: 
requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Any = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[str] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : int = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Dict = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : str = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : int = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(self , ["torch"] ) @classmethod def 
_snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[str] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Tuple = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Union[str, Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : str = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Union[str, Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: requires_backends(cls , ["torch"] )
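# ---------------------------------------------------------------------------
# The placeholder classes above follow the "dummy objects" pattern used by
# diffusers/transformers: classes that raise a clear error the moment they
# are touched while an optional backend (here, torch) is missing. A minimal,
# self-contained sketch of that mechanism follows; `requires_backends` and
# `DummyObject` are assumptions modelled on transformers.utils (the mangled
# listing binds the metaclass to `__magic_name__`), not definitions taken
# from this file.
import importlib.util


def requires_backends(obj, backends):
    # Raise an informative ImportError if any required package is absent.
    name = obj.__name__ if hasattr(obj, "__name__") else type(obj).__name__
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires the missing backend(s): {', '.join(missing)}")


class DummyObject(type):
    # Metaclass: any attribute access on the class re-checks the backends,
    # so even `ExamplePipeline.from_pretrained` fails fast without torch.
    def __getattr__(cls, key):
        requires_backends(cls, cls._backends)


class ExamplePipeline(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, self._backends)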
'''simple docstring''' import pickle import numpy as np from matplotlib import pyplot as plt class lowerCAmelCase_ : def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=0.2 , _lowerCAmelCase=0.2 ) -> List[Any]: _lowerCAmelCase = bp_numa _lowerCAmelCase = bp_numa _lowerCAmelCase = bp_numa _lowerCAmelCase = conva_get[:2] _lowerCAmelCase = conva_get[2] _lowerCAmelCase = size_pa _lowerCAmelCase = rate_w _lowerCAmelCase = rate_t _lowerCAmelCase = [ np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] _lowerCAmelCase = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) _lowerCAmelCase = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) _lowerCAmelCase = -2 * np.random.rand(self.conva[1] ) + 1 _lowerCAmelCase = -2 * np.random.rand(self.num_bpa ) + 1 _lowerCAmelCase = -2 * np.random.rand(self.num_bpa ) + 1 def _snake_case ( self , _lowerCAmelCase ) -> List[str]: # save model dict with pickle _lowerCAmelCase = { "num_bp1": self.num_bpa, "num_bp2": self.num_bpa, "num_bp3": self.num_bpa, "conv1": self.conva, "step_conv1": self.step_conva, "size_pooling1": self.size_poolinga, "rate_weight": self.rate_weight, "rate_thre": self.rate_thre, "w_conv1": self.w_conva, "wkj": self.wkj, "vji": self.vji, "thre_conv1": self.thre_conva, "thre_bp2": self.thre_bpa, "thre_bp3": self.thre_bpa, } with open(_lowerCAmelCase , "wb" ) as f: pickle.dump(_lowerCAmelCase , _lowerCAmelCase ) print(f'''Model saved: {save_path}''' ) @classmethod def _snake_case ( cls , _lowerCAmelCase ) -> Any: # read saved model with open(_lowerCAmelCase , "rb" ) as f: _lowerCAmelCase = pickle.load(_lowerCAmelCase ) # noqa: S301 _lowerCAmelCase = model_dic.get("conv1" ) conv_get.append(model_dic.get("step_conv1" ) ) _lowerCAmelCase = model_dic.get("size_pooling1" ) _lowerCAmelCase = model_dic.get("num_bp1" ) _lowerCAmelCase = model_dic.get("num_bp2" ) _lowerCAmelCase = model_dic.get("num_bp3" ) _lowerCAmelCase = model_dic.get("rate_weight" ) _lowerCAmelCase = model_dic.get("rate_thre" ) # create model instance _lowerCAmelCase = CNN(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # modify model parameter _lowerCAmelCase = model_dic.get("w_conv1" ) _lowerCAmelCase = model_dic.get("wkj" ) _lowerCAmelCase = model_dic.get("vji" ) _lowerCAmelCase = model_dic.get("thre_conv1" ) _lowerCAmelCase = model_dic.get("thre_bp2" ) _lowerCAmelCase = model_dic.get("thre_bp3" ) return conv_ins def _snake_case ( self , _lowerCAmelCase ) -> List[str]: return 1 / (1 + np.exp(-1 * x )) def _snake_case ( self , _lowerCAmelCase ) -> Tuple: return round(_lowerCAmelCase , 3 ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: # convolution process _lowerCAmelCase = convs[0] _lowerCAmelCase = convs[1] _lowerCAmelCase = np.shape(_lowerCAmelCase )[0] # get the data slice of original image data, data_focus _lowerCAmelCase = [] for i_focus in range(0 , size_data - size_conv + 1 , _lowerCAmelCase ): for j_focus in range(0 , size_data - size_conv + 1 , _lowerCAmelCase ): _lowerCAmelCase = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(_lowerCAmelCase ) # calculate the feature map of every single kernel, and saved as list of matrix _lowerCAmelCase = [] _lowerCAmelCase = int((size_data - size_conv) / conv_step + 1 ) for i_map in 
range(_lowerCAmelCase ): _lowerCAmelCase = [] for i_focus in range(len(_lowerCAmelCase ) ): _lowerCAmelCase = ( np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(_lowerCAmelCase ) ) _lowerCAmelCase = np.asmatrix(_lowerCAmelCase ).reshape( _lowerCAmelCase , _lowerCAmelCase ) data_featuremap.append(_lowerCAmelCase ) # expanding the data slice to One dimenssion _lowerCAmelCase = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(_lowerCAmelCase ) ) _lowerCAmelCase = np.asarray(_lowerCAmelCase ) return focus_list, data_featuremap def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase="average_pool" ) -> Any: # pooling process _lowerCAmelCase = len(featuremaps[0] ) _lowerCAmelCase = int(size_map / size_pooling ) _lowerCAmelCase = [] for i_map in range(len(_lowerCAmelCase ) ): _lowerCAmelCase = featuremaps[i_map] _lowerCAmelCase = [] for i_focus in range(0 , _lowerCAmelCase , _lowerCAmelCase ): for j_focus in range(0 , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(_lowerCAmelCase ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(_lowerCAmelCase ) ) _lowerCAmelCase = np.asmatrix(_lowerCAmelCase ).reshape(_lowerCAmelCase , _lowerCAmelCase ) featuremap_pooled.append(_lowerCAmelCase ) return featuremap_pooled def _snake_case ( self , _lowerCAmelCase ) -> List[str]: # expanding three dimension data to one dimension list _lowerCAmelCase = [] for i in range(len(_lowerCAmelCase ) ): _lowerCAmelCase = np.shape(data[i] ) _lowerCAmelCase = data[i].reshape(1 , shapes[0] * shapes[1] ) _lowerCAmelCase = data_listed.getA().tolist()[0] data_expanded.extend(_lowerCAmelCase ) _lowerCAmelCase = np.asarray(_lowerCAmelCase ) return data_expanded def _snake_case ( self , _lowerCAmelCase ) -> Any: # expanding matrix to one dimension list _lowerCAmelCase = np.asarray(_lowerCAmelCase ) _lowerCAmelCase = np.shape(_lowerCAmelCase ) _lowerCAmelCase = data_mat.reshape(1 , shapes[0] * shapes[1] ) return data_expanded def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: _lowerCAmelCase = [] _lowerCAmelCase = 0 for i_map in range(_lowerCAmelCase ): _lowerCAmelCase = np.ones((size_map, size_map) ) for i in range(0 , _lowerCAmelCase , _lowerCAmelCase ): for j in range(0 , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = pd_pool[ i_pool ] _lowerCAmelCase = i_pool + 1 _lowerCAmelCase = np.multiply( _lowerCAmelCase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) ) pd_all.append(_lowerCAmelCase ) return pd_all def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=bool ) -> Tuple: # model traning print("----------------------Start Training-------------------------" ) print((" - - Shape: Train_Data ", np.shape(_lowerCAmelCase )) ) print((" - - Shape: Teach_Data ", np.shape(_lowerCAmelCase )) ) _lowerCAmelCase = 0 _lowerCAmelCase = [] _lowerCAmelCase = 10000 while rp < n_repeat and mse >= error_accuracy: _lowerCAmelCase = 0 print(f'''-------------Learning Time {rp}--------------''' ) for p in range(len(_lowerCAmelCase ) ): # print('------------Learning Image: %d--------------'%p) _lowerCAmelCase = np.asmatrix(datas_train[p] ) _lowerCAmelCase = np.asarray(datas_teach[p] ) 
_lowerCAmelCase , _lowerCAmelCase = self.convolute( _lowerCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) _lowerCAmelCase = self.pooling(_lowerCAmelCase , self.size_poolinga ) _lowerCAmelCase = np.shape(_lowerCAmelCase ) _lowerCAmelCase = self._expand(_lowerCAmelCase ) _lowerCAmelCase = data_bp_input _lowerCAmelCase = np.dot(_lowerCAmelCase , self.vji.T ) - self.thre_bpa _lowerCAmelCase = self.sig(_lowerCAmelCase ) _lowerCAmelCase = np.dot(_lowerCAmelCase , self.wkj.T ) - self.thre_bpa _lowerCAmelCase = self.sig(_lowerCAmelCase ) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- _lowerCAmelCase = np.multiply( (data_teach - bp_outa) , np.multiply(_lowerCAmelCase , (1 - bp_outa) ) ) _lowerCAmelCase = np.multiply( np.dot(_lowerCAmelCase , self.wkj ) , np.multiply(_lowerCAmelCase , (1 - bp_outa) ) ) _lowerCAmelCase = np.dot(_lowerCAmelCase , self.vji ) _lowerCAmelCase = pd_i_all / (self.size_poolinga * self.size_poolinga) _lowerCAmelCase = pd_conva_pooled.T.getA().tolist() _lowerCAmelCase = self._calculate_gradient_from_pool( _lowerCAmelCase , _lowerCAmelCase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , ) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): _lowerCAmelCase = self._expand_mat(pd_conva_all[k_conv] ) _lowerCAmelCase = self.rate_weight * np.dot(_lowerCAmelCase , _lowerCAmelCase ) _lowerCAmelCase = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) _lowerCAmelCase = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer _lowerCAmelCase = self.wkj + pd_k_all.T * bp_outa * self.rate_weight _lowerCAmelCase = self.vji + pd_j_all.T * bp_outa * self.rate_weight _lowerCAmelCase = self.thre_bpa - pd_k_all * self.rate_thre _lowerCAmelCase = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image _lowerCAmelCase = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) _lowerCAmelCase = rp + 1 _lowerCAmelCase = error_count / patterns all_mse.append(_lowerCAmelCase ) def draw_error(): _lowerCAmelCase = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(_lowerCAmelCase , "+-" ) plt.plot(_lowerCAmelCase , "r--" ) plt.xlabel("Learning Times" ) plt.ylabel("All_mse" ) plt.grid(_lowerCAmelCase , alpha=0.5 ) plt.show() print("------------------Training Complished---------------------" ) print((" - - Training epoch: ", rp, f''' - - Mse: {mse:.6f}''') ) if draw_e: draw_error() return mse def _snake_case ( self , _lowerCAmelCase ) -> Optional[Any]: # model predict _lowerCAmelCase = [] print("-------------------Start Testing-------------------------" ) print((" - - Shape: Test_Data ", np.shape(_lowerCAmelCase )) ) for p in range(len(_lowerCAmelCase ) ): _lowerCAmelCase = np.asmatrix(datas_test[p] ) _lowerCAmelCase , _lowerCAmelCase = self.convolute( _lowerCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) _lowerCAmelCase = self.pooling(_lowerCAmelCase , self.size_poolinga ) _lowerCAmelCase = self._expand(_lowerCAmelCase ) _lowerCAmelCase = data_bp_input _lowerCAmelCase = bp_outa * self.vji.T - self.thre_bpa _lowerCAmelCase = self.sig(_lowerCAmelCase ) _lowerCAmelCase = bp_outa * self.wkj.T - self.thre_bpa _lowerCAmelCase = self.sig(_lowerCAmelCase ) produce_out.extend(bp_outa.getA().tolist() ) _lowerCAmelCase = 
[list(map(self.do_round , _lowerCAmelCase ) ) for each in produce_out] return np.asarray(_lowerCAmelCase ) def _snake_case ( self , _lowerCAmelCase ) -> Tuple: # return the data of image after convoluting process so we can check it out _lowerCAmelCase = np.asmatrix(_lowerCAmelCase ) _lowerCAmelCase , _lowerCAmelCase = self.convolute( _lowerCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) _lowerCAmelCase = self.pooling(_lowerCAmelCase , self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
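# ---------------------------------------------------------------------------
# Hedged usage sketch for the CNN class above. The listing's identifiers are
# machine-mangled (every method appears as `_snake_case`, every local as
# `_lowerCAmelCase`), so the call names below follow the docstrings and
# print statements ("Start Training", "Model saved", "Start Testing") and
# the upstream TheAlgorithms implementation this file appears to derive
# from; treat them as assumptions. Shapes and hyperparameters are
# illustrative only.
def _cnn_usage_sketch():
    # Pick a first fully connected width that matches the flattened
    # conv+pool output: 10x10 input, 3x3 kernel, step 1 -> 8x8 maps;
    # 2x2 pooling -> 4x4; 2 kernels -> 2 * 4 * 4 = 32 features.
    cnn = CNN(
        [3, 2, 1],  # kernel size 3, 2 feature maps, convolution step 1
        2,          # 2x2 pooling window (argument order per the pickle loader)
        32, 8, 2,   # widths of the three fully connected layers
        0.2, 0.2,   # weight / threshold learning rates
    )
    data = [np.random.rand(10, 10) for _ in range(4)]  # toy 10x10 "images"
    teach = [np.random.rand(2) for _ in range(4)]      # toy 2-dim targets
    # patterns, train data, targets, n_repeat, error_accuracy, draw_e
    cnn.train(len(data), data, teach, 10, 1.0, False)
    cnn.save_model("cnn_model.pkl")
    restored = CNN.read_model("cnn_model.pkl")
    return restored.predict(data)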
'''simple docstring''' import argparse import json import math import os import time import traceback import zipfile from collections import Counter import requests def __a(SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Tuple=None ): '''simple docstring''' _lowerCAmelCase = None if token is not None: _lowerCAmelCase = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''} _lowerCAmelCase = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100''' _lowerCAmelCase = requests.get(SCREAMING_SNAKE_CASE_ , headers=SCREAMING_SNAKE_CASE_ ).json() _lowerCAmelCase = {} try: job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} ) _lowerCAmelCase = math.ceil((result["total_count"] - 100) / 100 ) for i in range(SCREAMING_SNAKE_CASE_ ): _lowerCAmelCase = requests.get(url + F'''&page={i + 2}''' , headers=SCREAMING_SNAKE_CASE_ ).json() job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} ) return job_links except Exception: print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' ) return {} def __a(SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict=None ): '''simple docstring''' _lowerCAmelCase = None if token is not None: _lowerCAmelCase = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''} _lowerCAmelCase = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100''' _lowerCAmelCase = requests.get(SCREAMING_SNAKE_CASE_ , headers=SCREAMING_SNAKE_CASE_ ).json() _lowerCAmelCase = {} try: artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} ) _lowerCAmelCase = math.ceil((result["total_count"] - 100) / 100 ) for i in range(SCREAMING_SNAKE_CASE_ ): _lowerCAmelCase = requests.get(url + F'''&page={i + 2}''' , headers=SCREAMING_SNAKE_CASE_ ).json() artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} ) return artifacts except Exception: print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' ) return {} def __a(SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Dict ): '''simple docstring''' _lowerCAmelCase = None if token is not None: _lowerCAmelCase = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''} _lowerCAmelCase = requests.get(SCREAMING_SNAKE_CASE_ , headers=SCREAMING_SNAKE_CASE_ , allow_redirects=SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = result.headers["Location"] _lowerCAmelCase = requests.get(SCREAMING_SNAKE_CASE_ , allow_redirects=SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , F'''{artifact_name}.zip''' ) with open(SCREAMING_SNAKE_CASE_ , "wb" ) as fp: fp.write(response.content ) def __a(SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any]=None ): '''simple docstring''' _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = None with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ ) as z: for filename in z.namelist(): if not os.path.isdir(SCREAMING_SNAKE_CASE_ ): # read the file if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]: with z.open(SCREAMING_SNAKE_CASE_ ) as f: for line in f: _lowerCAmelCase = line.decode("UTF-8" ).strip() if filename == "failures_line.txt": try: # `error_line` is the place where `error` occurs _lowerCAmelCase = line[: line.index(": " 
)] _lowerCAmelCase = line[line.index(": " ) + len(": " ) :] errors.append([error_line, error] ) except Exception: # skip un-related lines pass elif filename == "summary_short.txt" and line.startswith("FAILED " ): # `test` is the test method that failed _lowerCAmelCase = line[len("FAILED " ) :] failed_tests.append(SCREAMING_SNAKE_CASE_ ) elif filename == "job_name.txt": _lowerCAmelCase = line if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ): raise ValueError( F'''`errors` and `failed_tests` should have the same number of elements. Got {len(SCREAMING_SNAKE_CASE_ )} for `errors` ''' F'''and {len(SCREAMING_SNAKE_CASE_ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some''' " problem." ) _lowerCAmelCase = None if job_name and job_links: _lowerCAmelCase = job_links.get(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # A list with elements of the form (line of error, error, failed test) _lowerCAmelCase = [x + [y] + [job_link] for x, y in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )] return result def __a(SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple=None ): '''simple docstring''' _lowerCAmelCase = [] _lowerCAmelCase = [os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for p in os.listdir(SCREAMING_SNAKE_CASE_ ) if p.endswith(".zip" )] for p in paths: errors.extend(get_errors_from_single_artifact(SCREAMING_SNAKE_CASE_ , job_links=SCREAMING_SNAKE_CASE_ ) ) return errors def __a(SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str=None ): '''simple docstring''' _lowerCAmelCase = Counter() counter.update([x[1] for x in logs] ) _lowerCAmelCase = counter.most_common() _lowerCAmelCase = {} for error, count in counts: if error_filter is None or error not in error_filter: _lowerCAmelCase = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]} _lowerCAmelCase = dict(sorted(r.items() , key=lambda SCREAMING_SNAKE_CASE_ : item[1]["count"] , reverse=SCREAMING_SNAKE_CASE_ ) ) return r def __a(SCREAMING_SNAKE_CASE_ : List[str] ): '''simple docstring''' _lowerCAmelCase = test.split("::" )[0] if test.startswith("tests/models/" ): _lowerCAmelCase = test.split("/" )[2] else: _lowerCAmelCase = None return test def __a(SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple=None ): '''simple docstring''' _lowerCAmelCase = [(x[0], x[1], get_model(x[2] )) for x in logs] _lowerCAmelCase = [x for x in logs if x[2] is not None] _lowerCAmelCase = {x[2] for x in logs} _lowerCAmelCase = {} for test in tests: _lowerCAmelCase = Counter() # count by errors in `test` counter.update([x[1] for x in logs if x[2] == test] ) _lowerCAmelCase = counter.most_common() _lowerCAmelCase = {error: count for error, count in counts if (error_filter is None or error not in error_filter)} _lowerCAmelCase = sum(error_counts.values() ) if n_errors > 0: _lowerCAmelCase = {"count": n_errors, "errors": error_counts} _lowerCAmelCase = dict(sorted(r.items() , key=lambda SCREAMING_SNAKE_CASE_ : item[1]["count"] , reverse=SCREAMING_SNAKE_CASE_ ) ) return r def __a(SCREAMING_SNAKE_CASE_ : Optional[int] ): '''simple docstring''' _lowerCAmelCase = "| no. 
| error | status |" _lowerCAmelCase = "|-:|:-|:-|" _lowerCAmelCase = [header, sep] for error in reduced_by_error: _lowerCAmelCase = reduced_by_error[error]["count"] _lowerCAmelCase = F'''| {count} | {error[:100]} | |''' lines.append(SCREAMING_SNAKE_CASE_ ) return "\n".join(SCREAMING_SNAKE_CASE_ ) def __a(SCREAMING_SNAKE_CASE_ : Optional[int] ): '''simple docstring''' _lowerCAmelCase = "| model | no. of errors | major error | count |" _lowerCAmelCase = "|-:|-:|-:|-:|" _lowerCAmelCase = [header, sep] for model in reduced_by_model: _lowerCAmelCase = reduced_by_model[model]["count"] _lowerCAmelCase , _lowerCAmelCase = list(reduced_by_model[model]["errors"].items() )[0] _lowerCAmelCase = F'''| {model} | {count} | {error[:60]} | {_count} |''' lines.append(SCREAMING_SNAKE_CASE_ ) return "\n".join(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE = argparse.ArgumentParser() # Required parameters parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.") parser.add_argument( "--output_dir", type=str, required=True, help="Where to store the downloaded artifacts and other result files.", ) parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.") _SCREAMING_SNAKE_CASE = parser.parse_args() os.makedirs(args.output_dir, exist_ok=True) _SCREAMING_SNAKE_CASE = get_job_links(args.workflow_run_id, token=args.token) _SCREAMING_SNAKE_CASE = {} # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee. # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`. if _job_links: for k, v in _job_links.items(): # This is how GitHub actions combine job names. if " / " in k: _SCREAMING_SNAKE_CASE = k.find(" / ") _SCREAMING_SNAKE_CASE = k[index + len(" / ") :] _SCREAMING_SNAKE_CASE = v with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp: json.dump(job_links, fp, ensure_ascii=False, indent=4) _SCREAMING_SNAKE_CASE = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) for idx, (name, url) in enumerate(artifacts.items()): download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) _SCREAMING_SNAKE_CASE = get_all_errors(args.output_dir, job_links=job_links) # `e[1]` is the error _SCREAMING_SNAKE_CASE = Counter() counter.update([e[1] for e in errors]) # print the top 30 most common test errors _SCREAMING_SNAKE_CASE = counter.most_common(30) for item in most_common: print(item) with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp: json.dump(errors, fp, ensure_ascii=False, indent=4) _SCREAMING_SNAKE_CASE = reduce_by_error(errors) _SCREAMING_SNAKE_CASE = reduce_by_model(errors) _SCREAMING_SNAKE_CASE = make_github_table(reduced_by_error) _SCREAMING_SNAKE_CASE = make_github_table_per_model(reduced_by_model) with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp: fp.write(sa) with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp: fp.write(sa)
import torch

from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde

from .test_schedulers import SchedulerCommonTest


@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
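# ---------------------------------------------------------------------------
# A minimal sketch of the denoising loop the tests above exercise, outside
# the test harness. The random tensors stand in for a real denoiser and a
# real latent; shapes and the step count are illustrative assumptions.
def _sde_scheduler_sketch():
    scheduler = DPMSolverSDEScheduler(num_train_timesteps=1100, noise_sampler_seed=0)
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = torch.randn_like(model_input)  # placeholder for a UNet's output
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample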
'''simple docstring''' import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : List[str] = (DPMSolverSinglestepScheduler,) __lowerCamelCase : int = (("num_inference_steps", 25),) def _snake_case ( self , **_lowerCAmelCase ) -> Any: _lowerCAmelCase = { "num_train_timesteps": 1000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "solver_order": 2, "prediction_type": "epsilon", "thresholding": False, "sample_max_value": 1.0, "algorithm_type": "dpmsolver++", "solver_type": "midpoint", "lambda_min_clipped": -float("inf" ), "variance_type": None, } config.update(**_lowerCAmelCase ) return config def _snake_case ( self , _lowerCAmelCase=0 , **_lowerCAmelCase ) -> List[Any]: _lowerCAmelCase = dict(self.forward_default_kwargs ) _lowerCAmelCase = kwargs.pop("num_inference_steps" , _lowerCAmelCase ) _lowerCAmelCase = self.dummy_sample _lowerCAmelCase = 0.1 * sample _lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: _lowerCAmelCase = self.get_scheduler_config(**_lowerCAmelCase ) _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) scheduler.set_timesteps(_lowerCAmelCase ) # copy over dummy past residuals _lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_lowerCAmelCase ) _lowerCAmelCase = scheduler_class.from_pretrained(_lowerCAmelCase ) new_scheduler.set_timesteps(_lowerCAmelCase ) # copy over dummy past residuals _lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] _lowerCAmelCase , _lowerCAmelCase = sample, sample for t in range(_lowerCAmelCase , time_step + scheduler.config.solver_order + 1 ): _lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample _lowerCAmelCase = new_scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def _snake_case ( self ) -> int: pass def _snake_case ( self , _lowerCAmelCase=0 , **_lowerCAmelCase ) -> Optional[int]: _lowerCAmelCase = dict(self.forward_default_kwargs ) _lowerCAmelCase = kwargs.pop("num_inference_steps" , _lowerCAmelCase ) _lowerCAmelCase = self.dummy_sample _lowerCAmelCase = 0.1 * sample _lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: _lowerCAmelCase = self.get_scheduler_config() _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) scheduler.set_timesteps(_lowerCAmelCase ) # copy over dummy past residuals (must be after setting timesteps) _lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_lowerCAmelCase ) _lowerCAmelCase = scheduler_class.from_pretrained(_lowerCAmelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(_lowerCAmelCase ) # copy over dummy past residual (must be after setting timesteps) _lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] _lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample _lowerCAmelCase = new_scheduler.step(_lowerCAmelCase , _lowerCAmelCase , 
_lowerCAmelCase , **_lowerCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def _snake_case ( self , _lowerCAmelCase=None , **_lowerCAmelCase ) -> Tuple: if scheduler is None: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config(**_lowerCAmelCase ) _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config(**_lowerCAmelCase ) _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) _lowerCAmelCase = 10 _lowerCAmelCase = self.dummy_model() _lowerCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(_lowerCAmelCase ) for i, t in enumerate(scheduler.timesteps ): _lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase ) _lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample return sample def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) _lowerCAmelCase = 50 _lowerCAmelCase = self.dummy_model() _lowerCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(_lowerCAmelCase ) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:] ): _lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase ) _lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample _lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2574 ) < 1E-3 def _snake_case ( self ) -> Optional[Any]: for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=_lowerCAmelCase ) def _snake_case ( self ) -> List[Any]: # make sure that iterating over schedulers with same config names gives same results # for defaults _lowerCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) _lowerCAmelCase = self.full_loop(scheduler=_lowerCAmelCase ) _lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2791 ) < 1E-3 _lowerCAmelCase = DEISMultistepScheduler.from_config(scheduler.config ) _lowerCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config ) _lowerCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config ) _lowerCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config ) _lowerCAmelCase = self.full_loop(scheduler=_lowerCAmelCase ) _lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2791 ) < 1E-3 def _snake_case ( self ) -> str: self.check_over_configs(thresholding=_lowerCAmelCase ) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=_lowerCAmelCase , prediction_type=_lowerCAmelCase , sample_max_value=_lowerCAmelCase , algorithm_type="dpmsolver++" , solver_order=_lowerCAmelCase , solver_type=_lowerCAmelCase , ) def _snake_case ( self ) -> Dict: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_lowerCAmelCase ) def _snake_case ( self ) -> Union[str, Any]: for algorithm_type in ["dpmsolver", "dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=_lowerCAmelCase , solver_type=_lowerCAmelCase , prediction_type=_lowerCAmelCase , algorithm_type=_lowerCAmelCase , ) _lowerCAmelCase = self.full_loop( 
solver_order=_lowerCAmelCase , solver_type=_lowerCAmelCase , prediction_type=_lowerCAmelCase , algorithm_type=_lowerCAmelCase , ) assert not torch.isnan(_lowerCAmelCase ).any(), "Samples have nan numbers" def _snake_case ( self ) -> Optional[Any]: self.check_over_configs(lower_order_final=_lowerCAmelCase ) self.check_over_configs(lower_order_final=_lowerCAmelCase ) def _snake_case ( self ) -> Optional[Any]: self.check_over_configs(lambda_min_clipped=-float("inf" ) ) self.check_over_configs(lambda_min_clipped=-5.1 ) def _snake_case ( self ) -> str: self.check_over_configs(variance_type=_lowerCAmelCase ) self.check_over_configs(variance_type="learned_range" ) def _snake_case ( self ) -> int: for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=_lowerCAmelCase , time_step=0 ) def _snake_case ( self ) -> Any: _lowerCAmelCase = self.full_loop() _lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2791 ) < 1E-3 def _snake_case ( self ) -> List[str]: _lowerCAmelCase = self.full_loop(use_karras_sigmas=_lowerCAmelCase ) _lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2248 ) < 1E-3 def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = self.full_loop(prediction_type="v_prediction" ) _lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_mean.item() - 0.1453 ) < 1E-3 def _snake_case ( self ) -> Any: _lowerCAmelCase = self.full_loop(prediction_type="v_prediction" , use_karras_sigmas=_lowerCAmelCase ) _lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_mean.item() - 0.0649 ) < 1E-3 def _snake_case ( self ) -> List[Any]: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config(thresholding=_lowerCAmelCase , dynamic_thresholding_ratio=0 ) _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) _lowerCAmelCase = 10 _lowerCAmelCase = self.dummy_model() _lowerCAmelCase = self.dummy_sample_deter.half() scheduler.set_timesteps(_lowerCAmelCase ) for i, t in enumerate(scheduler.timesteps ): _lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase ) _lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample assert sample.dtype == torch.floataa
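# ---------------------------------------------------------------------------
# Hedged sketch of the two mechanics the tests above lean on: (1) a config
# save/load round trip via `save_config` / `from_pretrained`, and (2)
# rebuilding a different multistep scheduler from the same config via
# `from_config`, which the interchangeability test exercises. Paths and
# settings are illustrative.
def _scheduler_config_sketch():
    import tempfile

    scheduler = DPMSolverSinglestepScheduler(solver_order=2, algorithm_type="dpmsolver++")
    with tempfile.TemporaryDirectory() as tmpdir:
        scheduler.save_config(tmpdir)
        restored = DPMSolverSinglestepScheduler.from_pretrained(tmpdir)
    assert restored.config.solver_order == scheduler.config.solver_order

    # The schedulers imported at the top of this file share a config schema,
    # so one can be swapped for another mid-pipeline:
    multistep = DPMSolverMultistepScheduler.from_config(scheduler.config)
    unipc = UniPCMultistepScheduler.from_config(multistep.config)
    return unipc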
'''simple docstring''' import os import pickle import unittest from transformers import AutoTokenizer from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.models.bert_japanese.tokenization_bert_japanese import ( VOCAB_FILES_NAMES, BertJapaneseTokenizer, CharacterTokenizer, JumanppTokenizer, MecabTokenizer, SudachiTokenizer, WordpieceTokenizer, ) from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi from ...test_tokenization_common import TokenizerTesterMixin @custom_tokenizers class lowerCAmelCase_ ( __magic_name__ ,unittest.TestCase ): __lowerCamelCase : Union[str, Any] = BertJapaneseTokenizer __lowerCamelCase : List[Any] = False __lowerCamelCase : Any = True def _snake_case ( self ) -> List[Any]: super().setUp() _lowerCAmelCase = [ "[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは", "世界", "##世界", "、", "##、", "。", "##。", ] _lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) def _snake_case ( self , _lowerCAmelCase ) -> int: _lowerCAmelCase = "こんにちは、世界。 \nこんばんは、世界。" _lowerCAmelCase = "こんにちは 、 世界 。 こんばんは 、 世界 。" return input_text, output_text def _snake_case ( self , _lowerCAmelCase ) -> List[str]: _lowerCAmelCase , _lowerCAmelCase = self.get_input_output_texts(_lowerCAmelCase ) _lowerCAmelCase = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ) _lowerCAmelCase = tokenizer.decode(_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase ) return text, ids def _snake_case ( self ) -> Tuple: pass # TODO add if relevant def _snake_case ( self ) -> Optional[int]: pass # TODO add if relevant def _snake_case ( self ) -> int: pass # TODO add if relevant def _snake_case ( self ) -> Optional[int]: _lowerCAmelCase = self.tokenizer_class(self.vocab_file ) _lowerCAmelCase = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。" ) self.assertListEqual(_lowerCAmelCase , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) def _snake_case ( self ) -> int: _lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type="mecab" ) self.assertIsNotNone(_lowerCAmelCase ) _lowerCAmelCase = "こんにちは、世界。\nこんばんは、世界。" _lowerCAmelCase = tokenizer.tokenize(_lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) _lowerCAmelCase = os.path.join(self.tmpdirname , "tokenizer.bin" ) with open(_lowerCAmelCase , "wb" ) as handle: pickle.dump(_lowerCAmelCase , _lowerCAmelCase ) with open(_lowerCAmelCase , "rb" ) as handle: _lowerCAmelCase = pickle.load(_lowerCAmelCase ) _lowerCAmelCase = tokenizer_new.tokenize(_lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase ) def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = MecabTokenizer(mecab_dic="ipadic" ) self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , ) def _snake_case ( self ) -> Dict: try: _lowerCAmelCase = MecabTokenizer(mecab_dic="unidic_lite" ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", 
"iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , ) def _snake_case ( self ) -> Dict: try: _lowerCAmelCase = MecabTokenizer(mecab_dic="unidic" ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , ) def _snake_case ( self ) -> int: _lowerCAmelCase = MecabTokenizer(do_lower_case=_lowerCAmelCase , mecab_dic="ipadic" ) self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"] , ) def _snake_case ( self ) -> int: try: _lowerCAmelCase = MecabTokenizer( do_lower_case=_lowerCAmelCase , normalize_text=_lowerCAmelCase , mecab_option="-d /usr/local/lib/mecab/dic/jumandic" ) except RuntimeError: # if dict doesn't exist in the system, previous code raises this error. return self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"] , ) def _snake_case ( self ) -> int: _lowerCAmelCase = MecabTokenizer(normalize_text=_lowerCAmelCase , mecab_dic="ipadic" ) self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"] , ) @require_sudachi def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type="sudachi" ) self.assertIsNotNone(_lowerCAmelCase ) _lowerCAmelCase = "こんにちは、世界。\nこんばんは、世界。" _lowerCAmelCase = tokenizer.tokenize(_lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) _lowerCAmelCase = os.path.join(self.tmpdirname , "tokenizer.bin" ) with open(_lowerCAmelCase , "wb" ) as handle: pickle.dump(_lowerCAmelCase , _lowerCAmelCase ) with open(_lowerCAmelCase , "rb" ) as handle: _lowerCAmelCase = pickle.load(_lowerCAmelCase ) _lowerCAmelCase = tokenizer_new.tokenize(_lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase ) @require_sudachi def _snake_case ( self ) -> str: _lowerCAmelCase = SudachiTokenizer(sudachi_dict_type="core" ) self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , ) @require_sudachi def _snake_case ( self ) -> Dict: _lowerCAmelCase = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="A" ) self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国", "人", "参政", "権"] ) @require_sudachi def _snake_case ( self ) -> str: _lowerCAmelCase = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="B" ) self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人", "参政権"] ) @require_sudachi def _snake_case ( self ) -> List[Any]: _lowerCAmelCase = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="C" ) self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人参政権"] ) @require_sudachi def _snake_case ( self ) -> Any: _lowerCAmelCase = SudachiTokenizer(do_lower_case=_lowerCAmelCase , sudachi_dict_type="core" ) self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , ) @require_sudachi def _snake_case ( self ) -> Optional[int]: _lowerCAmelCase = 
SudachiTokenizer(normalize_text=_lowerCAmelCase , sudachi_dict_type="core" ) self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "] , ) @require_sudachi def _snake_case ( self ) -> List[str]: _lowerCAmelCase = SudachiTokenizer(trim_whitespace=_lowerCAmelCase , sudachi_dict_type="core" ) self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , ) @require_jumanpp def _snake_case ( self ) -> Any: _lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type="jumanpp" ) self.assertIsNotNone(_lowerCAmelCase ) _lowerCAmelCase = "こんにちは、世界。\nこんばんは、世界。" _lowerCAmelCase = tokenizer.tokenize(_lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) _lowerCAmelCase = os.path.join(self.tmpdirname , "tokenizer.bin" ) with open(_lowerCAmelCase , "wb" ) as handle: pickle.dump(_lowerCAmelCase , _lowerCAmelCase ) with open(_lowerCAmelCase , "rb" ) as handle: _lowerCAmelCase = pickle.load(_lowerCAmelCase ) _lowerCAmelCase = tokenizer_new.tokenize(_lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase ) @require_jumanpp def _snake_case ( self ) -> Optional[int]: _lowerCAmelCase = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , ) @require_jumanpp def _snake_case ( self ) -> str: _lowerCAmelCase = JumanppTokenizer(do_lower_case=_lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , ) @require_jumanpp def _snake_case ( self ) -> Tuple: _lowerCAmelCase = JumanppTokenizer(normalize_text=_lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , ) @require_jumanpp def _snake_case ( self ) -> Optional[Any]: _lowerCAmelCase = JumanppTokenizer(trim_whitespace=_lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"] , ) @require_jumanpp def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。" ) , ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"] , ) def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"] _lowerCAmelCase = {} for i, token in enumerate(_lowerCAmelCase ): _lowerCAmelCase = i _lowerCAmelCase = WordpieceTokenizer(vocab=_lowerCAmelCase , unk_token="[UNK]" ) self.assertListEqual(tokenizer.tokenize("" ) , [] ) self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こんにちは"] ) self.assertListEqual(tokenizer.tokenize("こんばんは" ) , ["こん", "##ばんは"] ) self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは" ) , ["こん", "##ばんは", "[UNK]", "こんにちは"] ) def _snake_case ( self ) -> Optional[Any]: _lowerCAmelCase 
= BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp" ) _lowerCAmelCase = tokenizer.subword_tokenizer _lowerCAmelCase = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。" ) self.assertListEqual(_lowerCAmelCase , ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"] ) _lowerCAmelCase = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは" ) self.assertListEqual(_lowerCAmelCase , ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"] ) def _snake_case ( self ) -> List[Any]: _lowerCAmelCase = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese" ) _lowerCAmelCase = tokenizer.encode("ありがとう。" , add_special_tokens=_lowerCAmelCase ) _lowerCAmelCase = tokenizer.encode("どういたしまして。" , add_special_tokens=_lowerCAmelCase ) _lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase ) _lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase , _lowerCAmelCase ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class lowerCAmelCase_ ( __magic_name__ ,unittest.TestCase ): __lowerCamelCase : Dict = BertJapaneseTokenizer __lowerCamelCase : Optional[int] = False def _snake_case ( self ) -> str: super().setUp() _lowerCAmelCase = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"] _lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) def _snake_case ( self , **_lowerCAmelCase ) -> List[Any]: return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="character" , **_lowerCAmelCase ) def _snake_case ( self , _lowerCAmelCase ) -> Optional[Any]: _lowerCAmelCase = "こんにちは、世界。 \nこんばんは、世界。" _lowerCAmelCase = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。" return input_text, output_text def _snake_case ( self ) -> int: pass # TODO add if relevant def _snake_case ( self ) -> List[str]: pass # TODO add if relevant def _snake_case ( self ) -> Optional[int]: pass # TODO add if relevant def _snake_case ( self ) -> Any: _lowerCAmelCase = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="character" ) _lowerCAmelCase = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。" ) self.assertListEqual( _lowerCAmelCase , ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] ) def _snake_case ( self ) -> Optional[Any]: _lowerCAmelCase = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"] _lowerCAmelCase = {} for i, token in enumerate(_lowerCAmelCase ): _lowerCAmelCase = i _lowerCAmelCase = CharacterTokenizer(vocab=_lowerCAmelCase , unk_token="[UNK]" ) self.assertListEqual(tokenizer.tokenize("" ) , [] ) self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こ", "ん", "に", "ち", "は"] ) self.assertListEqual(tokenizer.tokenize("こんにちほ" ) , ["こ", "ん", "に", "ち", "[UNK]"] ) def _snake_case ( self ) -> Tuple: _lowerCAmelCase = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char" ) _lowerCAmelCase = tokenizer.encode("ありがとう。" , add_special_tokens=_lowerCAmelCase ) _lowerCAmelCase = tokenizer.encode("どういたしまして。" , add_special_tokens=_lowerCAmelCase ) _lowerCAmelCase = 
tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase ) _lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase , _lowerCAmelCase ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class lowerCAmelCase_ ( unittest.TestCase ): def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = "cl-tohoku/bert-base-japanese" _lowerCAmelCase = AutoTokenizer.from_pretrained(_lowerCAmelCase ) self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase ) class lowerCAmelCase_ ( unittest.TestCase ): def _snake_case ( self ) -> List[Any]: _lowerCAmelCase = "cl-tohoku/bert-base-japanese" with self.assertLogs("transformers" , level="WARNING" ) as cm: BertTokenizer.from_pretrained(_lowerCAmelCase ) self.assertTrue( cm.records[0].message.startswith( "The tokenizer class you load from this checkpoint is not the same type as the class this function" " is called from." ) ) _lowerCAmelCase = "bert-base-cased" with self.assertLogs("transformers" , level="WARNING" ) as cm: BertJapaneseTokenizer.from_pretrained(_lowerCAmelCase ) self.assertTrue( cm.records[0].message.startswith( "The tokenizer class you load from this checkpoint is not the same type as the class this function" " is called from." ) )
18
'''simple docstring'''
from __future__ import annotations


def mean(nums: list) -> float:
    """Return the arithmetic mean of a list of numbers."""
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
18
1
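# The module above ends by running doctest.testmod(), but after the name
# mangling its docstring carries no examples, so that run is a no-op. A sketch
# of doctests the function could carry (the example values are ours, not from
# the source):
def mean(nums: list) -> float:
    """
    Find the mean of a list of numbers.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    >>> mean([])
    Traceback (most recent call last):
        ...
    ValueError: List is empty
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)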
'''simple docstring'''
from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    """Return True if the two strings are anagrams of each other."""
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict = defaultdict(int)

    # For each character in the input strings,
    # increment count in the corresponding entry
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
18
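# An equivalent way to express the anagram check above is multiset equality via
# collections.Counter; this is a sketch of an alternative, not part of the
# original snippet (the helper names are ours):
from collections import Counter


def _normalize(text: str) -> str:
    return text.lower().strip().replace(" ", "")


def check_anagrams_counter(first_str: str, second_str: str) -> bool:
    """Two strings are anagrams iff their normalized character multisets match."""
    return Counter(_normalize(first_str)) == Counter(_normalize(second_str))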
'''simple docstring''' import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class lowerCAmelCase_ ( unittest.TestCase ): def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _lowerCAmelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_lowerCAmelCase ) _lowerCAmelCase = -1 _lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCAmelCase ) _lowerCAmelCase = model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase ) _lowerCAmelCase = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: _lowerCAmelCase = TextStreamer(_lowerCAmelCase ) model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase , streamer=_lowerCAmelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _lowerCAmelCase = cs.out[:-1] self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _lowerCAmelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_lowerCAmelCase ) _lowerCAmelCase = -1 _lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCAmelCase ) _lowerCAmelCase = model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase ) _lowerCAmelCase = tokenizer.decode(greedy_ids[0] ) _lowerCAmelCase = TextIteratorStreamer(_lowerCAmelCase ) _lowerCAmelCase = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} _lowerCAmelCase = Thread(target=model.generate , kwargs=_lowerCAmelCase ) thread.start() _lowerCAmelCase = "" for new_text in streamer: streamer_text += new_text self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) def _snake_case ( self ) -> List[str]: _lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _lowerCAmelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_lowerCAmelCase ) _lowerCAmelCase = -1 _lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCAmelCase ) _lowerCAmelCase = model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase ) _lowerCAmelCase = greedy_ids[:, input_ids.shape[1] :] _lowerCAmelCase = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: _lowerCAmelCase = TextStreamer(_lowerCAmelCase , skip_prompt=_lowerCAmelCase ) model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase , streamer=_lowerCAmelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _lowerCAmelCase = cs.out[:-1] self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) def _snake_case ( self ) -> Dict: # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. 
Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them _lowerCAmelCase = AutoTokenizer.from_pretrained("distilgpt2" ) _lowerCAmelCase = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(_lowerCAmelCase ) _lowerCAmelCase = -1 _lowerCAmelCase = torch.ones((1, 5) , device=_lowerCAmelCase ).long() * model.config.bos_token_id with CaptureStdout() as cs: _lowerCAmelCase = TextStreamer(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase ) model.generate(_lowerCAmelCase , max_new_tokens=1 , do_sample=_lowerCAmelCase , streamer=_lowerCAmelCase ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token _lowerCAmelCase = cs.out[:-1] # Remove the final "\n" _lowerCAmelCase = tokenizer(_lowerCAmelCase , return_tensors="pt" ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _lowerCAmelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_lowerCAmelCase ) _lowerCAmelCase = -1 _lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCAmelCase ) _lowerCAmelCase = TextIteratorStreamer(_lowerCAmelCase , timeout=0.001 ) _lowerCAmelCase = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} _lowerCAmelCase = Thread(target=model.generate , kwargs=_lowerCAmelCase ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(_lowerCAmelCase ): _lowerCAmelCase = "" for new_text in streamer: streamer_text += new_text
18
1
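# Outside the test harness, the streaming pattern exercised by the tests above
# looks roughly like this; it mirrors the Thread-plus-kwargs usage and the tiny
# checkpoint from the tests, so treat it as a sketch rather than a recipe:
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

inputs = tokenizer("Hello", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer)

# generate() blocks, so it runs on a worker thread while the main thread
# consumes decoded text chunks from the streamer as they become available.
generation_kwargs = dict(**inputs, max_new_tokens=10, do_sample=False, streamer=streamer)
Thread(target=model.generate, kwargs=generation_kwargs).start()

for new_text in streamer:
    print(new_text, end="", flush=True)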
'''simple docstring'''


def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Return all primes up to and including num, via the sieve of Eratosthenes."""
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            # Mark every multiple of p, starting at p*p, as composite
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
18
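# As a sanity check (assuming prime_sieve_eratosthenes from the snippet above is
# in scope), the sieve can be cross-checked against naive trial division; the
# helper below is ours:
def is_prime_trial_division(n: int) -> bool:
    """Naive O(sqrt(n)) primality test, used only to verify the sieve."""
    return n >= 2 and all(n % d != 0 for d in range(2, int(n**0.5) + 1))


assert prime_sieve_eratosthenes(30) == [n for n in range(2, 31) if is_prime_trial_division(n)]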
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json", # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : Union[str, Any] = "blenderbot-small" __lowerCamelCase : Optional[Any] = ["past_key_values"] __lowerCamelCase : str = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self , _lowerCAmelCase=50265 , _lowerCAmelCase=512 , _lowerCAmelCase=8 , _lowerCAmelCase=2048 , _lowerCAmelCase=16 , _lowerCAmelCase=8 , _lowerCAmelCase=2048 , _lowerCAmelCase=16 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase="gelu" , _lowerCAmelCase=512 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1 , _lowerCAmelCase=False , _lowerCAmelCase=0 , _lowerCAmelCase=1 , _lowerCAmelCase=2 , _lowerCAmelCase=2 , **_lowerCAmelCase , ) -> Dict: _lowerCAmelCase = vocab_size _lowerCAmelCase = max_position_embeddings _lowerCAmelCase = d_model _lowerCAmelCase = encoder_ffn_dim _lowerCAmelCase = encoder_layers _lowerCAmelCase = encoder_attention_heads _lowerCAmelCase = decoder_ffn_dim _lowerCAmelCase = decoder_layers _lowerCAmelCase = decoder_attention_heads _lowerCAmelCase = dropout _lowerCAmelCase = attention_dropout _lowerCAmelCase = activation_dropout _lowerCAmelCase = activation_function _lowerCAmelCase = init_std _lowerCAmelCase = encoder_layerdrop _lowerCAmelCase = decoder_layerdrop _lowerCAmelCase = use_cache _lowerCAmelCase = encoder_layers _lowerCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , is_encoder_decoder=_lowerCAmelCase , decoder_start_token_id=_lowerCAmelCase , forced_eos_token_id=_lowerCAmelCase , **_lowerCAmelCase , ) class lowerCAmelCase_ ( __magic_name__ ): @property def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: _lowerCAmelCase = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: _lowerCAmelCase = {0: "batch"} _lowerCAmelCase = {0: "batch", 1: "past_decoder_sequence + sequence"} else: _lowerCAmelCase = {0: "batch", 1: "decoder_sequence"} _lowerCAmelCase = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(_lowerCAmelCase , direction="inputs" ) elif self.task == "causal-lm": # TODO: figure this case out. 
_lowerCAmelCase = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: _lowerCAmelCase , _lowerCAmelCase = self.num_layers for i in range(_lowerCAmelCase ): _lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"} _lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"} else: _lowerCAmelCase = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: _lowerCAmelCase = super().outputs else: _lowerCAmelCase = super(_lowerCAmelCase , self ).outputs if self.use_past: _lowerCAmelCase , _lowerCAmelCase = self.num_layers for i in range(_lowerCAmelCase ): _lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"} _lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"} return common_outputs def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]: _lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # Generate decoder inputs _lowerCAmelCase = seq_length if not self.use_past else 1 _lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) _lowerCAmelCase = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()} _lowerCAmelCase = dict(**_lowerCAmelCase , **_lowerCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch _lowerCAmelCase , _lowerCAmelCase = common_inputs["input_ids"].shape _lowerCAmelCase = common_inputs["decoder_input_ids"].shape[1] _lowerCAmelCase , _lowerCAmelCase = self.num_attention_heads _lowerCAmelCase = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) _lowerCAmelCase = decoder_seq_length + 3 _lowerCAmelCase = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) _lowerCAmelCase = torch.cat( [common_inputs["decoder_attention_mask"], torch.ones(_lowerCAmelCase , _lowerCAmelCase )] , dim=1 ) _lowerCAmelCase = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered _lowerCAmelCase , _lowerCAmelCase = self.num_layers _lowerCAmelCase = min(_lowerCAmelCase , _lowerCAmelCase ) _lowerCAmelCase = max(_lowerCAmelCase , _lowerCAmelCase ) - min_num_layers _lowerCAmelCase = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(_lowerCAmelCase ): common_inputs["past_key_values"].append( ( torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase ), ) ) # TODO: test this. 
_lowerCAmelCase = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(_lowerCAmelCase , _lowerCAmelCase ): common_inputs["past_key_values"].append((torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) ) return common_inputs def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]: _lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch _lowerCAmelCase , _lowerCAmelCase = common_inputs["input_ids"].shape # Not using the same length for past_key_values _lowerCAmelCase = seqlen + 2 _lowerCAmelCase , _lowerCAmelCase = self.num_layers _lowerCAmelCase , _lowerCAmelCase = self.num_attention_heads _lowerCAmelCase = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) _lowerCAmelCase = common_inputs["attention_mask"].dtype _lowerCAmelCase = torch.cat( [common_inputs["attention_mask"], torch.ones(_lowerCAmelCase , _lowerCAmelCase , dtype=_lowerCAmelCase )] , dim=1 ) _lowerCAmelCase = [ (torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) for _ in range(_lowerCAmelCase ) ] return common_inputs def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]: # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. 
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX _lowerCAmelCase = compute_effective_axis_dimension( _lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX _lowerCAmelCase = tokenizer.num_special_tokens_to_add(_lowerCAmelCase ) _lowerCAmelCase = compute_effective_axis_dimension( _lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowerCAmelCase ) # Generate dummy inputs according to compute batch and sequence _lowerCAmelCase = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size _lowerCAmelCase = dict(tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase ) ) return common_inputs def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]: if self.task in ["default", "seq2seq-lm"]: _lowerCAmelCase = self._generate_dummy_inputs_for_default_and_seqaseq_lm( _lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase ) elif self.task == "causal-lm": _lowerCAmelCase = self._generate_dummy_inputs_for_causal_lm( _lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase ) else: _lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase ) return common_inputs def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict: if self.task in ["default", "seq2seq-lm"]: _lowerCAmelCase = super()._flatten_past_key_values_(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) else: _lowerCAmelCase = super(_lowerCAmelCase , self )._flatten_past_key_values_( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
18
1
'''simple docstring''' import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCAmelCase_ : def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=32 , _lowerCAmelCase=2 , _lowerCAmelCase=3 , _lowerCAmelCase=16 , _lowerCAmelCase=[1, 2, 1] , _lowerCAmelCase=[2, 2, 4] , _lowerCAmelCase=2 , _lowerCAmelCase=2.0 , _lowerCAmelCase=True , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.1 , _lowerCAmelCase="gelu" , _lowerCAmelCase=False , _lowerCAmelCase=True , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-5 , _lowerCAmelCase=True , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase=10 , _lowerCAmelCase=8 , ) -> Dict: _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = image_size _lowerCAmelCase = patch_size _lowerCAmelCase = num_channels _lowerCAmelCase = embed_dim _lowerCAmelCase = depths _lowerCAmelCase = num_heads _lowerCAmelCase = window_size _lowerCAmelCase = mlp_ratio _lowerCAmelCase = qkv_bias _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = drop_path_rate _lowerCAmelCase = hidden_act _lowerCAmelCase = use_absolute_embeddings _lowerCAmelCase = patch_norm _lowerCAmelCase = layer_norm_eps _lowerCAmelCase = initializer_range _lowerCAmelCase = is_training _lowerCAmelCase = scope _lowerCAmelCase = use_labels _lowerCAmelCase = type_sequence_label_size _lowerCAmelCase = encoder_stride def _snake_case ( self ) -> List[Any]: _lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCAmelCase = None if self.use_labels: _lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCAmelCase = self.get_config() return config, pixel_values, labels def _snake_case ( self ) -> Union[str, Any]: return SwinvaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: _lowerCAmelCase = SwinvaModel(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model(_lowerCAmelCase ) _lowerCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) 
_lowerCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Tuple: _lowerCAmelCase = SwinvaForMaskedImageModeling(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model(_lowerCAmelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images _lowerCAmelCase = 1 _lowerCAmelCase = SwinvaForMaskedImageModeling(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _lowerCAmelCase = model(_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]: _lowerCAmelCase = self.type_sequence_label_size _lowerCAmelCase = SwinvaForImageClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model(_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _snake_case ( self ) -> Dict: _lowerCAmelCase = self.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = config_and_inputs _lowerCAmelCase = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase_ ( __magic_name__ ,__magic_name__ ,unittest.TestCase ): __lowerCamelCase : Optional[int] = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) __lowerCamelCase : Any = ( {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification} if is_torch_available() else {} ) __lowerCamelCase : Optional[int] = False __lowerCamelCase : List[Any] = False __lowerCamelCase : Dict = False __lowerCamelCase : str = False def _snake_case ( self ) -> Optional[int]: _lowerCAmelCase = SwinvaModelTester(self ) _lowerCAmelCase = ConfigTester(self , config_class=_lowerCAmelCase , embed_dim=37 ) def _snake_case ( self ) -> int: self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _snake_case ( self ) -> Tuple: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0." 
) def _snake_case ( self ) -> Optional[int]: pass @unittest.skip(reason="Swinv2 does not use inputs_embeds" ) def _snake_case ( self ) -> Tuple: pass def _snake_case ( self ) -> List[Any]: _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase = model_class(_lowerCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _lowerCAmelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) ) def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase = model_class(_lowerCAmelCase ) _lowerCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCAmelCase = [*signature.parameters.keys()] _lowerCAmelCase = ["pixel_values"] self.assertListEqual(arg_names[:1] , _lowerCAmelCase ) def _snake_case ( self ) -> int: _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase = True for model_class in self.all_model_classes: _lowerCAmelCase = True _lowerCAmelCase = False _lowerCAmelCase = True _lowerCAmelCase = model_class(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() with torch.no_grad(): _lowerCAmelCase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) ) _lowerCAmelCase = outputs.attentions _lowerCAmelCase = len(self.model_tester.depths ) self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase ) # check that output_attentions also work using config del inputs_dict["output_attentions"] _lowerCAmelCase = True _lowerCAmelCase = config.window_size**2 _lowerCAmelCase = model_class(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() with torch.no_grad(): _lowerCAmelCase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) ) _lowerCAmelCase = outputs.attentions self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) _lowerCAmelCase = len(_lowerCAmelCase ) # Check attention is always last and order is fine _lowerCAmelCase = True _lowerCAmelCase = True _lowerCAmelCase = model_class(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() with torch.no_grad(): _lowerCAmelCase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) ) if hasattr(self.model_tester , "num_hidden_states_types" ): _lowerCAmelCase = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states _lowerCAmelCase = 2 self.assertEqual(out_len + added_hidden_states , len(_lowerCAmelCase ) ) _lowerCAmelCase = outputs.attentions self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]: _lowerCAmelCase = model_class(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() with torch.no_grad(): _lowerCAmelCase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) ) _lowerCAmelCase = outputs.hidden_states _lowerCAmelCase = getattr( self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 ) 
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase ) # Swinv2 has a different seq_length _lowerCAmelCase = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) _lowerCAmelCase = outputs.reshaped_hidden_states self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase ) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = reshaped_hidden_states[0].shape _lowerCAmelCase = ( reshaped_hidden_states[0].view(_lowerCAmelCase , _lowerCAmelCase , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def _snake_case ( self ) -> int: _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: _lowerCAmelCase = True self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCAmelCase = True self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) def _snake_case ( self ) -> str: _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase = 3 _lowerCAmelCase = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) _lowerCAmelCase = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _lowerCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) _lowerCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: _lowerCAmelCase = True self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCAmelCase = True self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , (padded_height, padded_width) ) def _snake_case ( self ) -> Tuple: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCAmelCase ) def _snake_case ( self ) -> Optional[Any]: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase ) @slow def _snake_case ( self ) -> Optional[int]: for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase = SwinvaModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) def _snake_case ( self ) -> List[str]: _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase = _config_zero_init(_lowerCAmelCase ) for model_class in self.all_model_classes: _lowerCAmelCase = model_class(config=_lowerCAmelCase ) for name, param in 
model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @require_vision @require_torch class lowerCAmelCase_ ( unittest.TestCase ): @cached_property def _snake_case ( self ) -> Optional[Any]: return ( AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" ) if is_vision_available() else None ) @slow def _snake_case ( self ) -> str: _lowerCAmelCase = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" ).to( _lowerCAmelCase ) _lowerCAmelCase = self.default_image_processor _lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) _lowerCAmelCase = image_processor(images=_lowerCAmelCase , return_tensors="pt" ).to(_lowerCAmelCase ) # forward pass with torch.no_grad(): _lowerCAmelCase = model(**_lowerCAmelCase ) # verify the logits _lowerCAmelCase = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _lowerCAmelCase ) _lowerCAmelCase = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(_lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1E-4 ) )
18
'''simple docstring''' import re import string import numpy as np import datasets _SCREAMING_SNAKE_CASE = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n" _SCREAMING_SNAKE_CASE = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n" _SCREAMING_SNAKE_CASE = "\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): def _snake_case ( self ) -> List[str]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , 
id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , reference_urls=[] , ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=False , ) -> str: if regexes_to_ignore is not None: for s in regexes_to_ignore: _lowerCAmelCase = np.array([re.sub(_lowerCAmelCase , "" , _lowerCAmelCase ) for x in predictions] ) _lowerCAmelCase = np.array([re.sub(_lowerCAmelCase , "" , _lowerCAmelCase ) for x in references] ) else: _lowerCAmelCase = np.asarray(_lowerCAmelCase ) _lowerCAmelCase = np.asarray(_lowerCAmelCase ) if ignore_case: _lowerCAmelCase = np.char.lower(_lowerCAmelCase ) _lowerCAmelCase = np.char.lower(_lowerCAmelCase ) if ignore_punctuation: _lowerCAmelCase = string.punctuation.maketrans("" , "" , string.punctuation ) _lowerCAmelCase = np.char.translate(_lowerCAmelCase , table=_lowerCAmelCase ) _lowerCAmelCase = np.char.translate(_lowerCAmelCase , table=_lowerCAmelCase ) if ignore_numbers: _lowerCAmelCase = string.digits.maketrans("" , "" , string.digits ) _lowerCAmelCase = np.char.translate(_lowerCAmelCase , table=_lowerCAmelCase ) _lowerCAmelCase = np.char.translate(_lowerCAmelCase , table=_lowerCAmelCase ) _lowerCAmelCase = predictions == references return {"exact_match": np.mean(_lowerCAmelCase ) * 100}
18
1
'''simple docstring'''
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    """Shuffle the list in place by repeated random swaps and return it."""
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
18
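# Despite its name, the loop above applies len(data) transpositions between two
# independently chosen slots, which is not the textbook Fisher-Yates procedure
# and lacks its uniform-permutation guarantee. A minimal sketch of the classic
# algorithm (the function name is ours):
import random
from typing import Any


def fisher_yates_shuffle_classic(data: list) -> list[Any]:
    """Walk from the last slot down, swapping each slot i with a uniformly
    chosen slot j <= i; every permutation then has equal probability."""
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data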
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_yolos import YolosImageProcessor


logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
18
1
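# The Yolos file above is the standard transformers deprecation shim: the old
# feature-extractor name subclasses its replacement and warns on construction.
# The same pattern in self-contained form (class names here are illustrative):
import warnings


class NewProcessor:
    def __init__(self, *args, **kwargs) -> None:
        self.args, self.kwargs = args, kwargs


class OldFeatureExtractor(NewProcessor):
    """Deprecated alias kept only for backward compatibility."""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)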
'''simple docstring'''


def topological_sort(graph: dict) -> None:
    """Kahn's algorithm: print a topological order of the graph, or report a cycle."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
18
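# A variant of the same Kahn's-algorithm idea that returns the ordering instead
# of printing it, and uses a deque for O(1) pops; a sketch with our own names:
from collections import deque


def kahn_topological_order(graph: dict[int, list[int]]) -> list[int]:
    """Return one topological order of a DAG; raise ValueError on a cycle."""
    indegree = {node: 0 for node in graph}
    for targets in graph.values():
        for target in targets:
            indegree[target] += 1

    queue = deque(node for node, degree in indegree.items() if degree == 0)
    order: list[int] = []
    while queue:
        node = queue.popleft()
        order.append(node)
        for target in graph[node]:
            indegree[target] -= 1
            if indegree[target] == 0:
                queue.append(target)

    if len(order) != len(graph):
        raise ValueError("Graph contains a cycle; no topological order exists")
    return order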
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}


class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
18
1
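# Assuming the class above is the released transformers FalconConfig (its fields
# match that config), a quick usage sketch of the two derived properties:
from transformers import FalconConfig

config = FalconConfig()  # defaults mirror tiiuae/falcon-7b
print(config.hidden_size // config.num_attention_heads)  # per-head dim: 4544 // 71 == 64
print(not config.alibi)  # rotary embeddings are used whenever ALiBi is off -> True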
'''simple docstring''' import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip _SCREAMING_SNAKE_CASE = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def __a(SCREAMING_SNAKE_CASE_ : List[str] ): '''simple docstring''' if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def __a(SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ): '''simple docstring''' return max(metric_fn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for gt in ground_truths ) def __a(SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[Any] ): '''simple docstring''' _lowerCAmelCase = [line.strip() for line in open(SCREAMING_SNAKE_CASE_ , "r" ).readlines()] _lowerCAmelCase = [] if args.gold_data_mode == "qa": _lowerCAmelCase = pd.read_csv(SCREAMING_SNAKE_CASE_ , sep="\t" , header=SCREAMING_SNAKE_CASE_ ) for answer_list in data[1]: _lowerCAmelCase = ast.literal_eval(SCREAMING_SNAKE_CASE_ ) answers.append(SCREAMING_SNAKE_CASE_ ) else: _lowerCAmelCase = [line.strip() for line in open(SCREAMING_SNAKE_CASE_ , "r" ).readlines()] _lowerCAmelCase = [[reference] for reference in references] _lowerCAmelCase = _lowerCAmelCase = _lowerCAmelCase = 0 for prediction, ground_truths in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): total += 1 em += metric_max_over_ground_truths(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) fa += metric_max_over_ground_truths(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = 100.0 * em / total _lowerCAmelCase = 100.0 * fa / total logger.info(F'''F1: {fa:.2f}''' ) logger.info(F'''EM: {em:.2f}''' ) def __a(SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str] ): '''simple docstring''' _lowerCAmelCase = args.k _lowerCAmelCase = [line.strip() for line in open(SCREAMING_SNAKE_CASE_ , "r" ).readlines()] _lowerCAmelCase = [line.strip() for line in open(SCREAMING_SNAKE_CASE_ , "r" ).readlines()] _lowerCAmelCase = _lowerCAmelCase = 0 for hypo, reference in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): _lowerCAmelCase = set(hypo.split("\t" )[:k] ) _lowerCAmelCase = set(reference.split("\t" ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k _lowerCAmelCase = 100.0 * em / total logger.info(F'''Precision@{k}: {em: .2f}''' ) def __a(SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tuple ): '''simple docstring''' def strip_title(SCREAMING_SNAKE_CASE_ : List[str] ): if title.startswith("\"" ): _lowerCAmelCase = title[1:] if title.endswith("\"" ): _lowerCAmelCase = title[:-1] return title _lowerCAmelCase = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( SCREAMING_SNAKE_CASE_ , return_tensors="pt" , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , )["input_ids"].to(args.device ) _lowerCAmelCase = rag_model.rag.question_encoder(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = 
question_enc_outputs[0] _lowerCAmelCase = rag_model.retriever( SCREAMING_SNAKE_CASE_ , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="pt" , ) _lowerCAmelCase = rag_model.retriever.index.get_doc_dicts(result.doc_ids ) _lowerCAmelCase = [] for docs in all_docs: _lowerCAmelCase = [strip_title(SCREAMING_SNAKE_CASE_ ) for title in docs["title"]] provenance_strings.append("\t".join(SCREAMING_SNAKE_CASE_ ) ) return provenance_strings def __a(SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[str] ): '''simple docstring''' with torch.no_grad(): _lowerCAmelCase = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( SCREAMING_SNAKE_CASE_ , return_tensors="pt" , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = inputs_dict.input_ids.to(args.device ) _lowerCAmelCase = inputs_dict.attention_mask.to(args.device ) _lowerCAmelCase = rag_model.generate( # rag_model overwrites generate SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=SCREAMING_SNAKE_CASE_ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) _lowerCAmelCase = rag_model.retriever.generator_tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) if args.print_predictions: for q, a in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): logger.info("Q: {} - A: {}".format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) return answers def __a(): '''simple docstring''' _lowerCAmelCase = argparse.ArgumentParser() parser.add_argument( "--model_type" , choices=["rag_sequence", "rag_token", "bart"] , type=SCREAMING_SNAKE_CASE_ , help=( "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the" " model_name_or_path" ) , ) parser.add_argument( "--index_name" , default=SCREAMING_SNAKE_CASE_ , choices=["exact", "compressed", "legacy"] , type=SCREAMING_SNAKE_CASE_ , help="RAG model retriever type" , ) parser.add_argument( "--index_path" , default=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , help="Path to the retrieval index" , ) parser.add_argument("--n_docs" , default=5 , type=SCREAMING_SNAKE_CASE_ , help="Number of retrieved docs" ) parser.add_argument( "--model_name_or_path" , default=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help="Path to pretrained checkpoints or model identifier from huggingface.co/models" , ) parser.add_argument( "--eval_mode" , choices=["e2e", "retrieval"] , default="e2e" , type=SCREAMING_SNAKE_CASE_ , help=( "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates" " precision@k." 
) , ) parser.add_argument("--k" , default=1 , type=SCREAMING_SNAKE_CASE_ , help="k for the precision@k calculation" ) parser.add_argument( "--evaluation_set" , default=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help="Path to a file containing evaluation samples" , ) parser.add_argument( "--gold_data_path" , default=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help="Path to a tab-separated file with gold samples" , ) parser.add_argument( "--gold_data_mode" , default="qa" , type=SCREAMING_SNAKE_CASE_ , choices=["qa", "ans"] , help=( "Format of the gold data file" "qa - a single line in the following format: question [tab] answer_list" "ans - a single line of the gold file contains the expected answer string" ) , ) parser.add_argument( "--predictions_path" , type=SCREAMING_SNAKE_CASE_ , default="predictions.txt" , help="Name of the predictions file, to be stored in the checkpoints directory" , ) parser.add_argument( "--eval_all_checkpoints" , action="store_true" , help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number" , ) parser.add_argument( "--eval_batch_size" , default=8 , type=SCREAMING_SNAKE_CASE_ , help="Batch size per GPU/CPU for evaluation." , ) parser.add_argument( "--recalculate" , help="Recalculate predictions even if the prediction file exists" , action="store_true" , ) parser.add_argument( "--num_beams" , default=4 , type=SCREAMING_SNAKE_CASE_ , help="Number of beams to be used when generating answers" , ) parser.add_argument("--min_length" , default=1 , type=SCREAMING_SNAKE_CASE_ , help="Min length of the generated answers" ) parser.add_argument("--max_length" , default=50 , type=SCREAMING_SNAKE_CASE_ , help="Max length of the generated answers" ) parser.add_argument( "--print_predictions" , action="store_true" , help="If True, prints predictions while evaluating." , ) parser.add_argument( "--print_docs" , action="store_true" , help="If True, prints docs retried while generating." 
, ) _lowerCAmelCase = parser.parse_args() _lowerCAmelCase = torch.device("cuda" if torch.cuda.is_available() else "cpu" ) return args def __a(SCREAMING_SNAKE_CASE_ : List[Any] ): '''simple docstring''' _lowerCAmelCase = {} if args.model_type is None: _lowerCAmelCase = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith("rag" ): _lowerCAmelCase = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration _lowerCAmelCase = args.n_docs if args.index_name is not None: _lowerCAmelCase = args.index_name if args.index_path is not None: _lowerCAmelCase = args.index_path else: _lowerCAmelCase = BartForConditionalGeneration _lowerCAmelCase = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info("Evaluate the following checkpoints: %s" , SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = get_scores if args.eval_mode == "e2e" else get_precision_at_k _lowerCAmelCase = evaluate_batch_eae if args.eval_mode == "e2e" else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path ) ) score_fn(SCREAMING_SNAKE_CASE_ , args.predictions_path , args.gold_data_path ) continue logger.info("***** Running evaluation for {} *****".format(SCREAMING_SNAKE_CASE_ ) ) logger.info(" Batch size = %d" , args.eval_batch_size ) logger.info(" Predictions will be stored under {}".format(args.predictions_path ) ) if args.model_type.startswith("rag" ): _lowerCAmelCase = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = model_class.from_pretrained(SCREAMING_SNAKE_CASE_ , retriever=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) model.retriever.init_retrieval() else: _lowerCAmelCase = model_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) model.to(args.device ) with open(args.evaluation_set , "r" ) as eval_file, open(args.predictions_path , "w" ) as preds_file: _lowerCAmelCase = [] for line in tqdm(SCREAMING_SNAKE_CASE_ ): questions.append(line.strip() ) if len(SCREAMING_SNAKE_CASE_ ) == args.eval_batch_size: _lowerCAmelCase = evaluate_batch_fn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) preds_file.write("\n".join(SCREAMING_SNAKE_CASE_ ) + "\n" ) preds_file.flush() _lowerCAmelCase = [] if len(SCREAMING_SNAKE_CASE_ ) > 0: _lowerCAmelCase = evaluate_batch_fn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) preds_file.write("\n".join(SCREAMING_SNAKE_CASE_ ) ) preds_file.flush() score_fn(SCREAMING_SNAKE_CASE_ , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE = get_args() main(args)
18
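The evaluation entry point above is driven by argparse, but the same pieces can be wired up directly. A minimal sketch under stated assumptions: the checkpoint name below is the standard public RAG demo model, and the e2e batch-evaluation helper above is assumed to read only the `args` attributes listed here.

from types import SimpleNamespace

import torch
from transformers import RagRetriever, RagTokenForGeneration

# The dummy dataset keeps the index download small; "exact" mirrors the --index_name choice above.
retriever = RagRetriever.from_pretrained("facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True)
model = RagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever)
model.retriever.init_retrieval()

# Only the attributes the e2e helper reads need to exist on `args` (an assumption based on the code above).
args = SimpleNamespace(device=torch.device("cpu"), num_beams=4, min_length=1, max_length=50, print_predictions=False)
model.to(args.device)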
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { "facebook/deit-base-distilled-patch16-224": ( "https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json" ), # See all DeiT models at https://huggingface.co/models?filter=deit } class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : Optional[int] = "deit" def __init__( self , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3072 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-12 , _lowerCAmelCase=224 , _lowerCAmelCase=16 , _lowerCAmelCase=3 , _lowerCAmelCase=True , _lowerCAmelCase=16 , **_lowerCAmelCase , ) -> Dict: super().__init__(**_lowerCAmelCase ) _lowerCAmelCase = hidden_size _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = num_attention_heads _lowerCAmelCase = intermediate_size _lowerCAmelCase = hidden_act _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = initializer_range _lowerCAmelCase = layer_norm_eps _lowerCAmelCase = image_size _lowerCAmelCase = patch_size _lowerCAmelCase = num_channels _lowerCAmelCase = qkv_bias _lowerCAmelCase = encoder_stride class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : List[str] = version.parse("1.11" ) @property def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def _snake_case ( self ) -> float: return 1E-4
18
1
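A quick sketch of what the defaults above imply, assuming the upstream name DeiTConfig for the first class:

from transformers import DeiTConfig  # assumed upstream name for the config class above

cfg = DeiTConfig()  # defaults above: 224x224 images, 16x16 patches, hidden_size=768
num_patches = (cfg.image_size // cfg.patch_size) ** 2
print(num_patches, cfg.hidden_size // cfg.num_attention_heads)  # 196 patch tokens, 64-dim attention heads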
'''simple docstring''' import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class lowerCAmelCase_ ( unittest.TestCase ): def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertTrue(is_safetensors_compatible(_lowerCAmelCase ) ) def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = [ "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertTrue(is_safetensors_compatible(_lowerCAmelCase ) ) def _snake_case ( self ) -> Optional[int]: _lowerCAmelCase = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", "unet/diffusion_pytorch_model.bin", # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(_lowerCAmelCase ) ) def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = [ "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", ] self.assertTrue(is_safetensors_compatible(_lowerCAmelCase ) ) def _snake_case ( self ) -> Optional[Any]: _lowerCAmelCase = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", # Removed: 'text_encoder/model.safetensors', "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertFalse(is_safetensors_compatible(_lowerCAmelCase ) ) def _snake_case ( self ) -> Dict: _lowerCAmelCase = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] _lowerCAmelCase = "fp16" self.assertTrue(is_safetensors_compatible(_lowerCAmelCase , variant=_lowerCAmelCase ) ) def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = [ "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] _lowerCAmelCase = "fp16" self.assertTrue(is_safetensors_compatible(_lowerCAmelCase , variant=_lowerCAmelCase ) ) def _snake_case ( self ) -> Any: # pass variant but use the non-variant filenames _lowerCAmelCase = [ "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] _lowerCAmelCase = "fp16" self.assertTrue(is_safetensors_compatible(_lowerCAmelCase , variant=_lowerCAmelCase ) ) def _snake_case ( self ) -> Dict: _lowerCAmelCase = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", "unet/diffusion_pytorch_model.fp16.bin", # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] _lowerCAmelCase = "fp16" self.assertFalse(is_safetensors_compatible(_lowerCAmelCase , variant=_lowerCAmelCase ) ) def _snake_case ( self ) -> Optional[Any]: _lowerCAmelCase = [ 
"text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", ] _lowerCAmelCase = "fp16" self.assertTrue(is_safetensors_compatible(_lowerCAmelCase , variant=_lowerCAmelCase ) ) def _snake_case ( self ) -> int: # pass variant but use the non-variant filenames _lowerCAmelCase = [ "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", ] _lowerCAmelCase = "fp16" self.assertTrue(is_safetensors_compatible(_lowerCAmelCase , variant=_lowerCAmelCase ) ) def _snake_case ( self ) -> Optional[Any]: _lowerCAmelCase = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", # 'text_encoder/model.fp16.safetensors', "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] _lowerCAmelCase = "fp16" self.assertFalse(is_safetensors_compatible(_lowerCAmelCase , variant=_lowerCAmelCase ) )
18
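The rule these tests encode: a repo counts as safetensors-compatible only if every `.bin` weight has a `.safetensors` counterpart (per variant). A small sketch using the same helper:

from diffusers.pipelines.pipeline_utils import is_safetensors_compatible

paired = ["unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors"]
unpaired = ["unet/diffusion_pytorch_model.bin"]
print(is_safetensors_compatible(paired))    # True, per the tests above
print(is_safetensors_compatible(unpaired))  # False: the .bin file has no safetensors twin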
'''simple docstring''' from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _SCREAMING_SNAKE_CASE = { "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"], "feature_extraction_mctct": ["MCTCTFeatureExtractor"], "processing_mctct": ["MCTCTProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST", "MCTCTForCTC", "MCTCTModel", "MCTCTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
18
1
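With the `_LazyModule` indirection above, the heavy submodules are imported only on first attribute access; the top-level `transformers` package uses the same mechanism. A small sketch (the top-level import path is an assumption for this deprecated model):

import transformers

cfg_cls = transformers.MCTCTConfig  # first attribute access triggers the deferred submodule import
print(cfg_cls.model_type)           # "mctct" (assumed model_type of the lazily loaded config)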
'''simple docstring''' from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : List[Any] = ["input_features", "attention_mask"] def __init__( self , _lowerCAmelCase=80 , _lowerCAmelCase=16000 , _lowerCAmelCase=80 , _lowerCAmelCase=0.0 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , **_lowerCAmelCase , ) -> Tuple: super().__init__(feature_size=_lowerCAmelCase , sampling_rate=_lowerCAmelCase , padding_value=_lowerCAmelCase , **_lowerCAmelCase ) _lowerCAmelCase = num_mel_bins _lowerCAmelCase = do_ceptral_normalize _lowerCAmelCase = normalize_means _lowerCAmelCase = normalize_vars _lowerCAmelCase = True def _snake_case ( self , _lowerCAmelCase , ) -> np.ndarray: _lowerCAmelCase = waveform * (2**15) # Kaldi compliance: 16-bit signed integers _lowerCAmelCase = torch.from_numpy(_lowerCAmelCase ).unsqueeze(0 ) _lowerCAmelCase = ta_kaldi.fbank(_lowerCAmelCase , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def _snake_case ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = True , _lowerCAmelCase = True , _lowerCAmelCase = 0.0 , ) -> np.ndarray: # make sure we normalize float32 arrays if normalize_means: _lowerCAmelCase = x[:input_length].mean(axis=0 ) _lowerCAmelCase = np.subtract(_lowerCAmelCase , _lowerCAmelCase ) if normalize_vars: _lowerCAmelCase = x[:input_length].std(axis=0 ) _lowerCAmelCase = np.divide(_lowerCAmelCase , _lowerCAmelCase ) if input_length < x.shape[0]: _lowerCAmelCase = padding_value # make sure array is in float32 _lowerCAmelCase = x.astype(np.floataa ) return x def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> List[np.ndarray]: _lowerCAmelCase = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(_lowerCAmelCase , _lowerCAmelCase , self.normalize_means , self.normalize_vars , self.padding_value ) for x, n in zip(_lowerCAmelCase , _lowerCAmelCase ) ] def __call__( self , _lowerCAmelCase , _lowerCAmelCase = False , _lowerCAmelCase = None , _lowerCAmelCase = False , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> BatchFeature: if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with''' f''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." 
) _lowerCAmelCase = isinstance(_lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' ) _lowerCAmelCase = is_batched_numpy or ( isinstance(_lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: _lowerCAmelCase = [np.asarray(_lowerCAmelCase , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(_lowerCAmelCase , np.ndarray ): _lowerCAmelCase = np.asarray(_lowerCAmelCase , dtype=np.floataa ) elif isinstance(_lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): _lowerCAmelCase = raw_speech.astype(np.floataa ) # always return batch if not is_batched: _lowerCAmelCase = [raw_speech] # extract fbank features _lowerCAmelCase = [self._extract_fbank_features(_lowerCAmelCase ) for waveform in raw_speech] # convert into correct format for padding _lowerCAmelCase = BatchFeature({"input_features": features} ) _lowerCAmelCase = self.pad( _lowerCAmelCase , padding=_lowerCAmelCase , max_length=_lowerCAmelCase , truncation=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , **_lowerCAmelCase , ) # make sure list is in array format _lowerCAmelCase = padded_inputs.get("input_features" ) if isinstance(input_features[0] , _lowerCAmelCase ): _lowerCAmelCase = [np.asarray(_lowerCAmelCase , dtype=np.floataa ) for feature in input_features] _lowerCAmelCase = padded_inputs.get("attention_mask" ) if attention_mask is not None: _lowerCAmelCase = [np.asarray(_lowerCAmelCase , dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: _lowerCAmelCase = ( np.array(_lowerCAmelCase , dtype=np.intaa ) if self._get_padding_strategies(_lowerCAmelCase , max_length=_lowerCAmelCase ) is not PaddingStrategy.DO_NOT_PAD else None ) _lowerCAmelCase = self.normalize( padded_inputs["input_features"] , attention_mask=_lowerCAmelCase ) if return_tensors is not None: _lowerCAmelCase = padded_inputs.convert_to_tensors(_lowerCAmelCase ) return padded_inputs
18
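A usage sketch for the fbank extractor above; the upstream class name Speech2TextFeatureExtractor and the exact frame count are assumptions:

import numpy as np
from transformers import Speech2TextFeatureExtractor  # assumed upstream name for the class above

fe = Speech2TextFeatureExtractor()  # defaults above: 80 mel bins, 16 kHz, utterance CMVN on
speech = np.random.default_rng(0).standard_normal(16000).astype(np.float32) * 0.1  # ~1 s of noise
batch = fe(speech, sampling_rate=16000, padding=True, return_tensors="np")
print(batch["input_features"].shape)  # (1, ~98, 80): 25 ms Kaldi fbank windows with a 10 ms shift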
'''simple docstring''' from typing import Dict, Iterable, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : int = ["pixel_values"] def __init__( self , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = PILImageResampling.BICUBIC , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = True , _lowerCAmelCase = 1 / 255 , _lowerCAmelCase = True , _lowerCAmelCase = IMAGENET_DEFAULT_MEAN , _lowerCAmelCase = IMAGENET_DEFAULT_STD , **_lowerCAmelCase , ) -> None: super().__init__(**_lowerCAmelCase ) _lowerCAmelCase = size if size is not None else {"shortest_edge": 224} _lowerCAmelCase = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase ) _lowerCAmelCase = crop_size if crop_size is not None else {"height": 224, "width": 224} _lowerCAmelCase = get_size_dict(_lowerCAmelCase , param_name="crop_size" ) _lowerCAmelCase = do_resize _lowerCAmelCase = size _lowerCAmelCase = resample _lowerCAmelCase = do_center_crop _lowerCAmelCase = crop_size _lowerCAmelCase = do_rescale _lowerCAmelCase = rescale_factor _lowerCAmelCase = do_normalize _lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN _lowerCAmelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = PILImageResampling.BICUBIC , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> np.ndarray: _lowerCAmelCase = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase ) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: _lowerCAmelCase = int((256 / 224) * size["shortest_edge"] ) _lowerCAmelCase = get_resize_output_image_size(_lowerCAmelCase , size=_lowerCAmelCase , default_to_square=_lowerCAmelCase ) _lowerCAmelCase = {"height": output_size[0], "width": output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( f'''Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}''' ) return resize( _lowerCAmelCase , size=(size_dict["height"], size_dict["width"]) , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> np.ndarray: _lowerCAmelCase = get_size_dict(_lowerCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''Size dict must have keys \'height\' and \'width\'. 
Got {size.keys()}''' ) return center_crop(_lowerCAmelCase , size=(size["height"], size["width"]) , data_format=_lowerCAmelCase , **_lowerCAmelCase ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> np.ndarray: return rescale(_lowerCAmelCase , scale=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> np.ndarray: return normalize(_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = ChannelDimension.FIRST , **_lowerCAmelCase , ) -> BatchFeature: _lowerCAmelCase = do_resize if do_resize is not None else self.do_resize _lowerCAmelCase = resample if resample is not None else self.resample _lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop _lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale _lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor _lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize _lowerCAmelCase = image_mean if image_mean is not None else self.image_mean _lowerCAmelCase = image_std if image_std is not None else self.image_std _lowerCAmelCase = size if size is not None else self.size _lowerCAmelCase = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase ) _lowerCAmelCase = crop_size if crop_size is not None else self.crop_size _lowerCAmelCase = get_size_dict(_lowerCAmelCase , param_name="crop_size" ) _lowerCAmelCase = make_list_of_images(_lowerCAmelCase ) if not valid_images(_lowerCAmelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. _lowerCAmelCase = [to_numpy_array(_lowerCAmelCase ) for image in images] if do_resize: _lowerCAmelCase = [self.resize(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) for image in images] if do_center_crop: _lowerCAmelCase = [self.center_crop(_lowerCAmelCase , _lowerCAmelCase ) for image in images] if do_rescale: _lowerCAmelCase = [self.rescale(_lowerCAmelCase , _lowerCAmelCase ) for image in images] if do_normalize: _lowerCAmelCase = [self.normalize(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) for image in images] _lowerCAmelCase = [to_channel_dimension_format(_lowerCAmelCase , _lowerCAmelCase ) for image in images] _lowerCAmelCase = {"pixel_values": images} return BatchFeature(data=_lowerCAmelCase , tensor_type=_lowerCAmelCase )
18
1
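The resize rule above scales the shortest edge by 256/224 before the 224x224 center crop, which matches LeViT's preprocessing; a sketch assuming that upstream home for the class:

from PIL import Image
from transformers import LevitImageProcessor  # assumed upstream name for the processor above

processor = LevitImageProcessor()
image = Image.new("RGB", (640, 480), "gray")
out = processor(image, return_tensors="np")
print(out["pixel_values"].shape)  # (1, 3, 224, 224): shortest edge -> 256, then 224x224 center crop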
'''simple docstring''' import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) class lowerCAmelCase_ ( __magic_name__ ): def __init__( self , _lowerCAmelCase ) -> List[Any]: super().__init__() _lowerCAmelCase = nn.ModuleList(_lowerCAmelCase ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = False , _lowerCAmelCase = True , ) -> Union[ControlNetOutput, Tuple]: for i, (image, scale, controlnet) in enumerate(zip(_lowerCAmelCase , _lowerCAmelCase , self.nets ) ): _lowerCAmelCase , _lowerCAmelCase = controlnet( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) # merge samples if i == 0: _lowerCAmelCase , _lowerCAmelCase = down_samples, mid_sample else: _lowerCAmelCase = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(_lowerCAmelCase , _lowerCAmelCase ) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Tuple: _lowerCAmelCase = 0 _lowerCAmelCase = save_directory for controlnet in self.nets: controlnet.save_pretrained( _lowerCAmelCase , is_main_process=_lowerCAmelCase , save_function=_lowerCAmelCase , safe_serialization=_lowerCAmelCase , variant=_lowerCAmelCase , ) idx += 1 _lowerCAmelCase = model_path_to_save + f'''_{idx}''' @classmethod def _snake_case ( cls , _lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: _lowerCAmelCase = 0 _lowerCAmelCase = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... _lowerCAmelCase = pretrained_model_path while os.path.isdir(_lowerCAmelCase ): _lowerCAmelCase = ControlNetModel.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase ) controlnets.append(_lowerCAmelCase ) idx += 1 _lowerCAmelCase = pretrained_model_path + f'''_{idx}''' logger.info(f'''{len(_lowerCAmelCase )} controlnets loaded from {pretrained_model_path}.''' ) if len(_lowerCAmelCase ) == 0: raise ValueError( f'''No ControlNets found under {os.path.dirname(_lowerCAmelCase )}. Expected at least {pretrained_model_path + '_0'}.''' ) return cls(_lowerCAmelCase )
18
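A sketch of composing two ControlNets with the wrapper above (the checkpoints are public examples; the upstream wrapper name MultiControlNetModel is an assumption):

from diffusers import ControlNetModel
from diffusers.pipelines.controlnet import MultiControlNetModel  # assumed upstream name

canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
depth = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-depth")
multi = MultiControlNetModel([canny, depth])
# forward() zips (image, scale, controlnet) triples and sums the residuals, as in the loop above.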
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json", # See all Donut models at https://huggingface.co/models?filter=donut-swin } class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : Union[str, Any] = "donut-swin" __lowerCamelCase : int = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self , _lowerCAmelCase=224 , _lowerCAmelCase=4 , _lowerCAmelCase=3 , _lowerCAmelCase=96 , _lowerCAmelCase=[2, 2, 6, 2] , _lowerCAmelCase=[3, 6, 12, 24] , _lowerCAmelCase=7 , _lowerCAmelCase=4.0 , _lowerCAmelCase=True , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.1 , _lowerCAmelCase="gelu" , _lowerCAmelCase=False , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-5 , **_lowerCAmelCase , ) -> Optional[Any]: super().__init__(**_lowerCAmelCase ) _lowerCAmelCase = image_size _lowerCAmelCase = patch_size _lowerCAmelCase = num_channels _lowerCAmelCase = embed_dim _lowerCAmelCase = depths _lowerCAmelCase = len(_lowerCAmelCase ) _lowerCAmelCase = num_heads _lowerCAmelCase = window_size _lowerCAmelCase = mlp_ratio _lowerCAmelCase = qkv_bias _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = drop_path_rate _lowerCAmelCase = hidden_act _lowerCAmelCase = use_absolute_embeddings _lowerCAmelCase = layer_norm_eps _lowerCAmelCase = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _lowerCAmelCase = int(embed_dim * 2 ** (len(_lowerCAmelCase ) - 1) )
18
1
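A sketch of the hidden_size derivation at the end of the constructor above, assuming the upstream name DonutSwinConfig:

from transformers import DonutSwinConfig  # assumed upstream name for the class above

cfg = DonutSwinConfig(embed_dim=96, depths=[2, 2, 6, 2])
print(cfg.hidden_size)  # int(96 * 2**(4 - 1)) = 768, the channel dim after the last stage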
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json", "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json", } class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : Any = "falcon" __lowerCamelCase : List[str] = ["past_key_values"] def __init__( self , _lowerCAmelCase=65024 , _lowerCAmelCase=4544 , _lowerCAmelCase=32 , _lowerCAmelCase=71 , _lowerCAmelCase=1E-5 , _lowerCAmelCase=0.02 , _lowerCAmelCase=True , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=None , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=11 , _lowerCAmelCase=11 , **_lowerCAmelCase , ) -> Union[str, Any]: _lowerCAmelCase = vocab_size # Backward compatibility with n_embed kwarg _lowerCAmelCase = kwargs.pop("n_embed" , _lowerCAmelCase ) _lowerCAmelCase = hidden_size if n_embed is None else n_embed _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = num_attention_heads _lowerCAmelCase = layer_norm_epsilon _lowerCAmelCase = initializer_range _lowerCAmelCase = use_cache _lowerCAmelCase = hidden_dropout _lowerCAmelCase = attention_dropout _lowerCAmelCase = bos_token_id _lowerCAmelCase = eos_token_id _lowerCAmelCase = num_attention_heads if num_kv_heads is None else num_kv_heads _lowerCAmelCase = alibi _lowerCAmelCase = new_decoder_architecture _lowerCAmelCase = multi_query # Ignored when new_decoder_architecture is True _lowerCAmelCase = parallel_attn _lowerCAmelCase = bias super().__init__(bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase ) @property def _snake_case ( self ) -> Optional[Any]: return self.hidden_size // self.num_attention_heads @property def _snake_case ( self ) -> Optional[Any]: return not self.alibi
18
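A sketch of what the two properties above compute, assuming the upstream name FalconConfig:

from transformers import FalconConfig  # assumed upstream name for the class above

cfg = FalconConfig()  # 7B-style defaults: hidden_size=4544, 71 attention heads
print(cfg.hidden_size // cfg.num_attention_heads)  # 64, what the first property returns
print(not cfg.alibi)  # True: with alibi off, the second property reports rotary embeddings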
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { "microsoft/swinv2-tiny-patch4-window8-256": ( "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json" ), } class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : Union[str, Any] = "swinv2" __lowerCamelCase : int = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self , _lowerCAmelCase=224 , _lowerCAmelCase=4 , _lowerCAmelCase=3 , _lowerCAmelCase=96 , _lowerCAmelCase=[2, 2, 6, 2] , _lowerCAmelCase=[3, 6, 12, 24] , _lowerCAmelCase=7 , _lowerCAmelCase=4.0 , _lowerCAmelCase=True , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.1 , _lowerCAmelCase="gelu" , _lowerCAmelCase=False , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-5 , _lowerCAmelCase=32 , **_lowerCAmelCase , ) -> Tuple: super().__init__(**_lowerCAmelCase ) _lowerCAmelCase = image_size _lowerCAmelCase = patch_size _lowerCAmelCase = num_channels _lowerCAmelCase = embed_dim _lowerCAmelCase = depths _lowerCAmelCase = len(_lowerCAmelCase ) _lowerCAmelCase = num_heads _lowerCAmelCase = window_size _lowerCAmelCase = mlp_ratio _lowerCAmelCase = qkv_bias _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = drop_path_rate _lowerCAmelCase = hidden_act _lowerCAmelCase = use_absolute_embeddings _lowerCAmelCase = layer_norm_eps _lowerCAmelCase = initializer_range _lowerCAmelCase = encoder_stride # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _lowerCAmelCase = int(embed_dim * 2 ** (len(_lowerCAmelCase ) - 1) ) _lowerCAmelCase = (0, 0, 0, 0)
18
1
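A sketch of the attribute_map aliases and the derived hidden_size above, assuming the upstream name Swinv2Config:

from transformers import Swinv2Config  # assumed upstream name for the class above

cfg = Swinv2Config()
print(cfg.num_hidden_layers)  # 4: the attribute_map aliases it to num_layers = len(depths)
print(cfg.hidden_size)        # int(96 * 2**3) = 768, matching the VisionEncoderDecoder note above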
"""Rabin-Karp substring search using a rolling polynomial hash."""

alphabet_size = 256
modulus = 1_000_003  # modulus to hash a string


def rabin_karp(pattern: str, text: str) -> bool:
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text_1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text_1) and not rabin_karp(pattern, text_2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
18
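A tiny numeric check of the O(1) hash roll used above, with the same constants:

alphabet_size, modulus = 256, 1_000_003

def window_hash(s: str) -> int:
    # Same polynomial hash as in rabin_karp above.
    value = 0
    for ch in s:
        value = (ord(ch) + value * alphabet_size) % modulus
    return value

old, new = window_hash("abc"), window_hash("bcd")
power = pow(alphabet_size, 2, modulus)  # alphabet_size**(p_len - 1) % modulus, here p_len = 3
rolled = ((old - ord("a") * power) * alphabet_size + ord("d")) % modulus
assert rolled == new  # dropping 'a' and appending 'd' re-derives the next window's hash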
'''simple docstring''' import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class lowerCAmelCase_ ( __magic_name__ ,__magic_name__ ,unittest.TestCase ): __lowerCamelCase : Optional[Any] = AutoencoderKL __lowerCamelCase : List[Any] = "sample" __lowerCamelCase : Tuple = 1e-2 @property def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = 4 _lowerCAmelCase = 3 _lowerCAmelCase = (32, 32) _lowerCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(_lowerCAmelCase ) return {"sample": image} @property def _snake_case ( self ) -> Any: return (3, 32, 32) @property def _snake_case ( self ) -> List[Any]: return (3, 32, 32) def _snake_case ( self ) -> str: _lowerCAmelCase = { "block_out_channels": [32, 64], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 4, } _lowerCAmelCase = self.dummy_input return init_dict, inputs_dict def _snake_case ( self ) -> Optional[int]: pass def _snake_case ( self ) -> Any: pass @unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" ) def _snake_case ( self ) -> str: # enable deterministic behavior for gradient checkpointing _lowerCAmelCase , _lowerCAmelCase = self.prepare_init_args_and_inputs_for_common() _lowerCAmelCase = self.model_class(**_lowerCAmelCase ) model.to(_lowerCAmelCase ) assert not model.is_gradient_checkpointing and model.training _lowerCAmelCase = model(**_lowerCAmelCase ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() _lowerCAmelCase = torch.randn_like(_lowerCAmelCase ) _lowerCAmelCase = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing _lowerCAmelCase = self.model_class(**_lowerCAmelCase ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(_lowerCAmelCase ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training _lowerCAmelCase = model_a(**_lowerCAmelCase ).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() _lowerCAmelCase = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1E-5 ) _lowerCAmelCase = dict(model.named_parameters() ) _lowerCAmelCase = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) ) def _snake_case ( self ) -> Optional[Any]: _lowerCAmelCase , _lowerCAmelCase = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) self.assertEqual(len(loading_info["missing_keys"] ) , 0 ) model.to(_lowerCAmelCase ) _lowerCAmelCase = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def _snake_case ( self ) -> Dict: _lowerCAmelCase = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" ) _lowerCAmelCase = model.to(_lowerCAmelCase ) model.eval() if torch_device == "mps": _lowerCAmelCase = torch.manual_seed(0 ) else: _lowerCAmelCase = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 ) _lowerCAmelCase = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) _lowerCAmelCase = image.to(_lowerCAmelCase ) with torch.no_grad(): _lowerCAmelCase = model(_lowerCAmelCase , sample_posterior=_lowerCAmelCase , generator=_lowerCAmelCase ).sample _lowerCAmelCase = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. if torch_device == "mps": _lowerCAmelCase = torch.tensor( [ -4.0078E-01, -3.8323E-04, -1.2681E-01, -1.1462E-01, 2.0095E-01, 1.0893E-01, -8.8247E-02, -3.0361E-01, -9.8644E-03, ] ) elif torch_device == "cpu": _lowerCAmelCase = torch.tensor( [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] ) else: _lowerCAmelCase = torch.tensor( [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] ) self.assertTrue(torch_all_close(_lowerCAmelCase , _lowerCAmelCase , rtol=1E-2 ) ) @slow class lowerCAmelCase_ ( unittest.TestCase ): def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]: return f'''gaussian_noise_s={seed}_shape={'_'.join([str(_lowerCAmelCase ) for s in shape] )}.npy''' def _snake_case ( self ) -> List[str]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self , _lowerCAmelCase=0 , _lowerCAmelCase=(4, 3, 512, 512) , _lowerCAmelCase=False ) -> Tuple: _lowerCAmelCase = torch.floataa if fpaa else torch.floataa _lowerCAmelCase = torch.from_numpy(load_hf_numpy(self.get_file_format(_lowerCAmelCase , _lowerCAmelCase ) ) ).to(_lowerCAmelCase ).to(_lowerCAmelCase ) return image def _snake_case ( self , _lowerCAmelCase="CompVis/stable-diffusion-v1-4" , _lowerCAmelCase=False ) -> Tuple: _lowerCAmelCase = "fp16" if fpaa else None _lowerCAmelCase = torch.floataa if fpaa else torch.floataa _lowerCAmelCase = AutoencoderKL.from_pretrained( _lowerCAmelCase , subfolder="vae" , torch_dtype=_lowerCAmelCase , revision=_lowerCAmelCase , ) model.to(_lowerCAmelCase ).eval() return model def _snake_case ( self , _lowerCAmelCase=0 ) -> str: if torch_device == "mps": return torch.manual_seed(_lowerCAmelCase ) return 
torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase ) @parameterized.expand( [ # fmt: off [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]], [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]], # fmt: on ] ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict: _lowerCAmelCase = self.get_sd_vae_model() _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase ) _lowerCAmelCase = self.get_generator(_lowerCAmelCase ) with torch.no_grad(): _lowerCAmelCase = model(_lowerCAmelCase , generator=_lowerCAmelCase , sample_posterior=_lowerCAmelCase ).sample assert sample.shape == image.shape _lowerCAmelCase = sample[-1, -2:, -2:, :2].flatten().float().cpu() _lowerCAmelCase = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=3E-3 ) @parameterized.expand( [ # fmt: off [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]], [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]], # fmt: on ] ) @require_torch_gpu def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: _lowerCAmelCase = self.get_sd_vae_model(fpaa=_lowerCAmelCase ) _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , fpaa=_lowerCAmelCase ) _lowerCAmelCase = self.get_generator(_lowerCAmelCase ) with torch.no_grad(): _lowerCAmelCase = model(_lowerCAmelCase , generator=_lowerCAmelCase , sample_posterior=_lowerCAmelCase ).sample assert sample.shape == image.shape _lowerCAmelCase = sample[-1, -2:, :2, -2:].flatten().float().cpu() _lowerCAmelCase = torch.tensor(_lowerCAmelCase ) assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1E-2 ) @parameterized.expand( [ # fmt: off [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]], [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]], # fmt: on ] ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Tuple: _lowerCAmelCase = self.get_sd_vae_model() _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase ) with torch.no_grad(): _lowerCAmelCase = model(_lowerCAmelCase ).sample assert sample.shape == image.shape _lowerCAmelCase = sample[-1, -2:, -2:, :2].flatten().float().cpu() _lowerCAmelCase = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=3E-3 ) @parameterized.expand( [ # fmt: off [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]], [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]], # fmt: on ] ) @require_torch_gpu def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: _lowerCAmelCase = self.get_sd_vae_model() _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , shape=(3, 4, 64, 64) ) with torch.no_grad(): _lowerCAmelCase = model.decode(_lowerCAmelCase ).sample assert list(sample.shape ) == [3, 3, 512, 512] _lowerCAmelCase = sample[-1, -2:, :2, -2:].flatten().cpu() _lowerCAmelCase = torch.tensor(_lowerCAmelCase ) assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) @parameterized.expand( [ # fmt: 
off [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]], [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]], # fmt: on ] ) @require_torch_gpu def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> str: _lowerCAmelCase = self.get_sd_vae_model(fpaa=_lowerCAmelCase ) _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , shape=(3, 4, 64, 64) , fpaa=_lowerCAmelCase ) with torch.no_grad(): _lowerCAmelCase = model.decode(_lowerCAmelCase ).sample assert list(sample.shape ) == [3, 3, 512, 512] _lowerCAmelCase = sample[-1, -2:, :2, -2:].flatten().float().cpu() _lowerCAmelCase = torch.tensor(_lowerCAmelCase ) assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=5E-3 ) @parameterized.expand([(13,), (16,), (27,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." ) def _snake_case ( self , _lowerCAmelCase ) -> List[str]: _lowerCAmelCase = self.get_sd_vae_model(fpaa=_lowerCAmelCase ) _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , shape=(3, 4, 64, 64) , fpaa=_lowerCAmelCase ) with torch.no_grad(): _lowerCAmelCase = model.decode(_lowerCAmelCase ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): _lowerCAmelCase = model.decode(_lowerCAmelCase ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1E-1 ) @parameterized.expand([(13,), (16,), (37,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." ) def _snake_case ( self , _lowerCAmelCase ) -> Any: _lowerCAmelCase = self.get_sd_vae_model() _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , shape=(3, 4, 64, 64) ) with torch.no_grad(): _lowerCAmelCase = model.decode(_lowerCAmelCase ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): _lowerCAmelCase = model.decode(_lowerCAmelCase ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1E-2 ) @parameterized.expand( [ # fmt: off [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]], [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]], # fmt: on ] ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> int: _lowerCAmelCase = self.get_sd_vae_model() _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase ) _lowerCAmelCase = self.get_generator(_lowerCAmelCase ) with torch.no_grad(): _lowerCAmelCase = model.encode(_lowerCAmelCase ).latent_dist _lowerCAmelCase = dist.sample(generator=_lowerCAmelCase ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] _lowerCAmelCase = sample[0, -1, -3:, -3:].flatten().cpu() _lowerCAmelCase = torch.tensor(_lowerCAmelCase ) _lowerCAmelCase = 3E-3 if torch_device != "mps" else 1E-2 assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=_lowerCAmelCase )
18
1
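A standalone sketch of the dummy VAE the tests above construct, showing the shape contract it relies on (one spatial downsample per extra block, latent_channels=4):

import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL(
    block_out_channels=[32, 64],
    in_channels=3,
    out_channels=3,
    down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
    up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
    latent_channels=4,
)
x = torch.randn(1, 3, 32, 32)
z = vae.encode(x).latent_dist.sample(generator=torch.manual_seed(0))
print(z.shape)                     # torch.Size([1, 4, 16, 16])
print(vae.decode(z).sample.shape)  # torch.Size([1, 3, 32, 32])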
'''simple docstring''' from __future__ import annotations from typing import Dict from ...configuration_utils import PretrainedConfig _SCREAMING_SNAKE_CASE = { "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json", "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json", } class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : int = "ernie_m" __lowerCamelCase : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"} def __init__( self , _lowerCAmelCase = 250002 , _lowerCAmelCase = 768 , _lowerCAmelCase = 12 , _lowerCAmelCase = 12 , _lowerCAmelCase = 3072 , _lowerCAmelCase = "gelu" , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 514 , _lowerCAmelCase = 0.02 , _lowerCAmelCase = 1 , _lowerCAmelCase = 1E-05 , _lowerCAmelCase=None , _lowerCAmelCase=False , _lowerCAmelCase=0.0 , **_lowerCAmelCase , ) -> Union[str, Any]: super().__init__(pad_token_id=_lowerCAmelCase , **_lowerCAmelCase ) _lowerCAmelCase = vocab_size _lowerCAmelCase = hidden_size _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = num_attention_heads _lowerCAmelCase = intermediate_size _lowerCAmelCase = hidden_act _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = max_position_embeddings _lowerCAmelCase = initializer_range _lowerCAmelCase = layer_norm_eps _lowerCAmelCase = classifier_dropout _lowerCAmelCase = is_decoder _lowerCAmelCase = act_dropout
18
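A sketch of the multilingual defaults above, assuming the upstream name ErnieMConfig:

from transformers import ErnieMConfig  # assumed upstream name for the class above

cfg = ErnieMConfig()
print(cfg.vocab_size, cfg.max_position_embeddings)  # 250002 514, the defaults above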
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json", } class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : str = "gpt_bigcode" __lowerCamelCase : Optional[int] = ["past_key_values"] __lowerCamelCase : List[str] = { "hidden_size": "n_embd", "max_position_embeddings": "n_positions", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , _lowerCAmelCase=50257 , _lowerCAmelCase=1024 , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=None , _lowerCAmelCase="gelu_pytorch_tanh" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=1E-5 , _lowerCAmelCase=0.02 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=50256 , _lowerCAmelCase=50256 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , **_lowerCAmelCase , ) -> List[Any]: _lowerCAmelCase = vocab_size _lowerCAmelCase = n_positions _lowerCAmelCase = n_embd _lowerCAmelCase = n_layer _lowerCAmelCase = n_head _lowerCAmelCase = n_inner _lowerCAmelCase = activation_function _lowerCAmelCase = resid_pdrop _lowerCAmelCase = embd_pdrop _lowerCAmelCase = attn_pdrop _lowerCAmelCase = layer_norm_epsilon _lowerCAmelCase = initializer_range _lowerCAmelCase = scale_attn_weights _lowerCAmelCase = use_cache _lowerCAmelCase = attention_softmax_in_fpaa _lowerCAmelCase = scale_attention_softmax_in_fpaa _lowerCAmelCase = multi_query _lowerCAmelCase = bos_token_id _lowerCAmelCase = eos_token_id super().__init__(bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
18
1
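A sketch of the multi-query default and the attribute_map aliases above, assuming the upstream name GPTBigCodeConfig:

from transformers import GPTBigCodeConfig  # assumed upstream name for the class above

cfg = GPTBigCodeConfig()
print(cfg.multi_query)  # True: one shared key/value head instead of one per query head
print(cfg.hidden_size, cfg.num_attention_heads)  # 768 12, resolved through the attribute_map above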
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = torch.device("cpu") def __a(): '''simple docstring''' _lowerCAmelCase = "http://images.cocodataset.org/val2017/000000039769.jpg" _lowerCAmelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw ) return im def __a(SCREAMING_SNAKE_CASE_ : Any ): '''simple docstring''' if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1_703e00, 2.1_107e00, -2.0_811e00, 8.8_685e-01, 2.4_360e-01] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9_636e-01, 2.3_478e-01, -1.6_963e00, -1.7_381e00, -8.6_337e-01] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2_768e-01, -4.7_429e-01, -1.0_897e00, -1.0_248e00, 3.5_523e-02] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5_330e-01, 2.4_211e-01, -6.0_185e-01, -8.2_789e-01, -6.0_446e-02] ) def __a(SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Dict ): '''simple docstring''' _lowerCAmelCase = dct.pop(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = val def __a(SCREAMING_SNAKE_CASE_ : Optional[int] ): '''simple docstring''' _lowerCAmelCase = [] for k in state_dict.keys(): _lowerCAmelCase = k if ".pwconv" in k: _lowerCAmelCase = k_new.replace(".pwconv" , ".point_wise_conv" ) if ".dwconv" in k: _lowerCAmelCase = k_new.replace(".dwconv" , ".depth_wise_conv" ) if ".Proj." in k: _lowerCAmelCase = k_new.replace(".Proj." , ".proj." ) if "patch_embed" in k_new: _lowerCAmelCase = k_new.replace("patch_embed" , "swiftformer.patch_embed.patch_embedding" ) if "network" in k_new: _lowerCAmelCase = k_new.split("." ) if ls[2].isdigit(): _lowerCAmelCase = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." 
+ ".".join(ls[3:] ) else: _lowerCAmelCase = k_new.replace("network" , "swiftformer.encoder.network" ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def __a(SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any ): '''simple docstring''' _lowerCAmelCase = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size _lowerCAmelCase = 1000 _lowerCAmelCase = "huggingface/label-files" _lowerCAmelCase = "imagenet-1k-id2label.json" _lowerCAmelCase = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="dataset" ) , "r" ) ) _lowerCAmelCase = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()} _lowerCAmelCase = idalabel _lowerCAmelCase = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": _lowerCAmelCase = [3, 3, 6, 4] _lowerCAmelCase = [48, 56, 112, 220] elif swiftformer_name == "swiftformer_s": _lowerCAmelCase = [3, 3, 9, 6] _lowerCAmelCase = [48, 64, 168, 224] elif swiftformer_name == "swiftformer_l1": _lowerCAmelCase = [4, 3, 10, 5] _lowerCAmelCase = [48, 96, 192, 384] elif swiftformer_name == "swiftformer_l3": _lowerCAmelCase = [4, 4, 12, 6] _lowerCAmelCase = [64, 128, 320, 512] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith("https" ): _lowerCAmelCase = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ , map_location="cpu" , check_hash=SCREAMING_SNAKE_CASE_ ) else: _lowerCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ , map_location="cpu" ) _lowerCAmelCase = checkpoint _lowerCAmelCase = create_rename_keys(SCREAMING_SNAKE_CASE_ ) for rename_key_src, rename_key_dest in rename_keys: rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # load HuggingFace model _lowerCAmelCase = SwiftFormerForImageClassification(SCREAMING_SNAKE_CASE_ ).eval() hf_model.load_state_dict(SCREAMING_SNAKE_CASE_ ) # prepare test inputs _lowerCAmelCase = prepare_img() _lowerCAmelCase = ViTImageProcessor.from_pretrained("preprocessor_config" ) _lowerCAmelCase = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="pt" ) # compare outputs from both models _lowerCAmelCase = get_expected_output(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = hf_model(inputs["pixel_values"] ).logits assert hf_logits.shape == torch.Size([1, 1000] ) assert torch.allclose(hf_logits[0, 0:5] , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ ) print(F'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''' ) hf_model.save_pretrained(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE = argparse.ArgumentParser() # Required parameters parser.add_argument( "--swiftformer_name", default="swiftformer_xs", choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"], type=str, help="Name of the SwiftFormer model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default="./converted_outputs/", type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.") _SCREAMING_SNAKE_CASE = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
18
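A trace of the key-renaming rules above on a hypothetical checkpoint key, mirroring the digit branch:

key = "network.0.1.dwconv.weight"  # hypothetical original SwiftFormer key
key = key.replace(".dwconv", ".depth_wise_conv")
parts = key.split(".")
assert parts[2].isdigit()  # blocks get re-nested only when this holds, as in the branch above
print("swiftformer.encoder.network." + parts[1] + ".blocks." + parts[2] + "." + ".".join(parts[3:]))
# -> swiftformer.encoder.network.0.blocks.1.depth_wise_conv.weight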
'''simple docstring''' import math from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json", # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio } class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : List[Any] = "data2vec-audio" def __init__( self , _lowerCAmelCase=32 , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3072 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-5 , _lowerCAmelCase="gelu" , _lowerCAmelCase=(512, 512, 512, 512, 512, 512, 512) , _lowerCAmelCase=(5, 2, 2, 2, 2, 2, 2) , _lowerCAmelCase=(10, 3, 3, 3, 3, 2, 2) , _lowerCAmelCase=False , _lowerCAmelCase=16 , _lowerCAmelCase=19 , _lowerCAmelCase=5 , _lowerCAmelCase=0.05 , _lowerCAmelCase=10 , _lowerCAmelCase=2 , _lowerCAmelCase=0.0 , _lowerCAmelCase=10 , _lowerCAmelCase=0 , _lowerCAmelCase="sum" , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=256 , _lowerCAmelCase=(512, 512, 512, 512, 1500) , _lowerCAmelCase=(5, 3, 3, 1, 1) , _lowerCAmelCase=(1, 2, 3, 1, 1) , _lowerCAmelCase=512 , _lowerCAmelCase=0 , _lowerCAmelCase=1 , _lowerCAmelCase=2 , _lowerCAmelCase=False , _lowerCAmelCase=3 , _lowerCAmelCase=2 , _lowerCAmelCase=3 , _lowerCAmelCase=None , **_lowerCAmelCase , ) -> Dict: super().__init__(**_lowerCAmelCase , pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase ) _lowerCAmelCase = hidden_size _lowerCAmelCase = feat_extract_activation _lowerCAmelCase = list(_lowerCAmelCase ) _lowerCAmelCase = list(_lowerCAmelCase ) _lowerCAmelCase = list(_lowerCAmelCase ) _lowerCAmelCase = conv_bias _lowerCAmelCase = num_conv_pos_embeddings _lowerCAmelCase = num_conv_pos_embedding_groups _lowerCAmelCase = conv_pos_kernel_size _lowerCAmelCase = len(self.conv_dim ) _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = intermediate_size _lowerCAmelCase = hidden_act _lowerCAmelCase = num_attention_heads _lowerCAmelCase = hidden_dropout _lowerCAmelCase = attention_dropout _lowerCAmelCase = activation_dropout _lowerCAmelCase = feat_proj_dropout _lowerCAmelCase = final_dropout _lowerCAmelCase = layerdrop _lowerCAmelCase = layer_norm_eps _lowerCAmelCase = initializer_range _lowerCAmelCase = vocab_size _lowerCAmelCase = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==" " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =" f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _lowerCAmelCase = mask_time_prob _lowerCAmelCase = mask_time_length _lowerCAmelCase = mask_time_min_masks _lowerCAmelCase = mask_feature_prob _lowerCAmelCase = mask_feature_length _lowerCAmelCase = mask_feature_min_masks # ctc loss _lowerCAmelCase = ctc_loss_reduction _lowerCAmelCase = ctc_zero_infinity # adapter _lowerCAmelCase = add_adapter _lowerCAmelCase = adapter_kernel_size _lowerCAmelCase = adapter_stride _lowerCAmelCase = num_adapter_layers _lowerCAmelCase = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. _lowerCAmelCase = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. _lowerCAmelCase = list(_lowerCAmelCase ) _lowerCAmelCase = list(_lowerCAmelCase ) _lowerCAmelCase = list(_lowerCAmelCase ) _lowerCAmelCase = xvector_output_dim @property def _snake_case ( self ) -> str: return math.prod(self.conv_stride )
18
1
'''simple docstring'''
from __future__ import annotations

from decimal import Decimal
from math import *  # noqa: F403

from sympy import diff


def __a(SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : float | Decimal , SCREAMING_SNAKE_CASE_ : float = 10**-10 ):
    '''simple docstring'''
    _lowerCAmelCase = a
    while True:
        _lowerCAmelCase = Decimal(SCREAMING_SNAKE_CASE_ ) - (
            Decimal(eval(SCREAMING_SNAKE_CASE_ ) ) / Decimal(eval(str(diff(SCREAMING_SNAKE_CASE_ ) ) ) )  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(SCREAMING_SNAKE_CASE_ ) ) < precision:  # noqa: S307
            return float(SCREAMING_SNAKE_CASE_ )


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
    # Find root of polynomial
    print(f'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}''')
    # Find Square Root of 5
    print(f'''The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}''')
    # Exponential Roots
    print(f'''The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}''')
18
'''simple docstring''' import torch from diffusers import DDPMParallelScheduler from .test_schedulers import SchedulerCommonTest class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : Any = (DDPMParallelScheduler,) def _snake_case ( self , **_lowerCAmelCase ) -> int: _lowerCAmelCase = { "num_train_timesteps": 1000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "variance_type": "fixed_small", "clip_sample": True, } config.update(**_lowerCAmelCase ) return config def _snake_case ( self ) -> List[Any]: for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=_lowerCAmelCase ) def _snake_case ( self ) -> List[Any]: for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=_lowerCAmelCase , beta_end=_lowerCAmelCase ) def _snake_case ( self ) -> Any: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_lowerCAmelCase ) def _snake_case ( self ) -> Optional[Any]: for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=_lowerCAmelCase ) def _snake_case ( self ) -> Optional[int]: for clip_sample in [True, False]: self.check_over_configs(clip_sample=_lowerCAmelCase ) def _snake_case ( self ) -> List[str]: self.check_over_configs(thresholding=_lowerCAmelCase ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=_lowerCAmelCase , prediction_type=_lowerCAmelCase , sample_max_value=_lowerCAmelCase , ) def _snake_case ( self ) -> int: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=_lowerCAmelCase ) def _snake_case ( self ) -> Dict: for t in [0, 500, 999]: self.check_over_forward(time_step=_lowerCAmelCase ) def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config() _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5 def _snake_case ( self ) -> Tuple: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config() _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) _lowerCAmelCase = len(_lowerCAmelCase ) _lowerCAmelCase = self.dummy_model() _lowerCAmelCase = self.dummy_sample_deter _lowerCAmelCase = self.dummy_sample_deter + 0.1 _lowerCAmelCase = self.dummy_sample_deter - 0.1 _lowerCAmelCase = samplea.shape[0] _lowerCAmelCase = torch.stack([samplea, samplea, samplea] , dim=0 ) _lowerCAmelCase = torch.arange(_lowerCAmelCase )[0:3, None].repeat(1 , _lowerCAmelCase ) _lowerCAmelCase = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) _lowerCAmelCase = scheduler.batch_step_no_noise(_lowerCAmelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) ) _lowerCAmelCase = torch.sum(torch.abs(_lowerCAmelCase ) ) _lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_sum.item() - 1153.1833 ) < 1E-2 assert abs(result_mean.item() - 0.5005 ) < 1E-3 def _snake_case ( self ) -> Dict: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config() _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) _lowerCAmelCase = len(_lowerCAmelCase ) _lowerCAmelCase = self.dummy_model() _lowerCAmelCase = self.dummy_sample_deter 
_lowerCAmelCase = torch.manual_seed(0 ) for t in reversed(range(_lowerCAmelCase ) ): # 1. predict noise residual _lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase ) # 2. predict previous mean of sample x_t-1 _lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample _lowerCAmelCase = pred_prev_sample _lowerCAmelCase = torch.sum(torch.abs(_lowerCAmelCase ) ) _lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_sum.item() - 258.9606 ) < 1E-2 assert abs(result_mean.item() - 0.3372 ) < 1E-3 def _snake_case ( self ) -> Optional[Any]: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config(prediction_type="v_prediction" ) _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) _lowerCAmelCase = len(_lowerCAmelCase ) _lowerCAmelCase = self.dummy_model() _lowerCAmelCase = self.dummy_sample_deter _lowerCAmelCase = torch.manual_seed(0 ) for t in reversed(range(_lowerCAmelCase ) ): # 1. predict noise residual _lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase ) # 2. predict previous mean of sample x_t-1 _lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample _lowerCAmelCase = pred_prev_sample _lowerCAmelCase = torch.sum(torch.abs(_lowerCAmelCase ) ) _lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_sum.item() - 202.0296 ) < 1E-2 assert abs(result_mean.item() - 0.2631 ) < 1E-3 def _snake_case ( self ) -> Dict: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config() _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) _lowerCAmelCase = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=_lowerCAmelCase ) _lowerCAmelCase = scheduler.timesteps for i, timestep in enumerate(_lowerCAmelCase ): if i == len(_lowerCAmelCase ) - 1: _lowerCAmelCase = -1 else: _lowerCAmelCase = timesteps[i + 1] _lowerCAmelCase = scheduler.previous_timestep(_lowerCAmelCase ) _lowerCAmelCase = prev_t.item() self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) def _snake_case ( self ) -> Any: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config() _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) _lowerCAmelCase = [100, 87, 50, 51, 0] with self.assertRaises(_lowerCAmelCase , msg="`custom_timesteps` must be in descending order." ): scheduler.set_timesteps(timesteps=_lowerCAmelCase ) def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config() _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) _lowerCAmelCase = [100, 87, 50, 1, 0] _lowerCAmelCase = len(_lowerCAmelCase ) with self.assertRaises(_lowerCAmelCase , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ): scheduler.set_timesteps(num_inference_steps=_lowerCAmelCase , timesteps=_lowerCAmelCase ) def _snake_case ( self ) -> Optional[int]: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config() _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) _lowerCAmelCase = [scheduler.config.num_train_timesteps] with self.assertRaises( _lowerCAmelCase , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ): scheduler.set_timesteps(timesteps=_lowerCAmelCase )
18
1
'''simple docstring'''
import os
from collections import namedtuple

import pytest

from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict


_SCREAMING_SNAKE_CASE = namedtuple(
    "_TestCommandArgs",
    [
        "dataset",
        "name",
        "cache_dir",
        "data_dir",
        "all_configs",
        "save_infos",
        "ignore_verifications",
        "force_redownload",
        "clear_cache",
    ],
    defaults=[None, None, None, False, False, False, False, False],
)


def __a(SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] ):
    '''simple docstring'''
    return (abs(source - target ) / target) < 0.01


@pytest.mark.integration
def __a(SCREAMING_SNAKE_CASE_ : str ):
    '''simple docstring'''
    _lowerCAmelCase = _TestCommandArgs(dataset=SCREAMING_SNAKE_CASE_ , all_configs=SCREAMING_SNAKE_CASE_ , save_infos=SCREAMING_SNAKE_CASE_ )
    _lowerCAmelCase = TestCommand(*SCREAMING_SNAKE_CASE_ )
    test_command.run()
    _lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , "README.md" )
    assert os.path.exists(SCREAMING_SNAKE_CASE_ )
    _lowerCAmelCase = DatasetInfosDict.from_directory(SCREAMING_SNAKE_CASE_ )
    _lowerCAmelCase = DatasetInfosDict(
        {
            "default": DatasetInfo(
                features=Features(
                    {
                        "tokens": Sequence(Value("string" ) ),
                        "ner_tags": Sequence(
                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] )
                        ),
                        "langs": Sequence(Value("string" ) ),
                        "spans": Sequence(Value("string" ) ),
                    }
                ) ,
                splits=[
                    {
                        "name": "train",
                        "num_bytes": 2351563,
                        "num_examples": 10000,
                    },
                    {
                        "name": "validation",
                        "num_bytes": 238418,
                        "num_examples": 1000,
                    },
                ] ,
                download_size=3940680 ,
                dataset_size=2589981 ,
            )
        }
    )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        _lowerCAmelCase , _lowerCAmelCase = getattr(dataset_infos["default"] , SCREAMING_SNAKE_CASE_ ), getattr(expected_dataset_infos["default"] , SCREAMING_SNAKE_CASE_ )
        if key == "num_bytes":
            assert is_apercent_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        elif key == "splits":
            assert list(SCREAMING_SNAKE_CASE_ ) == list(SCREAMING_SNAKE_CASE_ )
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
        else:
            assert result == expected
18
'''simple docstring''' import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = {"vocab_file": "spiece.model"} _SCREAMING_SNAKE_CASE = { "vocab_file": { "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model", } } class lowerCAmelCase_ ( __magic_name__ ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<sep>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase="<cls>" , _lowerCAmelCase="<mask>" , _lowerCAmelCase=["<eop>", "<eod>"] , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> None: _lowerCAmelCase = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else mask_token _lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=_lowerCAmelCase , remove_space=_lowerCAmelCase , keep_accents=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , ) _lowerCAmelCase = 3 _lowerCAmelCase = do_lower_case _lowerCAmelCase = remove_space _lowerCAmelCase = keep_accents _lowerCAmelCase = vocab_file _lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_lowerCAmelCase ) try: import jieba except ModuleNotFoundError as error: raise error.__class__( "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. " "See https://pypi.org/project/jieba/ for installation." 
) _lowerCAmelCase = jieba _lowerCAmelCase = str.maketrans(" \n" , "\u2582\u2583" ) @property # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size def _snake_case ( self ) -> Optional[int]: return len(self.sp_model ) def _snake_case ( self ) -> Optional[int]: _lowerCAmelCase = {self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> Tuple: _lowerCAmelCase = self.__dict__.copy() _lowerCAmelCase = None return state def __setstate__( self , _lowerCAmelCase ) -> Dict: _lowerCAmelCase = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): _lowerCAmelCase = {} _lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _snake_case ( self , _lowerCAmelCase ) -> str: if self.remove_space: _lowerCAmelCase = " ".join(inputs.strip().split() ) else: _lowerCAmelCase = inputs _lowerCAmelCase = outputs.replace("``" , "\"" ).replace("''" , "\"" ) if not self.keep_accents: _lowerCAmelCase = unicodedata.normalize("NFKD" , _lowerCAmelCase ) _lowerCAmelCase = "".join([c for c in outputs if not unicodedata.combining(_lowerCAmelCase )] ) if self.do_lower_case: _lowerCAmelCase = outputs.lower() return outputs def _snake_case ( self , _lowerCAmelCase ) -> List[str]: _lowerCAmelCase = self.preprocess_text(_lowerCAmelCase ) _lowerCAmelCase = self.sp_model.encode(_lowerCAmelCase , out_type=_lowerCAmelCase ) _lowerCAmelCase = [] for piece in pieces: if len(_lowerCAmelCase ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit(): _lowerCAmelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(_lowerCAmelCase , "" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: _lowerCAmelCase = cur_pieces[1:] else: _lowerCAmelCase = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(_lowerCAmelCase ) else: new_pieces.append(_lowerCAmelCase ) return new_pieces def _snake_case ( self , _lowerCAmelCase ) -> str: return self.sp_model.PieceToId(_lowerCAmelCase ) def _snake_case ( self , _lowerCAmelCase ) -> Optional[Any]: return self.sp_model.IdToPiece(_lowerCAmelCase ) def _snake_case ( self , _lowerCAmelCase ) -> Optional[Any]: _lowerCAmelCase = "".join(_lowerCAmelCase ).replace(_lowerCAmelCase , " " ).strip() return out_string def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> List[int]: _lowerCAmelCase = [self.sep_token_id] _lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase ) if token_ids_a is not None: return ([0] * len(_lowerCAmelCase )) + [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] return ([0] * len(_lowerCAmelCase )) + [1, 1] def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> List[int]: _lowerCAmelCase = [self.sep_token_id] _lowerCAmelCase = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> Tuple[str]: if not os.path.isdir(_lowerCAmelCase ): 
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _lowerCAmelCase = os.path.join( _lowerCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_lowerCAmelCase , "wb" ) as fi: _lowerCAmelCase = self.sp_model.serialized_model_proto() fi.write(_lowerCAmelCase ) return (out_vocab_file,) def _snake_case ( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: _lowerCAmelCase = super()._decode(*_lowerCAmelCase , **_lowerCAmelCase ) _lowerCAmelCase = text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" ) return text
18
1
'''simple docstring''' import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = {"vocab_file": "spiece.model"} _SCREAMING_SNAKE_CASE = { "vocab_file": { "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model", } } class lowerCAmelCase_ ( __magic_name__ ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<sep>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase="<cls>" , _lowerCAmelCase="<mask>" , _lowerCAmelCase=["<eop>", "<eod>"] , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> None: _lowerCAmelCase = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else mask_token _lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=_lowerCAmelCase , remove_space=_lowerCAmelCase , keep_accents=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , ) _lowerCAmelCase = 3 _lowerCAmelCase = do_lower_case _lowerCAmelCase = remove_space _lowerCAmelCase = keep_accents _lowerCAmelCase = vocab_file _lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_lowerCAmelCase ) try: import jieba except ModuleNotFoundError as error: raise error.__class__( "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. " "See https://pypi.org/project/jieba/ for installation." 
) _lowerCAmelCase = jieba _lowerCAmelCase = str.maketrans(" \n" , "\u2582\u2583" ) @property # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size def _snake_case ( self ) -> Optional[int]: return len(self.sp_model ) def _snake_case ( self ) -> Optional[int]: _lowerCAmelCase = {self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> Tuple: _lowerCAmelCase = self.__dict__.copy() _lowerCAmelCase = None return state def __setstate__( self , _lowerCAmelCase ) -> Dict: _lowerCAmelCase = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): _lowerCAmelCase = {} _lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _snake_case ( self , _lowerCAmelCase ) -> str: if self.remove_space: _lowerCAmelCase = " ".join(inputs.strip().split() ) else: _lowerCAmelCase = inputs _lowerCAmelCase = outputs.replace("``" , "\"" ).replace("''" , "\"" ) if not self.keep_accents: _lowerCAmelCase = unicodedata.normalize("NFKD" , _lowerCAmelCase ) _lowerCAmelCase = "".join([c for c in outputs if not unicodedata.combining(_lowerCAmelCase )] ) if self.do_lower_case: _lowerCAmelCase = outputs.lower() return outputs def _snake_case ( self , _lowerCAmelCase ) -> List[str]: _lowerCAmelCase = self.preprocess_text(_lowerCAmelCase ) _lowerCAmelCase = self.sp_model.encode(_lowerCAmelCase , out_type=_lowerCAmelCase ) _lowerCAmelCase = [] for piece in pieces: if len(_lowerCAmelCase ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit(): _lowerCAmelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(_lowerCAmelCase , "" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: _lowerCAmelCase = cur_pieces[1:] else: _lowerCAmelCase = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(_lowerCAmelCase ) else: new_pieces.append(_lowerCAmelCase ) return new_pieces def _snake_case ( self , _lowerCAmelCase ) -> str: return self.sp_model.PieceToId(_lowerCAmelCase ) def _snake_case ( self , _lowerCAmelCase ) -> Optional[Any]: return self.sp_model.IdToPiece(_lowerCAmelCase ) def _snake_case ( self , _lowerCAmelCase ) -> Optional[Any]: _lowerCAmelCase = "".join(_lowerCAmelCase ).replace(_lowerCAmelCase , " " ).strip() return out_string def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> List[int]: _lowerCAmelCase = [self.sep_token_id] _lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase ) if token_ids_a is not None: return ([0] * len(_lowerCAmelCase )) + [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] return ([0] * len(_lowerCAmelCase )) + [1, 1] def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> List[int]: _lowerCAmelCase = [self.sep_token_id] _lowerCAmelCase = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> Tuple[str]: if not os.path.isdir(_lowerCAmelCase ): 
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _lowerCAmelCase = os.path.join( _lowerCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_lowerCAmelCase , "wb" ) as fi: _lowerCAmelCase = self.sp_model.serialized_model_proto() fi.write(_lowerCAmelCase ) return (out_vocab_file,) def _snake_case ( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: _lowerCAmelCase = super()._decode(*_lowerCAmelCase , **_lowerCAmelCase ) _lowerCAmelCase = text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" ) return text
18
'''simple docstring''' from sklearn.metrics import mean_squared_error import datasets _SCREAMING_SNAKE_CASE = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n" _SCREAMING_SNAKE_CASE = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n" _SCREAMING_SNAKE_CASE = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. 
])}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): def _snake_case ( self ) -> Dict: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[ "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html" ] , ) def _snake_case ( self ) -> Tuple: if self.config_name == "multilist": return { "predictions": datasets.Sequence(datasets.Value("float" ) ), "references": datasets.Sequence(datasets.Value("float" ) ), } else: return { "predictions": datasets.Value("float" ), "references": datasets.Value("float" ), } def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase="uniform_average" , _lowerCAmelCase=True ) -> Union[str, Any]: _lowerCAmelCase = mean_squared_error( _lowerCAmelCase , _lowerCAmelCase , sample_weight=_lowerCAmelCase , multioutput=_lowerCAmelCase , squared=_lowerCAmelCase ) return {"mse": mse}
18
1
'''simple docstring''' import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class lowerCAmelCase_ ( __magic_name__ ,__magic_name__ ,unittest.TestCase ): __lowerCamelCase : Optional[Any] = AutoencoderKL __lowerCamelCase : List[Any] = "sample" __lowerCamelCase : Tuple = 1e-2 @property def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = 4 _lowerCAmelCase = 3 _lowerCAmelCase = (32, 32) _lowerCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(_lowerCAmelCase ) return {"sample": image} @property def _snake_case ( self ) -> Any: return (3, 32, 32) @property def _snake_case ( self ) -> List[Any]: return (3, 32, 32) def _snake_case ( self ) -> str: _lowerCAmelCase = { "block_out_channels": [32, 64], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 4, } _lowerCAmelCase = self.dummy_input return init_dict, inputs_dict def _snake_case ( self ) -> Optional[int]: pass def _snake_case ( self ) -> Any: pass @unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" ) def _snake_case ( self ) -> str: # enable deterministic behavior for gradient checkpointing _lowerCAmelCase , _lowerCAmelCase = self.prepare_init_args_and_inputs_for_common() _lowerCAmelCase = self.model_class(**_lowerCAmelCase ) model.to(_lowerCAmelCase ) assert not model.is_gradient_checkpointing and model.training _lowerCAmelCase = model(**_lowerCAmelCase ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() _lowerCAmelCase = torch.randn_like(_lowerCAmelCase ) _lowerCAmelCase = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing _lowerCAmelCase = self.model_class(**_lowerCAmelCase ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(_lowerCAmelCase ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training _lowerCAmelCase = model_a(**_lowerCAmelCase ).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() _lowerCAmelCase = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1E-5 ) _lowerCAmelCase = dict(model.named_parameters() ) _lowerCAmelCase = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) ) def _snake_case ( self ) -> Optional[Any]: _lowerCAmelCase , _lowerCAmelCase = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) self.assertEqual(len(loading_info["missing_keys"] ) , 0 ) model.to(_lowerCAmelCase ) _lowerCAmelCase = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def _snake_case ( self ) -> Dict: _lowerCAmelCase = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" ) _lowerCAmelCase = model.to(_lowerCAmelCase ) model.eval() if torch_device == "mps": _lowerCAmelCase = torch.manual_seed(0 ) else: _lowerCAmelCase = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 ) _lowerCAmelCase = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) _lowerCAmelCase = image.to(_lowerCAmelCase ) with torch.no_grad(): _lowerCAmelCase = model(_lowerCAmelCase , sample_posterior=_lowerCAmelCase , generator=_lowerCAmelCase ).sample _lowerCAmelCase = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. if torch_device == "mps": _lowerCAmelCase = torch.tensor( [ -4.0078E-01, -3.8323E-04, -1.2681E-01, -1.1462E-01, 2.0095E-01, 1.0893E-01, -8.8247E-02, -3.0361E-01, -9.8644E-03, ] ) elif torch_device == "cpu": _lowerCAmelCase = torch.tensor( [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] ) else: _lowerCAmelCase = torch.tensor( [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] ) self.assertTrue(torch_all_close(_lowerCAmelCase , _lowerCAmelCase , rtol=1E-2 ) ) @slow class lowerCAmelCase_ ( unittest.TestCase ): def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]: return f'''gaussian_noise_s={seed}_shape={'_'.join([str(_lowerCAmelCase ) for s in shape] )}.npy''' def _snake_case ( self ) -> List[str]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self , _lowerCAmelCase=0 , _lowerCAmelCase=(4, 3, 512, 512) , _lowerCAmelCase=False ) -> Tuple: _lowerCAmelCase = torch.floataa if fpaa else torch.floataa _lowerCAmelCase = torch.from_numpy(load_hf_numpy(self.get_file_format(_lowerCAmelCase , _lowerCAmelCase ) ) ).to(_lowerCAmelCase ).to(_lowerCAmelCase ) return image def _snake_case ( self , _lowerCAmelCase="CompVis/stable-diffusion-v1-4" , _lowerCAmelCase=False ) -> Tuple: _lowerCAmelCase = "fp16" if fpaa else None _lowerCAmelCase = torch.floataa if fpaa else torch.floataa _lowerCAmelCase = AutoencoderKL.from_pretrained( _lowerCAmelCase , subfolder="vae" , torch_dtype=_lowerCAmelCase , revision=_lowerCAmelCase , ) model.to(_lowerCAmelCase ).eval() return model def _snake_case ( self , _lowerCAmelCase=0 ) -> str: if torch_device == "mps": return torch.manual_seed(_lowerCAmelCase ) return 
torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase ) @parameterized.expand( [ # fmt: off [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]], [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]], # fmt: on ] ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict: _lowerCAmelCase = self.get_sd_vae_model() _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase ) _lowerCAmelCase = self.get_generator(_lowerCAmelCase ) with torch.no_grad(): _lowerCAmelCase = model(_lowerCAmelCase , generator=_lowerCAmelCase , sample_posterior=_lowerCAmelCase ).sample assert sample.shape == image.shape _lowerCAmelCase = sample[-1, -2:, -2:, :2].flatten().float().cpu() _lowerCAmelCase = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=3E-3 ) @parameterized.expand( [ # fmt: off [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]], [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]], # fmt: on ] ) @require_torch_gpu def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: _lowerCAmelCase = self.get_sd_vae_model(fpaa=_lowerCAmelCase ) _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , fpaa=_lowerCAmelCase ) _lowerCAmelCase = self.get_generator(_lowerCAmelCase ) with torch.no_grad(): _lowerCAmelCase = model(_lowerCAmelCase , generator=_lowerCAmelCase , sample_posterior=_lowerCAmelCase ).sample assert sample.shape == image.shape _lowerCAmelCase = sample[-1, -2:, :2, -2:].flatten().float().cpu() _lowerCAmelCase = torch.tensor(_lowerCAmelCase ) assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1E-2 ) @parameterized.expand( [ # fmt: off [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]], [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]], # fmt: on ] ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Tuple: _lowerCAmelCase = self.get_sd_vae_model() _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase ) with torch.no_grad(): _lowerCAmelCase = model(_lowerCAmelCase ).sample assert sample.shape == image.shape _lowerCAmelCase = sample[-1, -2:, -2:, :2].flatten().float().cpu() _lowerCAmelCase = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=3E-3 ) @parameterized.expand( [ # fmt: off [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]], [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]], # fmt: on ] ) @require_torch_gpu def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: _lowerCAmelCase = self.get_sd_vae_model() _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , shape=(3, 4, 64, 64) ) with torch.no_grad(): _lowerCAmelCase = model.decode(_lowerCAmelCase ).sample assert list(sample.shape ) == [3, 3, 512, 512] _lowerCAmelCase = sample[-1, -2:, :2, -2:].flatten().cpu() _lowerCAmelCase = torch.tensor(_lowerCAmelCase ) assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) @parameterized.expand( [ # fmt: 
off [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]], [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]], # fmt: on ] ) @require_torch_gpu def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> str: _lowerCAmelCase = self.get_sd_vae_model(fpaa=_lowerCAmelCase ) _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , shape=(3, 4, 64, 64) , fpaa=_lowerCAmelCase ) with torch.no_grad(): _lowerCAmelCase = model.decode(_lowerCAmelCase ).sample assert list(sample.shape ) == [3, 3, 512, 512] _lowerCAmelCase = sample[-1, -2:, :2, -2:].flatten().float().cpu() _lowerCAmelCase = torch.tensor(_lowerCAmelCase ) assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=5E-3 ) @parameterized.expand([(13,), (16,), (27,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." ) def _snake_case ( self , _lowerCAmelCase ) -> List[str]: _lowerCAmelCase = self.get_sd_vae_model(fpaa=_lowerCAmelCase ) _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , shape=(3, 4, 64, 64) , fpaa=_lowerCAmelCase ) with torch.no_grad(): _lowerCAmelCase = model.decode(_lowerCAmelCase ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): _lowerCAmelCase = model.decode(_lowerCAmelCase ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1E-1 ) @parameterized.expand([(13,), (16,), (37,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." ) def _snake_case ( self , _lowerCAmelCase ) -> Any: _lowerCAmelCase = self.get_sd_vae_model() _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , shape=(3, 4, 64, 64) ) with torch.no_grad(): _lowerCAmelCase = model.decode(_lowerCAmelCase ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): _lowerCAmelCase = model.decode(_lowerCAmelCase ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1E-2 ) @parameterized.expand( [ # fmt: off [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]], [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]], # fmt: on ] ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> int: _lowerCAmelCase = self.get_sd_vae_model() _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase ) _lowerCAmelCase = self.get_generator(_lowerCAmelCase ) with torch.no_grad(): _lowerCAmelCase = model.encode(_lowerCAmelCase ).latent_dist _lowerCAmelCase = dist.sample(generator=_lowerCAmelCase ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] _lowerCAmelCase = sample[0, -1, -3:, -3:].flatten().cpu() _lowerCAmelCase = torch.tensor(_lowerCAmelCase ) _lowerCAmelCase = 3E-3 if torch_device != "mps" else 1E-2 assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=_lowerCAmelCase )
18
'''simple docstring'''
def __a(numa : int , numb : int ):
    '''simple docstring'''
    # Two integers have opposite signs exactly when their XOR is negative,
    # i.e. when the sign bits of the two operands differ.
    return numa ^ numb < 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
18
1
'''simple docstring''' import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class lowerCAmelCase_ ( unittest.TestCase ): def _snake_case ( self ) -> int: _lowerCAmelCase = "ylacombe/bark-small" _lowerCAmelCase = tempfile.mkdtemp() _lowerCAmelCase = "en_speaker_1" _lowerCAmelCase = "This is a test string" _lowerCAmelCase = "speaker_embeddings_path.json" _lowerCAmelCase = "speaker_embeddings" def _snake_case ( self , **_lowerCAmelCase ) -> str: return AutoTokenizer.from_pretrained(self.checkpoint , **_lowerCAmelCase ) def _snake_case ( self ) -> Optional[int]: shutil.rmtree(self.tmpdirname ) def _snake_case ( self ) -> str: _lowerCAmelCase = self.get_tokenizer() _lowerCAmelCase = BarkProcessor(tokenizer=_lowerCAmelCase ) processor.save_pretrained(self.tmpdirname ) _lowerCAmelCase = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def _snake_case ( self ) -> Any: _lowerCAmelCase = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) _lowerCAmelCase = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) _lowerCAmelCase = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="(BOS)" , eos_token="(EOS)" , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def _snake_case ( self ) -> Any: _lowerCAmelCase = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) _lowerCAmelCase = 35 _lowerCAmelCase = 2 _lowerCAmelCase = 8 _lowerCAmelCase = { "semantic_prompt": np.ones(_lowerCAmelCase ), "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len) ), "fine_prompt": np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset _lowerCAmelCase = processor(text=self.input_string , voice_preset=_lowerCAmelCase ) _lowerCAmelCase = inputs["history_prompt"] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_lowerCAmelCase , np.array([] ) ).tolist() ) # test loading voice preset from npz file _lowerCAmelCase = os.path.join(self.tmpdirname , "file.npz" ) np.savez(_lowerCAmelCase , **_lowerCAmelCase ) _lowerCAmelCase = processor(text=self.input_string , voice_preset=_lowerCAmelCase ) _lowerCAmelCase = inputs["history_prompt"] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_lowerCAmelCase , np.array([] ) ).tolist() ) # test loading voice preset from the hub _lowerCAmelCase = processor(text=self.input_string , voice_preset=self.voice_preset ) def _snake_case ( self ) -> Optional[int]: _lowerCAmelCase = self.get_tokenizer() _lowerCAmelCase = BarkProcessor(tokenizer=_lowerCAmelCase ) _lowerCAmelCase = processor(text=self.input_string ) _lowerCAmelCase = tokenizer( self.input_string , padding="max_length" , max_length=256 , add_special_tokens=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , 
encoded_processor[key].squeeze().tolist() )
18
'''simple docstring'''
from __future__ import annotations


def __a(SCREAMING_SNAKE_CASE_ : int | float | str , SCREAMING_SNAKE_CASE_ : int | float | str ):
    '''simple docstring'''
    if nth_term == "":
        return [""]
    _lowerCAmelCase = int(SCREAMING_SNAKE_CASE_ )
    _lowerCAmelCase = int(SCREAMING_SNAKE_CASE_ )
    _lowerCAmelCase = []
    for temp in range(int(SCREAMING_SNAKE_CASE_ ) ):
        series.append(F'''1 / {pow(temp + 1 , int(SCREAMING_SNAKE_CASE_ ) )}''' if series else "1" )
    return series


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    _SCREAMING_SNAKE_CASE = int(input("Enter the last number (nth term) of the P-Series"))
    _SCREAMING_SNAKE_CASE = int(input("Enter the power for P-Series"))
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    print(p_series(nth_term, power))
18
1
'''simple docstring''' _SCREAMING_SNAKE_CASE = "0.18.2" from .configuration_utils import ConfigMixin from .utils import ( OptionalDependencyNotAvailable, is_flax_available, is_inflect_available, is_invisible_watermark_available, is_k_diffusion_available, is_k_diffusion_version, is_librosa_available, is_note_seq_available, is_onnx_available, is_scipy_available, is_torch_available, is_torchsde_available, is_transformers_available, is_transformers_version, is_unidecode_available, logging, ) try: if not is_onnx_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_onnx_objects import * # noqa F403 else: from .pipelines import OnnxRuntimeModel try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_pt_objects import * # noqa F403 else: from .models import ( AutoencoderKL, ControlNetModel, ModelMixin, PriorTransformer, TaFilmDecoder, TransformeraDModel, UNetaDModel, UNetaDConditionModel, UNetaDModel, UNetaDConditionModel, VQModel, ) from .optimization import ( get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, get_scheduler, ) from .pipelines import ( AudioPipelineOutput, ConsistencyModelPipeline, DanceDiffusionPipeline, DDIMPipeline, DDPMPipeline, DiffusionPipeline, DiTPipeline, ImagePipelineOutput, KarrasVePipeline, LDMPipeline, LDMSuperResolutionPipeline, PNDMPipeline, RePaintPipeline, ScoreSdeVePipeline, ) from .schedulers import ( CMStochasticIterativeScheduler, DDIMInverseScheduler, DDIMParallelScheduler, DDIMScheduler, DDPMParallelScheduler, DDPMScheduler, DEISMultistepScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, IPNDMScheduler, KarrasVeScheduler, KDPMaAncestralDiscreteScheduler, KDPMaDiscreteScheduler, PNDMScheduler, RePaintScheduler, SchedulerMixin, ScoreSdeVeScheduler, UnCLIPScheduler, UniPCMultistepScheduler, VQDiffusionScheduler, ) from .training_utils import EMAModel try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .schedulers import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .schedulers import DPMSolverSDEScheduler try: if not (is_torch_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipelines import ( AltDiffusionImgaImgPipeline, AltDiffusionPipeline, AudioLDMPipeline, CycleDiffusionPipeline, IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ImageTextPipelineOutput, KandinskyImgaImgPipeline, KandinskyInpaintPipeline, KandinskyPipeline, KandinskyPriorPipeline, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaControlnetPipeline, KandinskyVaaImgaImgPipeline, KandinskyVaaInpaintPipeline, KandinskyVaaPipeline, KandinskyVaaPriorEmbaEmbPipeline, 
KandinskyVaaPriorPipeline, LDMTextToImagePipeline, PaintByExamplePipeline, SemanticStableDiffusionPipeline, ShapEImgaImgPipeline, ShapEPipeline, StableDiffusionAttendAndExcitePipeline, StableDiffusionControlNetImgaImgPipeline, StableDiffusionControlNetInpaintPipeline, StableDiffusionControlNetPipeline, StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionImageVariationPipeline, StableDiffusionImgaImgPipeline, StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy, StableDiffusionInstructPixaPixPipeline, StableDiffusionLatentUpscalePipeline, StableDiffusionLDMaDPipeline, StableDiffusionModelEditingPipeline, StableDiffusionPanoramaPipeline, StableDiffusionParadigmsPipeline, StableDiffusionPipeline, StableDiffusionPipelineSafe, StableDiffusionPixaPixZeroPipeline, StableDiffusionSAGPipeline, StableDiffusionUpscalePipeline, StableUnCLIPImgaImgPipeline, StableUnCLIPPipeline, TextToVideoSDPipeline, TextToVideoZeroPipeline, UnCLIPImageVariationPipeline, UnCLIPPipeline, UniDiffuserModel, UniDiffuserPipeline, UniDiffuserTextDecoder, VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, VideoToVideoSDPipeline, VQDiffusionPipeline, ) try: if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403 else: from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline try: if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipelines import StableDiffusionKDiffusionPipeline try: if not (is_torch_available() and is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403 else: from .pipelines import ( OnnxStableDiffusionImgaImgPipeline, OnnxStableDiffusionInpaintPipeline, OnnxStableDiffusionInpaintPipelineLegacy, OnnxStableDiffusionPipeline, OnnxStableDiffusionUpscalePipeline, StableDiffusionOnnxPipeline, ) try: if not (is_torch_available() and is_librosa_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_librosa_objects import * # noqa F403 else: from .pipelines import AudioDiffusionPipeline, Mel try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .pipelines import SpectrogramDiffusionPipeline try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_objects import * # noqa F403 else: from .models.controlnet_flax import FlaxControlNetModel from .models.modeling_flax_utils import FlaxModelMixin from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel from .models.vae_flax import FlaxAutoencoderKL from .pipelines import FlaxDiffusionPipeline from .schedulers import ( FlaxDDIMScheduler, FlaxDDPMScheduler, 
FlaxDPMSolverMultistepScheduler, FlaxKarrasVeScheduler, FlaxLMSDiscreteScheduler, FlaxPNDMScheduler, FlaxSchedulerMixin, FlaxScoreSdeVeScheduler, ) try: if not (is_flax_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_and_transformers_objects import * # noqa F403 else: from .pipelines import ( FlaxStableDiffusionControlNetPipeline, FlaxStableDiffusionImgaImgPipeline, FlaxStableDiffusionInpaintPipeline, FlaxStableDiffusionPipeline, ) try: if not (is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_note_seq_objects import * # noqa F403 else: from .pipelines import MidiProcessor
18
'''simple docstring''' from ..utils import DummyObject, requires_backends class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : int = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : int = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Dict = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : int = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Optional[int] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: 
requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Union[str, Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Optional[int] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Union[str, Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(cls , ["torch"] ) def __a(*SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : int ): '''simple docstring''' requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] ) def __a(*SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : str ): '''simple docstring''' requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] ) def __a(*SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : List[Any] ): '''simple docstring''' requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] ) def __a(*SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : List[str] ): '''simple docstring''' requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] ) def __a(*SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : List[str] ): '''simple docstring''' requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] ) def __a(*SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : str ): '''simple docstring''' requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] ) def __a(*SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Dict ): '''simple docstring''' requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Dict = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Union[str, Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(cls , ["torch"] ) class 
lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Optional[Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Any = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Optional[int] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[str] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Any = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[str] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , 
**_lowerCAmelCase ) -> Any: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[str] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Optional[int] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Union[str, Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Optional[int] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : str = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Tuple = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Union[str, Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(cls , ["torch"] ) 
@classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[str] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Any = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Union[str, Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Union[str, Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : int = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Tuple = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : str = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Dict = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: 
requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Any = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[str] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : int = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Dict = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : str = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : int = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(self , ["torch"] ) @classmethod def 
_snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[str] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Tuple = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Union[str, Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : str = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Union[str, Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: requires_backends(cls , ["torch"] )
18
1
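The long block above is diffusers' `dummy_pt_objects.py`: every public torch-backed class is mirrored by a stub that raises through `requires_backends`. The obfuscation collapsed all the distinct public names (e.g. `UNet2DModel`) to `lowerCAmelCase_` and the `DummyObject` metaclass to `__magic_name__`, so the individual names cannot be recovered here. A simplified, self-contained sketch of the pattern; this `requires_backends`/`DummyObject` pair is a stand-in, not the library code:

def requires_backends(obj, backends):
    # Simplified stand-in: the real helper checks each backend's availability
    # and raises an ImportError with installation instructions.
    name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
    raise ImportError(f"{name} requires the following backends: {', '.join(backends)}")


class DummyObject(type):
    # Metaclass so that even classmethod or attribute access on the stub fails.
    def __getattr__(cls, key):
        requires_backends(cls, cls._backends)


class UNet2DModel(metaclass=DummyObject):  # illustrative name
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])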
'''simple docstring'''
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel


class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        # Restored from the upstream M-CLIP reference implementation: the
        # obfuscated source dropped these attribute assignments, leaving the
        # parameters unused and the config attributes undefined.
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    # Class name restored from the upstream M-CLIP code; `MCLIPConfig` appears
    # verbatim in the original snippet.
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # Mask-weighted mean pooling over the sequence dimension.
        embs = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs), embs
18
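The forward pass above mean-pools the XLM-R token embeddings and projects them into the image-embedding space. A hedged usage sketch, assuming the reconstructed class names above; the checkpoint id "M-CLIP/XLM-Roberta-Large-Vit-B-32" is an assumption, not taken from this file:

import torch
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-B-32")  # assumed checkpoint
model = MultilingualCLIP.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-B-32")

batch = tokenizer(["a photo of a cat"], return_tensors="pt", padding=True)
with torch.no_grad():
    projected, pooled = model(batch["input_ids"], batch["attention_mask"])
print(projected.shape)  # (batch_size, numDims)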
'''simple docstring''' import argparse import json import math import os import time import traceback import zipfile from collections import Counter import requests def __a(SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Tuple=None ): '''simple docstring''' _lowerCAmelCase = None if token is not None: _lowerCAmelCase = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''} _lowerCAmelCase = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100''' _lowerCAmelCase = requests.get(SCREAMING_SNAKE_CASE_ , headers=SCREAMING_SNAKE_CASE_ ).json() _lowerCAmelCase = {} try: job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} ) _lowerCAmelCase = math.ceil((result["total_count"] - 100) / 100 ) for i in range(SCREAMING_SNAKE_CASE_ ): _lowerCAmelCase = requests.get(url + F'''&page={i + 2}''' , headers=SCREAMING_SNAKE_CASE_ ).json() job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} ) return job_links except Exception: print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' ) return {} def __a(SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict=None ): '''simple docstring''' _lowerCAmelCase = None if token is not None: _lowerCAmelCase = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''} _lowerCAmelCase = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100''' _lowerCAmelCase = requests.get(SCREAMING_SNAKE_CASE_ , headers=SCREAMING_SNAKE_CASE_ ).json() _lowerCAmelCase = {} try: artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} ) _lowerCAmelCase = math.ceil((result["total_count"] - 100) / 100 ) for i in range(SCREAMING_SNAKE_CASE_ ): _lowerCAmelCase = requests.get(url + F'''&page={i + 2}''' , headers=SCREAMING_SNAKE_CASE_ ).json() artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} ) return artifacts except Exception: print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' ) return {} def __a(SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Dict ): '''simple docstring''' _lowerCAmelCase = None if token is not None: _lowerCAmelCase = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''} _lowerCAmelCase = requests.get(SCREAMING_SNAKE_CASE_ , headers=SCREAMING_SNAKE_CASE_ , allow_redirects=SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = result.headers["Location"] _lowerCAmelCase = requests.get(SCREAMING_SNAKE_CASE_ , allow_redirects=SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , F'''{artifact_name}.zip''' ) with open(SCREAMING_SNAKE_CASE_ , "wb" ) as fp: fp.write(response.content ) def __a(SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any]=None ): '''simple docstring''' _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = None with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ ) as z: for filename in z.namelist(): if not os.path.isdir(SCREAMING_SNAKE_CASE_ ): # read the file if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]: with z.open(SCREAMING_SNAKE_CASE_ ) as f: for line in f: _lowerCAmelCase = line.decode("UTF-8" ).strip() if filename == "failures_line.txt": try: # `error_line` is the place where `error` occurs _lowerCAmelCase = line[: line.index(": " 
)] _lowerCAmelCase = line[line.index(": " ) + len(": " ) :] errors.append([error_line, error] ) except Exception: # skip un-related lines pass elif filename == "summary_short.txt" and line.startswith("FAILED " ): # `test` is the test method that failed _lowerCAmelCase = line[len("FAILED " ) :] failed_tests.append(SCREAMING_SNAKE_CASE_ ) elif filename == "job_name.txt": _lowerCAmelCase = line if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ): raise ValueError( F'''`errors` and `failed_tests` should have the same number of elements. Got {len(SCREAMING_SNAKE_CASE_ )} for `errors` ''' F'''and {len(SCREAMING_SNAKE_CASE_ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some''' " problem." ) _lowerCAmelCase = None if job_name and job_links: _lowerCAmelCase = job_links.get(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # A list with elements of the form (line of error, error, failed test) _lowerCAmelCase = [x + [y] + [job_link] for x, y in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )] return result def __a(SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple=None ): '''simple docstring''' _lowerCAmelCase = [] _lowerCAmelCase = [os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for p in os.listdir(SCREAMING_SNAKE_CASE_ ) if p.endswith(".zip" )] for p in paths: errors.extend(get_errors_from_single_artifact(SCREAMING_SNAKE_CASE_ , job_links=SCREAMING_SNAKE_CASE_ ) ) return errors def __a(SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str=None ): '''simple docstring''' _lowerCAmelCase = Counter() counter.update([x[1] for x in logs] ) _lowerCAmelCase = counter.most_common() _lowerCAmelCase = {} for error, count in counts: if error_filter is None or error not in error_filter: _lowerCAmelCase = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]} _lowerCAmelCase = dict(sorted(r.items() , key=lambda SCREAMING_SNAKE_CASE_ : item[1]["count"] , reverse=SCREAMING_SNAKE_CASE_ ) ) return r def __a(SCREAMING_SNAKE_CASE_ : List[str] ): '''simple docstring''' _lowerCAmelCase = test.split("::" )[0] if test.startswith("tests/models/" ): _lowerCAmelCase = test.split("/" )[2] else: _lowerCAmelCase = None return test def __a(SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple=None ): '''simple docstring''' _lowerCAmelCase = [(x[0], x[1], get_model(x[2] )) for x in logs] _lowerCAmelCase = [x for x in logs if x[2] is not None] _lowerCAmelCase = {x[2] for x in logs} _lowerCAmelCase = {} for test in tests: _lowerCAmelCase = Counter() # count by errors in `test` counter.update([x[1] for x in logs if x[2] == test] ) _lowerCAmelCase = counter.most_common() _lowerCAmelCase = {error: count for error, count in counts if (error_filter is None or error not in error_filter)} _lowerCAmelCase = sum(error_counts.values() ) if n_errors > 0: _lowerCAmelCase = {"count": n_errors, "errors": error_counts} _lowerCAmelCase = dict(sorted(r.items() , key=lambda SCREAMING_SNAKE_CASE_ : item[1]["count"] , reverse=SCREAMING_SNAKE_CASE_ ) ) return r def __a(SCREAMING_SNAKE_CASE_ : Optional[int] ): '''simple docstring''' _lowerCAmelCase = "| no. 
| error | status |" _lowerCAmelCase = "|-:|:-|:-|" _lowerCAmelCase = [header, sep] for error in reduced_by_error: _lowerCAmelCase = reduced_by_error[error]["count"] _lowerCAmelCase = F'''| {count} | {error[:100]} | |''' lines.append(SCREAMING_SNAKE_CASE_ ) return "\n".join(SCREAMING_SNAKE_CASE_ ) def __a(SCREAMING_SNAKE_CASE_ : Optional[int] ): '''simple docstring''' _lowerCAmelCase = "| model | no. of errors | major error | count |" _lowerCAmelCase = "|-:|-:|-:|-:|" _lowerCAmelCase = [header, sep] for model in reduced_by_model: _lowerCAmelCase = reduced_by_model[model]["count"] _lowerCAmelCase , _lowerCAmelCase = list(reduced_by_model[model]["errors"].items() )[0] _lowerCAmelCase = F'''| {model} | {count} | {error[:60]} | {_count} |''' lines.append(SCREAMING_SNAKE_CASE_ ) return "\n".join(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE = argparse.ArgumentParser() # Required parameters parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.") parser.add_argument( "--output_dir", type=str, required=True, help="Where to store the downloaded artifacts and other result files.", ) parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.") _SCREAMING_SNAKE_CASE = parser.parse_args() os.makedirs(args.output_dir, exist_ok=True) _SCREAMING_SNAKE_CASE = get_job_links(args.workflow_run_id, token=args.token) _SCREAMING_SNAKE_CASE = {} # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee. # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`. if _job_links: for k, v in _job_links.items(): # This is how GitHub actions combine job names. if " / " in k: _SCREAMING_SNAKE_CASE = k.find(" / ") _SCREAMING_SNAKE_CASE = k[index + len(" / ") :] _SCREAMING_SNAKE_CASE = v with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp: json.dump(job_links, fp, ensure_ascii=False, indent=4) _SCREAMING_SNAKE_CASE = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) for idx, (name, url) in enumerate(artifacts.items()): download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) _SCREAMING_SNAKE_CASE = get_all_errors(args.output_dir, job_links=job_links) # `e[1]` is the error _SCREAMING_SNAKE_CASE = Counter() counter.update([e[1] for e in errors]) # print the top 30 most common test errors _SCREAMING_SNAKE_CASE = counter.most_common(30) for item in most_common: print(item) with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp: json.dump(errors, fp, ensure_ascii=False, indent=4) _SCREAMING_SNAKE_CASE = reduce_by_error(errors) _SCREAMING_SNAKE_CASE = reduce_by_model(errors) _SCREAMING_SNAKE_CASE = make_github_table(reduced_by_error) _SCREAMING_SNAKE_CASE = make_github_table_per_model(reduced_by_model) with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp: fp.write(sa) with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp: fp.write(sa)
18
1
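One caveat about the script above: the obfuscation renamed every helper definition to `__a` (and gave several of them duplicate `SCREAMING_SNAKE_CASE_` parameters, which is a SyntaxError as written), while the `__main__` block still calls the original names such as `get_job_links`, `get_artifacts_links`, `download_artifact`, and `get_all_errors`. A hedged sketch of the intended programmatic use, assuming those original helper names; the run id is a placeholder:

import os

workflow_run_id = "1234567890"  # placeholder run id
token = os.environ.get("GITHUB_TOKEN")  # needs actions:read permission

job_links = get_job_links(workflow_run_id, token=token)
artifacts = get_artifacts_links(workflow_run_id, token=token)
for name, url in artifacts.items():
    download_artifact(name, url, "ci_reports", token)
errors = get_all_errors("ci_reports", job_links=job_links)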
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# The obfuscated original reassigned one placeholder name for each block below;
# the dict-key form is what the lazy-import machinery at the bottom consumes,
# and the keys are recoverable from the TYPE_CHECKING imports.
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convbert"] = [
        "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvBertForMaskedLM",
        "ConvBertForMultipleChoice",
        "ConvBertForQuestionAnswering",
        "ConvBertForSequenceClassification",
        "ConvBertForTokenClassification",
        "ConvBertLayer",
        "ConvBertModel",
        "ConvBertPreTrainedModel",
        "load_tf_weights_in_convbert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convbert"] = [
        "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFConvBertForMaskedLM",
        "TFConvBertForMultipleChoice",
        "TFConvBertForQuestionAnswering",
        "TFConvBertForSequenceClassification",
        "TFConvBertForTokenClassification",
        "TFConvBertLayer",
        "TFConvBertModel",
        "TFConvBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
    from .tokenization_convbert import ConvBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_convbert_fast import ConvBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convbert import (
            CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvBertForMaskedLM,
            ConvBertForMultipleChoice,
            ConvBertForQuestionAnswering,
            ConvBertForSequenceClassification,
            ConvBertForTokenClassification,
            ConvBertLayer,
            ConvBertModel,
            ConvBertPreTrainedModel,
            load_tf_weights_in_convbert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convbert import (
            TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFConvBertForMaskedLM,
            TFConvBertForMultipleChoice,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertLayer,
            TFConvBertModel,
            TFConvBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
18
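The `_LazyModule` registered above defers the heavy framework imports until an attribute is first accessed. A simplified stand-in showing the core idea; this is a sketch, not the transformers implementation:

import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each public attribute to the submodule that defines it.
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # Only called when normal lookup fails, i.e. on first access.
        submodule = self._attr_to_submodule.get(attr)
        if submodule is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f".{submodule}", self.__name__)
        return getattr(module, attr)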
'''simple docstring''' import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : List[str] = (DPMSolverSinglestepScheduler,) __lowerCamelCase : int = (("num_inference_steps", 25),) def _snake_case ( self , **_lowerCAmelCase ) -> Any: _lowerCAmelCase = { "num_train_timesteps": 1000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "solver_order": 2, "prediction_type": "epsilon", "thresholding": False, "sample_max_value": 1.0, "algorithm_type": "dpmsolver++", "solver_type": "midpoint", "lambda_min_clipped": -float("inf" ), "variance_type": None, } config.update(**_lowerCAmelCase ) return config def _snake_case ( self , _lowerCAmelCase=0 , **_lowerCAmelCase ) -> List[Any]: _lowerCAmelCase = dict(self.forward_default_kwargs ) _lowerCAmelCase = kwargs.pop("num_inference_steps" , _lowerCAmelCase ) _lowerCAmelCase = self.dummy_sample _lowerCAmelCase = 0.1 * sample _lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: _lowerCAmelCase = self.get_scheduler_config(**_lowerCAmelCase ) _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) scheduler.set_timesteps(_lowerCAmelCase ) # copy over dummy past residuals _lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_lowerCAmelCase ) _lowerCAmelCase = scheduler_class.from_pretrained(_lowerCAmelCase ) new_scheduler.set_timesteps(_lowerCAmelCase ) # copy over dummy past residuals _lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] _lowerCAmelCase , _lowerCAmelCase = sample, sample for t in range(_lowerCAmelCase , time_step + scheduler.config.solver_order + 1 ): _lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample _lowerCAmelCase = new_scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def _snake_case ( self ) -> int: pass def _snake_case ( self , _lowerCAmelCase=0 , **_lowerCAmelCase ) -> Optional[int]: _lowerCAmelCase = dict(self.forward_default_kwargs ) _lowerCAmelCase = kwargs.pop("num_inference_steps" , _lowerCAmelCase ) _lowerCAmelCase = self.dummy_sample _lowerCAmelCase = 0.1 * sample _lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: _lowerCAmelCase = self.get_scheduler_config() _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) scheduler.set_timesteps(_lowerCAmelCase ) # copy over dummy past residuals (must be after setting timesteps) _lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_lowerCAmelCase ) _lowerCAmelCase = scheduler_class.from_pretrained(_lowerCAmelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(_lowerCAmelCase ) # copy over dummy past residual (must be after setting timesteps) _lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] _lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample _lowerCAmelCase = new_scheduler.step(_lowerCAmelCase , _lowerCAmelCase , 
_lowerCAmelCase , **_lowerCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def _snake_case ( self , _lowerCAmelCase=None , **_lowerCAmelCase ) -> Tuple: if scheduler is None: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config(**_lowerCAmelCase ) _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config(**_lowerCAmelCase ) _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) _lowerCAmelCase = 10 _lowerCAmelCase = self.dummy_model() _lowerCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(_lowerCAmelCase ) for i, t in enumerate(scheduler.timesteps ): _lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase ) _lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample return sample def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) _lowerCAmelCase = 50 _lowerCAmelCase = self.dummy_model() _lowerCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(_lowerCAmelCase ) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:] ): _lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase ) _lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample _lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2574 ) < 1E-3 def _snake_case ( self ) -> Optional[Any]: for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=_lowerCAmelCase ) def _snake_case ( self ) -> List[Any]: # make sure that iterating over schedulers with same config names gives same results # for defaults _lowerCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) _lowerCAmelCase = self.full_loop(scheduler=_lowerCAmelCase ) _lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2791 ) < 1E-3 _lowerCAmelCase = DEISMultistepScheduler.from_config(scheduler.config ) _lowerCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config ) _lowerCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config ) _lowerCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config ) _lowerCAmelCase = self.full_loop(scheduler=_lowerCAmelCase ) _lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2791 ) < 1E-3 def _snake_case ( self ) -> str: self.check_over_configs(thresholding=_lowerCAmelCase ) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=_lowerCAmelCase , prediction_type=_lowerCAmelCase , sample_max_value=_lowerCAmelCase , algorithm_type="dpmsolver++" , solver_order=_lowerCAmelCase , solver_type=_lowerCAmelCase , ) def _snake_case ( self ) -> Dict: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_lowerCAmelCase ) def _snake_case ( self ) -> Union[str, Any]: for algorithm_type in ["dpmsolver", "dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=_lowerCAmelCase , solver_type=_lowerCAmelCase , prediction_type=_lowerCAmelCase , algorithm_type=_lowerCAmelCase , ) _lowerCAmelCase = self.full_loop( 
solver_order=_lowerCAmelCase , solver_type=_lowerCAmelCase , prediction_type=_lowerCAmelCase , algorithm_type=_lowerCAmelCase , ) assert not torch.isnan(_lowerCAmelCase ).any(), "Samples have nan numbers" def _snake_case ( self ) -> Optional[Any]: self.check_over_configs(lower_order_final=_lowerCAmelCase ) self.check_over_configs(lower_order_final=_lowerCAmelCase ) def _snake_case ( self ) -> Optional[Any]: self.check_over_configs(lambda_min_clipped=-float("inf" ) ) self.check_over_configs(lambda_min_clipped=-5.1 ) def _snake_case ( self ) -> str: self.check_over_configs(variance_type=_lowerCAmelCase ) self.check_over_configs(variance_type="learned_range" ) def _snake_case ( self ) -> int: for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=_lowerCAmelCase , time_step=0 ) def _snake_case ( self ) -> Any: _lowerCAmelCase = self.full_loop() _lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2791 ) < 1E-3 def _snake_case ( self ) -> List[str]: _lowerCAmelCase = self.full_loop(use_karras_sigmas=_lowerCAmelCase ) _lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2248 ) < 1E-3 def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = self.full_loop(prediction_type="v_prediction" ) _lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_mean.item() - 0.1453 ) < 1E-3 def _snake_case ( self ) -> Any: _lowerCAmelCase = self.full_loop(prediction_type="v_prediction" , use_karras_sigmas=_lowerCAmelCase ) _lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_mean.item() - 0.0649 ) < 1E-3 def _snake_case ( self ) -> List[Any]: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config(thresholding=_lowerCAmelCase , dynamic_thresholding_ratio=0 ) _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) _lowerCAmelCase = 10 _lowerCAmelCase = self.dummy_model() _lowerCAmelCase = self.dummy_sample_deter.half() scheduler.set_timesteps(_lowerCAmelCase ) for i, t in enumerate(scheduler.timesteps ): _lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase ) _lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample assert sample.dtype == torch.floataa
18
1
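The tests above drive DPMSolverSinglestepScheduler through config round-trips, save/load checks, and full denoising loops. A minimal standalone sketch of the loop they exercise; the zero "model output" is a stand-in for a real UNet call:

import torch
from diffusers import DPMSolverSinglestepScheduler

scheduler = DPMSolverSinglestepScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8)  # stand-in for real latents
for t in scheduler.timesteps:
    model_output = torch.zeros_like(sample)  # stand-in for model(sample, t)
    sample = scheduler.step(model_output, t, sample).prev_sample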
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"],
    "feature_extraction_whisper": ["WhisperFeatureExtractor"],
    "processing_whisper": ["WhisperProcessor"],
    "tokenization_whisper": ["WhisperTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_whisper"] = [
        "WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WhisperForConditionalGeneration",
        "WhisperModel",
        "WhisperPreTrainedModel",
        "WhisperForAudioClassification",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_whisper"] = [
        "TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWhisperForConditionalGeneration",
        "TFWhisperModel",
        "TFWhisperPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_whisper"] = [
        "FlaxWhisperForConditionalGeneration",
        "FlaxWhisperModel",
        "FlaxWhisperPreTrainedModel",
        "FlaxWhisperForAudioClassification",
    ]

if TYPE_CHECKING:
    from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
    from .feature_extraction_whisper import WhisperFeatureExtractor
    from .processing_whisper import WhisperProcessor
    from .tokenization_whisper import WhisperTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_whisper_fast import WhisperTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_whisper import (
            WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            WhisperForAudioClassification,
            WhisperForConditionalGeneration,
            WhisperModel,
            WhisperPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_whisper import (
            TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWhisperForConditionalGeneration,
            TFWhisperModel,
            TFWhisperPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_whisper import (
            FlaxWhisperForAudioClassification,
            FlaxWhisperForConditionalGeneration,
            FlaxWhisperModel,
            FlaxWhisperPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
18
'''simple docstring'''
from __future__ import annotations


def mean(nums: list) -> float:
    """Return the arithmetic mean of a non-empty list of numbers.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
18
1
'''simple docstring''' import copy import unittest from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, ) from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class lowerCAmelCase_ : def __init__( self , _lowerCAmelCase , _lowerCAmelCase=2 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=2 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=99 , _lowerCAmelCase=36 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=37 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=512 , _lowerCAmelCase=16 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=6 , _lowerCAmelCase=6 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , _lowerCAmelCase=1000 , ) -> Any: _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = num_channels _lowerCAmelCase = image_size _lowerCAmelCase = patch_size _lowerCAmelCase = text_seq_length _lowerCAmelCase = is_training _lowerCAmelCase = use_input_mask _lowerCAmelCase = use_token_type_ids _lowerCAmelCase = use_labels _lowerCAmelCase = vocab_size _lowerCAmelCase = hidden_size _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = num_attention_heads _lowerCAmelCase = intermediate_size _lowerCAmelCase = hidden_act _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = max_position_embeddings _lowerCAmelCase = type_vocab_size _lowerCAmelCase = type_sequence_label_size _lowerCAmelCase = initializer_range _lowerCAmelCase = coordinate_size _lowerCAmelCase = shape_size _lowerCAmelCase = num_labels _lowerCAmelCase = num_choices _lowerCAmelCase = scope _lowerCAmelCase = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) _lowerCAmelCase = text_seq_length _lowerCAmelCase = (image_size // patch_size) ** 2 + 1 _lowerCAmelCase = self.text_seq_length + self.image_seq_length def _snake_case ( self ) -> Dict: _lowerCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) _lowerCAmelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: _lowerCAmelCase = bbox[i, j, 3] _lowerCAmelCase = bbox[i, j, 1] _lowerCAmelCase = t if bbox[i, j, 2] < bbox[i, j, 0]: _lowerCAmelCase = bbox[i, j, 2] _lowerCAmelCase = bbox[i, j, 0] _lowerCAmelCase = t _lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCAmelCase = None if 
self.use_input_mask: _lowerCAmelCase = random_attention_mask([self.batch_size, self.text_seq_length] ) _lowerCAmelCase = None if self.use_token_type_ids: _lowerCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) _lowerCAmelCase = None _lowerCAmelCase = None if self.use_labels: _lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) _lowerCAmelCase = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]: _lowerCAmelCase = LayoutLMvaModel(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() # text + image _lowerCAmelCase = model(_lowerCAmelCase , pixel_values=_lowerCAmelCase ) _lowerCAmelCase = model( _lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) _lowerCAmelCase = model(_lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) _lowerCAmelCase = model(_lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only _lowerCAmelCase = model(_lowerCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only _lowerCAmelCase = model(pixel_values=_lowerCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]: _lowerCAmelCase = self.num_labels _lowerCAmelCase = LayoutLMvaForSequenceClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model( _lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]: _lowerCAmelCase = self.num_labels _lowerCAmelCase = LayoutLMvaForTokenClassification(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model( _lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , 
token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[str]: _lowerCAmelCase = LayoutLMvaForQuestionAnswering(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model( _lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _snake_case ( self ) -> Optional[int]: _lowerCAmelCase = self.prepare_config_and_inputs() ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) = config_and_inputs _lowerCAmelCase = { "input_ids": input_ids, "bbox": bbox, "pixel_values": pixel_values, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch class lowerCAmelCase_ ( __magic_name__ ,__magic_name__ ,unittest.TestCase ): __lowerCamelCase : Optional[int] = False __lowerCamelCase : Tuple = False __lowerCamelCase : Dict = False __lowerCamelCase : Tuple = ( ( LayoutLMvaModel, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaForQuestionAnswering, ) if is_torch_available() else () ) __lowerCamelCase : Union[str, Any] = ( {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel} if is_torch_available() else {} ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> int: # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has # the sequence dimension of the text embedding only. 
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`) return True def _snake_case ( self ) -> Any: _lowerCAmelCase = LayoutLMvaModelTester(self ) _lowerCAmelCase = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=37 ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> Dict: _lowerCAmelCase = copy.deepcopy(_lowerCAmelCase ) if model_class in get_values(_lowerCAmelCase ): _lowerCAmelCase = { k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous() if isinstance(_lowerCAmelCase , torch.Tensor ) and v.ndim > 1 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(_lowerCAmelCase ): _lowerCAmelCase = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=_lowerCAmelCase ) elif model_class in get_values(_lowerCAmelCase ): _lowerCAmelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_lowerCAmelCase ) _lowerCAmelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_lowerCAmelCase ) elif model_class in [ *get_values(_lowerCAmelCase ), ]: _lowerCAmelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_lowerCAmelCase ) elif model_class in [ *get_values(_lowerCAmelCase ), ]: _lowerCAmelCase = torch.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=_lowerCAmelCase , ) return inputs_dict def _snake_case ( self ) -> List[Any]: self.config_tester.run_common_tests() def _snake_case ( self ) -> List[Any]: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def _snake_case ( self ) -> Optional[Any]: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _lowerCAmelCase = type self.model_tester.create_and_check_model(*_lowerCAmelCase ) def _snake_case ( self ) -> Tuple: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_lowerCAmelCase ) def _snake_case ( self ) -> str: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_lowerCAmelCase ) def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_lowerCAmelCase ) @slow def _snake_case ( self ) -> List[str]: for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase = LayoutLMvaModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) def __a(): '''simple docstring''' _lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch class lowerCAmelCase_ ( unittest.TestCase ): @cached_property def _snake_case ( self ) -> List[str]: return LayoutLMvaImageProcessor(apply_ocr=_lowerCAmelCase ) if is_vision_available() else None @slow def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(_lowerCAmelCase ) _lowerCAmelCase = self.default_image_processor _lowerCAmelCase = prepare_img() _lowerCAmelCase = image_processor(images=_lowerCAmelCase , return_tensors="pt" ).pixel_values.to(_lowerCAmelCase ) _lowerCAmelCase = torch.tensor([[1, 2]] ) _lowerCAmelCase = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 ) # forward 
pass _lowerCAmelCase = model( input_ids=input_ids.to(_lowerCAmelCase ) , bbox=bbox.to(_lowerCAmelCase ) , pixel_values=pixel_values.to(_lowerCAmelCase ) , ) # verify the logits _lowerCAmelCase = torch.Size((1, 199, 768) ) self.assertEqual(outputs.last_hidden_state.shape , _lowerCAmelCase ) _lowerCAmelCase = torch.tensor( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(_lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , _lowerCAmelCase , atol=1E-4 ) )
18
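A minimal sketch of the forward pass the integration test above exercises; the checkpoint name and tensor values come from that test, while using `LayoutLMv3ImageProcessor` directly (rather than the obfuscated wrapper) is an assumption about the underlying transformers API.

import torch
from PIL import Image
from transformers import LayoutLMv3ImageProcessor, LayoutLMv3Model

model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base")
image_processor = LayoutLMv3ImageProcessor(apply_ocr=False)

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
input_ids = torch.tensor([[1, 2]])  # two text tokens, as in the test
bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)  # one box per token

with torch.no_grad():
    outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values)
print(outputs.last_hidden_state.shape)  # the test expects torch.Size([1, 199, 768])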
'''simple docstring''' import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class lowerCAmelCase_ ( unittest.TestCase ): def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _lowerCAmelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_lowerCAmelCase ) _lowerCAmelCase = -1 _lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCAmelCase ) _lowerCAmelCase = model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase ) _lowerCAmelCase = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: _lowerCAmelCase = TextStreamer(_lowerCAmelCase ) model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase , streamer=_lowerCAmelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _lowerCAmelCase = cs.out[:-1] self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _lowerCAmelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_lowerCAmelCase ) _lowerCAmelCase = -1 _lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCAmelCase ) _lowerCAmelCase = model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase ) _lowerCAmelCase = tokenizer.decode(greedy_ids[0] ) _lowerCAmelCase = TextIteratorStreamer(_lowerCAmelCase ) _lowerCAmelCase = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} _lowerCAmelCase = Thread(target=model.generate , kwargs=_lowerCAmelCase ) thread.start() _lowerCAmelCase = "" for new_text in streamer: streamer_text += new_text self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) def _snake_case ( self ) -> List[str]: _lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _lowerCAmelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_lowerCAmelCase ) _lowerCAmelCase = -1 _lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCAmelCase ) _lowerCAmelCase = model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase ) _lowerCAmelCase = greedy_ids[:, input_ids.shape[1] :] _lowerCAmelCase = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: _lowerCAmelCase = TextStreamer(_lowerCAmelCase , skip_prompt=_lowerCAmelCase ) model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase , streamer=_lowerCAmelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _lowerCAmelCase = cs.out[:-1] self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) def _snake_case ( self ) -> Dict: # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. 
Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them _lowerCAmelCase = AutoTokenizer.from_pretrained("distilgpt2" ) _lowerCAmelCase = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(_lowerCAmelCase ) _lowerCAmelCase = -1 _lowerCAmelCase = torch.ones((1, 5) , device=_lowerCAmelCase ).long() * model.config.bos_token_id with CaptureStdout() as cs: _lowerCAmelCase = TextStreamer(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase ) model.generate(_lowerCAmelCase , max_new_tokens=1 , do_sample=_lowerCAmelCase , streamer=_lowerCAmelCase ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token _lowerCAmelCase = cs.out[:-1] # Remove the final "\n" _lowerCAmelCase = tokenizer(_lowerCAmelCase , return_tensors="pt" ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _lowerCAmelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_lowerCAmelCase ) _lowerCAmelCase = -1 _lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCAmelCase ) _lowerCAmelCase = TextIteratorStreamer(_lowerCAmelCase , timeout=0.001 ) _lowerCAmelCase = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} _lowerCAmelCase = Thread(target=model.generate , kwargs=_lowerCAmelCase ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(_lowerCAmelCase ): _lowerCAmelCase = "" for new_text in streamer: streamer_text += new_text
18
1
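The iterator-streamer tests above boil down to one pattern: run `generate` in a background thread and consume decoded chunks on the main thread. A minimal sketch using the same tiny test checkpoint:

from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

inputs = tokenizer("Hello", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
generation_kwargs = dict(**inputs, max_new_tokens=10, do_sample=False, streamer=streamer)

thread = Thread(target=model.generate, kwargs=generation_kwargs)
thread.start()
for new_text in streamer:  # yields decoded text chunks as they are produced
    print(new_text, end="")
thread.join()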
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json", "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json", } class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : Union[str, Any] = "markuplm" def __init__( self , _lowerCAmelCase=30522 , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3072 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=512 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-12 , _lowerCAmelCase=0 , _lowerCAmelCase=0 , _lowerCAmelCase=2 , _lowerCAmelCase=256 , _lowerCAmelCase=1024 , _lowerCAmelCase=216 , _lowerCAmelCase=1001 , _lowerCAmelCase=32 , _lowerCAmelCase=50 , _lowerCAmelCase="absolute" , _lowerCAmelCase=True , _lowerCAmelCase=None , **_lowerCAmelCase , ) -> Optional[Any]: super().__init__( pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase , ) _lowerCAmelCase = vocab_size _lowerCAmelCase = hidden_size _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = num_attention_heads _lowerCAmelCase = hidden_act _lowerCAmelCase = intermediate_size _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = max_position_embeddings _lowerCAmelCase = type_vocab_size _lowerCAmelCase = initializer_range _lowerCAmelCase = layer_norm_eps _lowerCAmelCase = position_embedding_type _lowerCAmelCase = use_cache _lowerCAmelCase = classifier_dropout # additional properties _lowerCAmelCase = max_depth _lowerCAmelCase = max_xpath_tag_unit_embeddings _lowerCAmelCase = max_xpath_subs_unit_embeddings _lowerCAmelCase = tag_pad_id _lowerCAmelCase = subs_pad_id _lowerCAmelCase = xpath_unit_hidden_size
18
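A quick sketch instantiating the config above and reading the MarkupLM-specific xpath fields; the printed values follow the defaults in the signature, assuming the obfuscated parameters map to their upstream names in order.

from transformers import MarkupLMConfig

config = MarkupLMConfig()
print(config.max_depth)                       # 50
print(config.max_xpath_tag_unit_embeddings)   # 256
print(config.max_xpath_subs_unit_embeddings)  # 1024
print(config.xpath_unit_hidden_size)          # 32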
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json", # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : Union[str, Any] = "blenderbot-small" __lowerCamelCase : Optional[Any] = ["past_key_values"] __lowerCamelCase : str = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self , _lowerCAmelCase=50265 , _lowerCAmelCase=512 , _lowerCAmelCase=8 , _lowerCAmelCase=2048 , _lowerCAmelCase=16 , _lowerCAmelCase=8 , _lowerCAmelCase=2048 , _lowerCAmelCase=16 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase="gelu" , _lowerCAmelCase=512 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1 , _lowerCAmelCase=False , _lowerCAmelCase=0 , _lowerCAmelCase=1 , _lowerCAmelCase=2 , _lowerCAmelCase=2 , **_lowerCAmelCase , ) -> Dict: _lowerCAmelCase = vocab_size _lowerCAmelCase = max_position_embeddings _lowerCAmelCase = d_model _lowerCAmelCase = encoder_ffn_dim _lowerCAmelCase = encoder_layers _lowerCAmelCase = encoder_attention_heads _lowerCAmelCase = decoder_ffn_dim _lowerCAmelCase = decoder_layers _lowerCAmelCase = decoder_attention_heads _lowerCAmelCase = dropout _lowerCAmelCase = attention_dropout _lowerCAmelCase = activation_dropout _lowerCAmelCase = activation_function _lowerCAmelCase = init_std _lowerCAmelCase = encoder_layerdrop _lowerCAmelCase = decoder_layerdrop _lowerCAmelCase = use_cache _lowerCAmelCase = encoder_layers _lowerCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , is_encoder_decoder=_lowerCAmelCase , decoder_start_token_id=_lowerCAmelCase , forced_eos_token_id=_lowerCAmelCase , **_lowerCAmelCase , ) class lowerCAmelCase_ ( __magic_name__ ): @property def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: _lowerCAmelCase = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: _lowerCAmelCase = {0: "batch"} _lowerCAmelCase = {0: "batch", 1: "past_decoder_sequence + sequence"} else: _lowerCAmelCase = {0: "batch", 1: "decoder_sequence"} _lowerCAmelCase = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(_lowerCAmelCase , direction="inputs" ) elif self.task == "causal-lm": # TODO: figure this case out. 
_lowerCAmelCase = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: _lowerCAmelCase , _lowerCAmelCase = self.num_layers for i in range(_lowerCAmelCase ): _lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"} _lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"} else: _lowerCAmelCase = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: _lowerCAmelCase = super().outputs else: _lowerCAmelCase = super(_lowerCAmelCase , self ).outputs if self.use_past: _lowerCAmelCase , _lowerCAmelCase = self.num_layers for i in range(_lowerCAmelCase ): _lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"} _lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"} return common_outputs def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]: _lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # Generate decoder inputs _lowerCAmelCase = seq_length if not self.use_past else 1 _lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) _lowerCAmelCase = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()} _lowerCAmelCase = dict(**_lowerCAmelCase , **_lowerCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch _lowerCAmelCase , _lowerCAmelCase = common_inputs["input_ids"].shape _lowerCAmelCase = common_inputs["decoder_input_ids"].shape[1] _lowerCAmelCase , _lowerCAmelCase = self.num_attention_heads _lowerCAmelCase = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) _lowerCAmelCase = decoder_seq_length + 3 _lowerCAmelCase = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) _lowerCAmelCase = torch.cat( [common_inputs["decoder_attention_mask"], torch.ones(_lowerCAmelCase , _lowerCAmelCase )] , dim=1 ) _lowerCAmelCase = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered _lowerCAmelCase , _lowerCAmelCase = self.num_layers _lowerCAmelCase = min(_lowerCAmelCase , _lowerCAmelCase ) _lowerCAmelCase = max(_lowerCAmelCase , _lowerCAmelCase ) - min_num_layers _lowerCAmelCase = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(_lowerCAmelCase ): common_inputs["past_key_values"].append( ( torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase ), ) ) # TODO: test this. 
_lowerCAmelCase = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(_lowerCAmelCase , _lowerCAmelCase ): common_inputs["past_key_values"].append((torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) ) return common_inputs def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]: _lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch _lowerCAmelCase , _lowerCAmelCase = common_inputs["input_ids"].shape # Not using the same length for past_key_values _lowerCAmelCase = seqlen + 2 _lowerCAmelCase , _lowerCAmelCase = self.num_layers _lowerCAmelCase , _lowerCAmelCase = self.num_attention_heads _lowerCAmelCase = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) _lowerCAmelCase = common_inputs["attention_mask"].dtype _lowerCAmelCase = torch.cat( [common_inputs["attention_mask"], torch.ones(_lowerCAmelCase , _lowerCAmelCase , dtype=_lowerCAmelCase )] , dim=1 ) _lowerCAmelCase = [ (torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) for _ in range(_lowerCAmelCase ) ] return common_inputs def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]: # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. 
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX _lowerCAmelCase = compute_effective_axis_dimension( _lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX _lowerCAmelCase = tokenizer.num_special_tokens_to_add(_lowerCAmelCase ) _lowerCAmelCase = compute_effective_axis_dimension( _lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowerCAmelCase ) # Generate dummy inputs according to compute batch and sequence _lowerCAmelCase = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size _lowerCAmelCase = dict(tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase ) ) return common_inputs def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]: if self.task in ["default", "seq2seq-lm"]: _lowerCAmelCase = self._generate_dummy_inputs_for_default_and_seqaseq_lm( _lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase ) elif self.task == "causal-lm": _lowerCAmelCase = self._generate_dummy_inputs_for_causal_lm( _lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase ) else: _lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase ) return common_inputs def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict: if self.task in ["default", "seq2seq-lm"]: _lowerCAmelCase = super()._flatten_past_key_values_(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) else: _lowerCAmelCase = super(_lowerCAmelCase , self )._flatten_past_key_values_( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
18
1
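The dummy `past_key_values` built above for ONNX export have a fixed per-layer shape. A standalone sketch with the config's default sizes (d_model=512, 16 heads, 8 decoder layers); the past length of 11 is illustrative only:

import torch

batch, num_heads, d_model, num_layers = 2, 16, 512, 8
past_len = 11  # arbitrary example length
head_dim = d_model // num_heads
past_shape = (batch, num_heads, past_len, head_dim)

past_key_values = [
    (torch.zeros(past_shape), torch.zeros(past_shape))  # (key, value) per layer
    for _ in range(num_layers)
]
print(past_key_values[0][0].shape)  # torch.Size([2, 16, 11, 32])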
'''simple docstring''' def __a(SCREAMING_SNAKE_CASE_ : str ): '''simple docstring''' if not all(x.isalpha() for x in string ): raise ValueError("String must only contain alphabetic characters." ) _lowerCAmelCase = sorted(string.lower() ) return len(SCREAMING_SNAKE_CASE_ ) == len(set(SCREAMING_SNAKE_CASE_ ) ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE = input("Enter a string ").strip() _SCREAMING_SNAKE_CASE = is_isogram(input_str) print(f'''{input_str} is {'an' if isogram else 'not an'} isogram.''')
18
'''simple docstring''' import re import string import numpy as np import datasets _SCREAMING_SNAKE_CASE = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n" _SCREAMING_SNAKE_CASE = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n" _SCREAMING_SNAKE_CASE = "\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): def _snake_case ( self ) -> List[str]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , 
id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , reference_urls=[] , ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=False , ) -> str: if regexes_to_ignore is not None: for s in regexes_to_ignore: _lowerCAmelCase = np.array([re.sub(_lowerCAmelCase , "" , _lowerCAmelCase ) for x in predictions] ) _lowerCAmelCase = np.array([re.sub(_lowerCAmelCase , "" , _lowerCAmelCase ) for x in references] ) else: _lowerCAmelCase = np.asarray(_lowerCAmelCase ) _lowerCAmelCase = np.asarray(_lowerCAmelCase ) if ignore_case: _lowerCAmelCase = np.char.lower(_lowerCAmelCase ) _lowerCAmelCase = np.char.lower(_lowerCAmelCase ) if ignore_punctuation: _lowerCAmelCase = string.punctuation.maketrans("" , "" , string.punctuation ) _lowerCAmelCase = np.char.translate(_lowerCAmelCase , table=_lowerCAmelCase ) _lowerCAmelCase = np.char.translate(_lowerCAmelCase , table=_lowerCAmelCase ) if ignore_numbers: _lowerCAmelCase = string.digits.maketrans("" , "" , string.digits ) _lowerCAmelCase = np.char.translate(_lowerCAmelCase , table=_lowerCAmelCase ) _lowerCAmelCase = np.char.translate(_lowerCAmelCase , table=_lowerCAmelCase ) _lowerCAmelCase = predictions == references return {"exact_match": np.mean(_lowerCAmelCase ) * 100}
18
1
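A self-contained sketch of the normalization the metric above performs before comparison (case folding and punctuation stripping shown; regex and digit removal follow the same shape):

import string
import numpy as np

def simple_exact_match(predictions, references, ignore_case=True, ignore_punctuation=True):
    preds, refs = np.asarray(predictions), np.asarray(references)
    if ignore_case:
        preds, refs = np.char.lower(preds), np.char.lower(refs)
    if ignore_punctuation:
        table = str.maketrans("", "", string.punctuation)
        preds = np.char.translate(preds, table=table)
        refs = np.char.translate(refs, table=table)
    return float(np.mean(preds == refs) * 100)

print(simple_exact_match(["cat?", "theater"], ["cat", "theater"]))  # 100.0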
'''simple docstring''' import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase_ : def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=2 , _lowerCAmelCase=99 , _lowerCAmelCase=0 , _lowerCAmelCase=32 , _lowerCAmelCase=5 , _lowerCAmelCase=4 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=512 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase="last" , _lowerCAmelCase=True , _lowerCAmelCase=None , _lowerCAmelCase=0 , ) -> Any: _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = seq_length _lowerCAmelCase = is_training _lowerCAmelCase = use_input_lengths _lowerCAmelCase = use_token_type_ids _lowerCAmelCase = use_labels _lowerCAmelCase = gelu_activation _lowerCAmelCase = sinusoidal_embeddings _lowerCAmelCase = causal _lowerCAmelCase = asm _lowerCAmelCase = n_langs _lowerCAmelCase = vocab_size _lowerCAmelCase = n_special _lowerCAmelCase = hidden_size _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = num_attention_heads _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = max_position_embeddings _lowerCAmelCase = type_sequence_label_size _lowerCAmelCase = initializer_range _lowerCAmelCase = num_labels _lowerCAmelCase = num_choices _lowerCAmelCase = summary_type _lowerCAmelCase = use_proj _lowerCAmelCase = scope _lowerCAmelCase = bos_token_id def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _lowerCAmelCase = None if self.use_input_lengths: _lowerCAmelCase = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length _lowerCAmelCase = None if self.use_token_type_ids: _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = None if self.use_labels: _lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowerCAmelCase = ids_tensor([self.batch_size] , 2 ).float() _lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _lowerCAmelCase = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _snake_case ( self ) -> List[str]: return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , 
n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> Dict: _lowerCAmelCase = XLMModel(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model(_lowerCAmelCase , lengths=_lowerCAmelCase , langs=_lowerCAmelCase ) _lowerCAmelCase = model(_lowerCAmelCase , langs=_lowerCAmelCase ) _lowerCAmelCase = model(_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> Dict: _lowerCAmelCase = XLMWithLMHeadModel(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> str: _lowerCAmelCase = XLMForQuestionAnsweringSimple(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model(_lowerCAmelCase ) _lowerCAmelCase = model(_lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase ) _lowerCAmelCase = outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> Tuple: _lowerCAmelCase = XLMForQuestionAnswering(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model(_lowerCAmelCase ) _lowerCAmelCase = model( _lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase , cls_index=_lowerCAmelCase , is_impossible=_lowerCAmelCase , p_mask=_lowerCAmelCase , ) _lowerCAmelCase = model( _lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase , cls_index=_lowerCAmelCase , is_impossible=_lowerCAmelCase , ) ((_lowerCAmelCase) , ) = result_with_labels.to_tuple() _lowerCAmelCase = model(_lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase ) ((_lowerCAmelCase) , ) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , 
(self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> Union[str, Any]: _lowerCAmelCase = XLMForSequenceClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model(_lowerCAmelCase ) _lowerCAmelCase = model(_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> int: _lowerCAmelCase = self.num_labels _lowerCAmelCase = XLMForTokenClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> Union[str, Any]: _lowerCAmelCase = self.num_choices _lowerCAmelCase = XLMForMultipleChoice(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCAmelCase = model( _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _snake_case ( self ) -> Optional[int]: _lowerCAmelCase = self.prepare_config_and_inputs() ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) = config_and_inputs _lowerCAmelCase = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths} return config, inputs_dict @require_torch class lowerCAmelCase_ ( __magic_name__ ,__magic_name__ ,__magic_name__ ,unittest.TestCase ): __lowerCamelCase : int = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) __lowerCamelCase : Tuple = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable __lowerCamelCase : List[Any] = ( { "feature-extraction": XLMModel, "fill-mask": XLMWithLMHeadModel, "question-answering": XLMForQuestionAnsweringSimple, "text-classification": XLMForSequenceClassification, "text-generation": XLMWithLMHeadModel, "token-classification": XLMForTokenClassification, "zero-shot": XLMForSequenceClassification, } if is_torch_available() else {} ) def 
_snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]: if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> Optional[Any]: _lowerCAmelCase = super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": _lowerCAmelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_lowerCAmelCase ) _lowerCAmelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_lowerCAmelCase ) return inputs_dict def _snake_case ( self ) -> Optional[int]: _lowerCAmelCase = XLMModelTester(self ) _lowerCAmelCase = ConfigTester(self , config_class=_lowerCAmelCase , emb_dim=37 ) def _snake_case ( self ) -> Optional[Any]: self.config_tester.run_common_tests() def _snake_case ( self ) -> Optional[Any]: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*_lowerCAmelCase ) def _snake_case ( self ) -> Optional[Any]: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*_lowerCAmelCase ) def _snake_case ( self ) -> Optional[int]: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*_lowerCAmelCase ) def _snake_case ( self ) -> Any: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*_lowerCAmelCase ) def _snake_case ( self ) -> Optional[int]: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*_lowerCAmelCase ) def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*_lowerCAmelCase ) def _snake_case ( self ) -> Dict: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*_lowerCAmelCase ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=1 ) -> int: self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase ) self.assertListEqual( [isinstance(_lowerCAmelCase , _lowerCAmelCase ) for iter_attentions in attentions] , [True] * len(_lowerCAmelCase ) ) self.assertEqual(len(_lowerCAmelCase ) , (max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(_lowerCAmelCase ): # adds PAD dummy token _lowerCAmelCase = min_length + idx + 1 _lowerCAmelCase = min_length + idx + 1 _lowerCAmelCase = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(_lowerCAmelCase ) ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=1 ) -> Union[str, Any]: 
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase ) self.assertListEqual( [isinstance(_lowerCAmelCase , _lowerCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(_lowerCAmelCase ) , ) self.assertEqual(len(_lowerCAmelCase ) , (max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(_lowerCAmelCase ): # adds PAD dummy token _lowerCAmelCase = min_length + idx + 1 _lowerCAmelCase = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(_lowerCAmelCase ) , ) pass @slow def _snake_case ( self ) -> str: for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase = XLMModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) @require_torch class lowerCAmelCase_ ( unittest.TestCase ): @slow def _snake_case ( self ) -> Optional[int]: _lowerCAmelCase = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048" ) model.to(_lowerCAmelCase ) _lowerCAmelCase = torch.tensor([[14, 447]] , dtype=torch.long , device=_lowerCAmelCase ) # the president _lowerCAmelCase = [ 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference _lowerCAmelCase = model.generate(_lowerCAmelCase , do_sample=_lowerCAmelCase ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , _lowerCAmelCase )
18
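A compact sketch of the slow integration test above; the checkpoint is large, and the comment's caveat about poor auto-regressive generation quality applies.

import torch
from transformers import XLMWithLMHeadModel

model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
input_ids = torch.tensor([[14, 447]])  # "the president"
output_ids = model.generate(input_ids, do_sample=False)
print(output_ids[0].tolist())  # the test expects the prompt tokens repeated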
'''simple docstring''' import warnings from ...utils import logging from .image_processing_yolos import YolosImageProcessor _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) class lowerCAmelCase_ ( __magic_name__ ): def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> None: warnings.warn( "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use YolosImageProcessor instead." , _lowerCAmelCase , ) super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
18
1
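The Yolos feature-extractor shim above follows a generic deprecation pattern: the old class subclasses its replacement and only adds a warning. A stripped-down illustration with hypothetical names:

import warnings

class NewImageProcessor:
    def __init__(self, size=224):
        self.size = size

class OldFeatureExtractor(NewImageProcessor):  # hypothetical deprecated alias
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)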
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json", # See all Donut models at https://huggingface.co/models?filter=donut-swin } class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : Union[str, Any] = "donut-swin" __lowerCamelCase : int = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self , _lowerCAmelCase=224 , _lowerCAmelCase=4 , _lowerCAmelCase=3 , _lowerCAmelCase=96 , _lowerCAmelCase=[2, 2, 6, 2] , _lowerCAmelCase=[3, 6, 12, 24] , _lowerCAmelCase=7 , _lowerCAmelCase=4.0 , _lowerCAmelCase=True , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.1 , _lowerCAmelCase="gelu" , _lowerCAmelCase=False , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-5 , **_lowerCAmelCase , ) -> Optional[Any]: super().__init__(**_lowerCAmelCase ) _lowerCAmelCase = image_size _lowerCAmelCase = patch_size _lowerCAmelCase = num_channels _lowerCAmelCase = embed_dim _lowerCAmelCase = depths _lowerCAmelCase = len(_lowerCAmelCase ) _lowerCAmelCase = num_heads _lowerCAmelCase = window_size _lowerCAmelCase = mlp_ratio _lowerCAmelCase = qkv_bias _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = drop_path_rate _lowerCAmelCase = hidden_act _lowerCAmelCase = use_absolute_embeddings _lowerCAmelCase = layer_norm_eps _lowerCAmelCase = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _lowerCAmelCase = int(embed_dim * 2 ** (len(_lowerCAmelCase ) - 1) )
18
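A one-line check of the channel-dimension computation at the end of the config above, using its defaults:

embed_dim, depths = 96, [2, 2, 6, 2]
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
assert hidden_size == 768  # width after the last Swin stage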
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json", "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json", } class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : Any = "falcon" __lowerCamelCase : List[str] = ["past_key_values"] def __init__( self , _lowerCAmelCase=65024 , _lowerCAmelCase=4544 , _lowerCAmelCase=32 , _lowerCAmelCase=71 , _lowerCAmelCase=1E-5 , _lowerCAmelCase=0.02 , _lowerCAmelCase=True , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=None , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=11 , _lowerCAmelCase=11 , **_lowerCAmelCase , ) -> Union[str, Any]: _lowerCAmelCase = vocab_size # Backward compatibility with n_embed kwarg _lowerCAmelCase = kwargs.pop("n_embed" , _lowerCAmelCase ) _lowerCAmelCase = hidden_size if n_embed is None else n_embed _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = num_attention_heads _lowerCAmelCase = layer_norm_epsilon _lowerCAmelCase = initializer_range _lowerCAmelCase = use_cache _lowerCAmelCase = hidden_dropout _lowerCAmelCase = attention_dropout _lowerCAmelCase = bos_token_id _lowerCAmelCase = eos_token_id _lowerCAmelCase = num_attention_heads if num_kv_heads is None else num_kv_heads _lowerCAmelCase = alibi _lowerCAmelCase = new_decoder_architecture _lowerCAmelCase = multi_query # Ignored when new_decoder_architecture is True _lowerCAmelCase = parallel_attn _lowerCAmelCase = bias super().__init__(bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase ) @property def _snake_case ( self ) -> Optional[Any]: return self.hidden_size // self.num_attention_heads @property def _snake_case ( self ) -> Optional[Any]: return not self.alibi
18
1
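The first (obfuscated) property above is the per-head dimension. A quick check with the defaults; `FalconConfig` availability depends on the installed transformers version:

from transformers import FalconConfig

config = FalconConfig(hidden_size=4544, num_attention_heads=71)
print(config.hidden_size // config.num_attention_heads)  # 64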
'''simple docstring''' import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = {"vocab_file": "vocab.json", "merges_file": "merges.txt"} # See all LED models at https://huggingface.co/models?filter=LED _SCREAMING_SNAKE_CASE = { "vocab_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json", }, "merges_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt", }, "tokenizer_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json", }, } _SCREAMING_SNAKE_CASE = { "allenai/led-base-16384": 1_63_84, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def __a(): '''simple docstring''' _lowerCAmelCase = ( list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) _lowerCAmelCase = bs[:] _lowerCAmelCase = 0 for b in range(2**8 ): if b not in bs: bs.append(SCREAMING_SNAKE_CASE_ ) cs.append(2**8 + n ) n += 1 _lowerCAmelCase = [chr(SCREAMING_SNAKE_CASE_ ) for n in cs] return dict(zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) def __a(SCREAMING_SNAKE_CASE_ : Optional[Any] ): '''simple docstring''' _lowerCAmelCase = set() _lowerCAmelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _lowerCAmelCase = char return pairs class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : Optional[int] = VOCAB_FILES_NAMES __lowerCamelCase : str = PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase : str = ["input_ids", "attention_mask"] def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase="replace" , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase="<mask>" , _lowerCAmelCase=False , **_lowerCAmelCase , ) -> str: _lowerCAmelCase = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else bos_token _lowerCAmelCase = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else eos_token _lowerCAmelCase = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else sep_token _lowerCAmelCase = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else cls_token _lowerCAmelCase = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else unk_token _lowerCAmelCase = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it _lowerCAmelCase = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else mask_token super().__init__( errors=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase , **_lowerCAmelCase , ) with open(_lowerCAmelCase , encoding="utf-8" ) as vocab_handle: _lowerCAmelCase = json.load(_lowerCAmelCase ) _lowerCAmelCase = {v: k for k, v in self.encoder.items()} _lowerCAmelCase = errors # how to handle errors in decoding _lowerCAmelCase = bytes_to_unicode() _lowerCAmelCase = {v: k for k, v in self.byte_encoder.items()} with open(_lowerCAmelCase , encoding="utf-8" ) as merges_handle: _lowerCAmelCase = merges_handle.read().split("\n" )[1:-1] _lowerCAmelCase = [tuple(merge.split() ) for merge in bpe_merges] _lowerCAmelCase = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) ) _lowerCAmelCase = {} _lowerCAmelCase = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions _lowerCAmelCase = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def _snake_case ( self ) -> Union[str, Any]: return len(self.encoder ) def _snake_case ( self ) -> Dict: return dict(self.encoder , **self.added_tokens_encoder ) def _snake_case ( self , _lowerCAmelCase ) -> Union[str, Any]: if token in self.cache: return self.cache[token] _lowerCAmelCase = tuple(_lowerCAmelCase ) _lowerCAmelCase = get_pairs(_lowerCAmelCase ) if not pairs: return token while True: _lowerCAmelCase = min(_lowerCAmelCase , key=lambda _lowerCAmelCase : self.bpe_ranks.get(_lowerCAmelCase , float("inf" ) ) ) if bigram not in self.bpe_ranks: break _lowerCAmelCase , _lowerCAmelCase = bigram _lowerCAmelCase = [] _lowerCAmelCase = 0 while i < len(_lowerCAmelCase ): try: _lowerCAmelCase = word.index(_lowerCAmelCase , _lowerCAmelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) _lowerCAmelCase = j if word[i] == first and i < len(_lowerCAmelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 _lowerCAmelCase = tuple(_lowerCAmelCase ) _lowerCAmelCase = new_word if len(_lowerCAmelCase ) == 1: break else: _lowerCAmelCase = get_pairs(_lowerCAmelCase ) _lowerCAmelCase = " ".join(_lowerCAmelCase ) _lowerCAmelCase = word return word def _snake_case ( self , _lowerCAmelCase ) -> str: _lowerCAmelCase = [] for token in re.findall(self.pat , _lowerCAmelCase ): _lowerCAmelCase = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_lowerCAmelCase ).split(" " ) ) return bpe_tokens def _snake_case ( self , _lowerCAmelCase ) -> Dict: return self.encoder.get(_lowerCAmelCase , self.encoder.get(self.unk_token ) ) def _snake_case ( self , _lowerCAmelCase ) -> List[Any]: return self.decoder.get(_lowerCAmelCase ) def _snake_case ( self , _lowerCAmelCase ) -> Optional[int]: _lowerCAmelCase = "".join(_lowerCAmelCase ) _lowerCAmelCase = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return 
text def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> Tuple[str]: if not os.path.isdir(_lowerCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _lowerCAmelCase = os.path.join( _lowerCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) _lowerCAmelCase = os.path.join( _lowerCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(_lowerCAmelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowerCAmelCase , ensure_ascii=_lowerCAmelCase ) + "\n" ) _lowerCAmelCase = 0 with open(_lowerCAmelCase , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _lowerCAmelCase : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' " Please check that the tokenizer is not corrupted!" ) _lowerCAmelCase = token_index writer.write(" ".join(_lowerCAmelCase ) + "\n" ) index += 1 return vocab_file, merge_file def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _lowerCAmelCase = [self.cls_token_id] _lowerCAmelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(_lowerCAmelCase )) + [1] return [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] + ([0] * len(_lowerCAmelCase )) + [1] def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> List[int]: _lowerCAmelCase = [self.sep_token_id] _lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase=False , **_lowerCAmelCase ) -> Union[str, Any]: _lowerCAmelCase = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(_lowerCAmelCase ) > 0 and not text[0].isspace()): _lowerCAmelCase = " " + text return (text, kwargs) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = PaddingStrategy.DO_NOT_PAD , _lowerCAmelCase = None , _lowerCAmelCase = None , ) -> dict: _lowerCAmelCase = super()._pad( encoded_inputs=_lowerCAmelCase , max_length=_lowerCAmelCase , padding_strategy=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , ) # Load from model defaults if return_attention_mask is None: _lowerCAmelCase = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: _lowerCAmelCase = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
_lowerCAmelCase = len(encoded_inputs["global_attention_mask"] ) != len(_lowerCAmelCase ) if needs_to_be_padded: _lowerCAmelCase = len(_lowerCAmelCase ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` _lowerCAmelCase = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": _lowerCAmelCase = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
18
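A small runnable version of the `get_pairs` helper that drives the BPE merge loop above: it collects the set of adjacent symbol pairs in a word.

def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

print(sorted(get_pairs(tuple("hello"))))  # [('e', 'l'), ('h', 'e'), ('l', 'l'), ('l', 'o')]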
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { "facebook/deit-base-distilled-patch16-224": ( "https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json" ), # See all DeiT models at https://huggingface.co/models?filter=deit } class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : Optional[int] = "deit" def __init__( self , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3072 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-12 , _lowerCAmelCase=224 , _lowerCAmelCase=16 , _lowerCAmelCase=3 , _lowerCAmelCase=True , _lowerCAmelCase=16 , **_lowerCAmelCase , ) -> Dict: super().__init__(**_lowerCAmelCase ) _lowerCAmelCase = hidden_size _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = num_attention_heads _lowerCAmelCase = intermediate_size _lowerCAmelCase = hidden_act _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = initializer_range _lowerCAmelCase = layer_norm_eps _lowerCAmelCase = image_size _lowerCAmelCase = patch_size _lowerCAmelCase = num_channels _lowerCAmelCase = qkv_bias _lowerCAmelCase = encoder_stride class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : List[str] = version.parse("1.11" ) @property def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def _snake_case ( self ) -> float: return 1E-4
18
1
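A sketch of what the ONNX config above exposes to the exporter; the import path for `DeiTOnnxConfig` is an assumption about where transformers keeps it.

from transformers import DeiTConfig
from transformers.models.deit.configuration_deit import DeiTOnnxConfig

onnx_config = DeiTOnnxConfig(DeiTConfig())
print(dict(onnx_config.inputs))         # {'pixel_values': {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}}
print(onnx_config.atol_for_validation)  # 0.0001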
'''simple docstring''' import math def __a(SCREAMING_SNAKE_CASE_ : int ): '''simple docstring''' _lowerCAmelCase = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 ) return exponent == int(SCREAMING_SNAKE_CASE_ ) def __a(SCREAMING_SNAKE_CASE_ : float = 1 / 12345 ): '''simple docstring''' _lowerCAmelCase = 0 _lowerCAmelCase = 0 _lowerCAmelCase = 3 while True: _lowerCAmelCase = (integer**2 - 1) / 4 # if candidate is an integer, then there is a partition for k if partition_candidate == int(SCREAMING_SNAKE_CASE_ ): _lowerCAmelCase = int(SCREAMING_SNAKE_CASE_ ) total_partitions += 1 if check_partition_perfect(SCREAMING_SNAKE_CASE_ ): perfect_partitions += 1 if perfect_partitions > 0: if perfect_partitions / total_partitions < max_proportion: return int(SCREAMING_SNAKE_CASE_ ) integer += 1 if __name__ == "__main__": print(f'''{solution() = }''')
'''simple docstring'''
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring''' import os import re import sys import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuida from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger _SCREAMING_SNAKE_CASE = get_logger(__name__) _SCREAMING_SNAKE_CASE = Path(__file__).parent / "model_card_template.md" _SCREAMING_SNAKE_CASE = uuida().hex _SCREAMING_SNAKE_CASE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES _SCREAMING_SNAKE_CASE = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES _SCREAMING_SNAKE_CASE = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/" def __a(SCREAMING_SNAKE_CASE_ : Union[Dict, str, None] = None ): '''simple docstring''' _lowerCAmelCase = F'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}''' if DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += F'''; torch/{_torch_version}''' if is_flax_available(): ua += F'''; jax/{_jax_version}''' ua += F'''; flax/{_flax_version}''' if is_onnx_available(): ua += F'''; onnxruntime/{_onnxruntime_version}''' # CI will set this value to True if os.environ.get("DIFFUSERS_IS_CI" , "" ).upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): ua += "; " + "; ".join(F'''{k}/{v}''' for k, v in user_agent.items() ) elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): ua += "; " + user_agent return ua def __a(SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None , SCREAMING_SNAKE_CASE_ : Optional[str] = None ): '''simple docstring''' if token is None: _lowerCAmelCase = HfFolder.get_token() if organization is None: _lowerCAmelCase = whoami(SCREAMING_SNAKE_CASE_ )["name"] return F'''{username}/{model_id}''' else: return F'''{organization}/{model_id}''' def __a(SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Dict ): '''simple docstring''' if not is_jinja_available(): raise ValueError( "Modelcard rendering is based on Jinja templates." " Please make sure to have `jinja` installed before using `create_model_card`." " To install it, please run `pip install Jinja2`." 
) if hasattr(SCREAMING_SNAKE_CASE_ , "local_rank" ) and args.local_rank not in [-1, 0]: return _lowerCAmelCase = args.hub_token if hasattr(SCREAMING_SNAKE_CASE_ , "hub_token" ) else None _lowerCAmelCase = get_full_repo_name(SCREAMING_SNAKE_CASE_ , token=SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block language="en" , license="apache-2.0" , library_name="diffusers" , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=SCREAMING_SNAKE_CASE_ , model_name=SCREAMING_SNAKE_CASE_ , repo_name=SCREAMING_SNAKE_CASE_ , dataset_name=args.dataset_name if hasattr(SCREAMING_SNAKE_CASE_ , "dataset_name" ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=( args.gradient_accumulation_steps if hasattr(SCREAMING_SNAKE_CASE_ , "gradient_accumulation_steps" ) else None ) , adam_betaa=args.adam_betaa if hasattr(SCREAMING_SNAKE_CASE_ , "adam_beta1" ) else None , adam_betaa=args.adam_betaa if hasattr(SCREAMING_SNAKE_CASE_ , "adam_beta2" ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(SCREAMING_SNAKE_CASE_ , "adam_weight_decay" ) else None , adam_epsilon=args.adam_epsilon if hasattr(SCREAMING_SNAKE_CASE_ , "adam_epsilon" ) else None , lr_scheduler=args.lr_scheduler if hasattr(SCREAMING_SNAKE_CASE_ , "lr_scheduler" ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(SCREAMING_SNAKE_CASE_ , "lr_warmup_steps" ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(SCREAMING_SNAKE_CASE_ , "ema_inv_gamma" ) else None , ema_power=args.ema_power if hasattr(SCREAMING_SNAKE_CASE_ , "ema_power" ) else None , ema_max_decay=args.ema_max_decay if hasattr(SCREAMING_SNAKE_CASE_ , "ema_max_decay" ) else None , mixed_precision=args.mixed_precision , ) _lowerCAmelCase = os.path.join(args.output_dir , "README.md" ) model_card.save(SCREAMING_SNAKE_CASE_ ) def __a(SCREAMING_SNAKE_CASE_ : Optional[str] , SCREAMING_SNAKE_CASE_ : Optional[str] = None ): '''simple docstring''' if resolved_file is None or commit_hash is not None: return commit_hash _lowerCAmelCase = str(Path(SCREAMING_SNAKE_CASE_ ).as_posix() ) _lowerCAmelCase = re.search(R"snapshots/([^/]+)/" , SCREAMING_SNAKE_CASE_ ) if search is None: return None _lowerCAmelCase = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(SCREAMING_SNAKE_CASE_ ) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. 
_SCREAMING_SNAKE_CASE = os.path.expanduser( os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface")) ) _SCREAMING_SNAKE_CASE = os.path.join(hf_cache_home, "diffusers") def __a(SCREAMING_SNAKE_CASE_ : Optional[str] = None , SCREAMING_SNAKE_CASE_ : Optional[str] = None ): '''simple docstring''' if new_cache_dir is None: _lowerCAmelCase = DIFFUSERS_CACHE if old_cache_dir is None: _lowerCAmelCase = old_diffusers_cache _lowerCAmelCase = Path(SCREAMING_SNAKE_CASE_ ).expanduser() _lowerCAmelCase = Path(SCREAMING_SNAKE_CASE_ ).expanduser() for old_blob_path in old_cache_dir.glob("**/blobs/*" ): if old_blob_path.is_file() and not old_blob_path.is_symlink(): _lowerCAmelCase = new_cache_dir / old_blob_path.relative_to(SCREAMING_SNAKE_CASE_ ) new_blob_path.parent.mkdir(parents=SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) os.replace(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) try: os.symlink(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) except OSError: logger.warning( "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded." ) # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). _SCREAMING_SNAKE_CASE = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt") if not os.path.isfile(cache_version_file): _SCREAMING_SNAKE_CASE = 0 else: with open(cache_version_file) as f: try: _SCREAMING_SNAKE_CASE = int(f.read()) except ValueError: _SCREAMING_SNAKE_CASE = 0 if cache_version < 1: _SCREAMING_SNAKE_CASE = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( "The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your " "existing cached models. This is a one-time operation, you can interrupt it or run it " "later by calling `diffusers.utils.hub_utils.move_cache()`." ) try: move_cache() except Exception as e: _SCREAMING_SNAKE_CASE = "\n".join(traceback.format_tb(e.__traceback__)) logger.error( f'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease ''' "file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole " "message and we will do our best to help." ) if cache_version < 1: try: os.makedirs(DIFFUSERS_CACHE, exist_ok=True) with open(cache_version_file, "w") as f: f.write("1") except Exception: logger.warning( f'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure ''' "the directory exists and can be written to." ) def __a(SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ): '''simple docstring''' if variant is not None: _lowerCAmelCase = weights_name.split("." 
) _lowerCAmelCase = splits[:-1] + [variant] + splits[-1:] _lowerCAmelCase = ".".join(SCREAMING_SNAKE_CASE_ ) return weights_name def __a(SCREAMING_SNAKE_CASE_ : List[Any] , *, SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[str]=None , ): '''simple docstring''' _lowerCAmelCase = str(SCREAMING_SNAKE_CASE_ ) if os.path.isfile(SCREAMING_SNAKE_CASE_ ): return pretrained_model_name_or_path elif os.path.isdir(SCREAMING_SNAKE_CASE_ ): if os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ): # Load from a PyTorch checkpoint _lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return model_file elif subfolder is not None and os.path.isfile( os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ): _lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return model_file else: raise EnvironmentError( F'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''' ) else: # 1. First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(SCREAMING_SNAKE_CASE_ ).base_version ) >= version.parse("0.20.0" ) ): try: _lowerCAmelCase = hf_hub_download( SCREAMING_SNAKE_CASE_ , filename=_add_variant(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , cache_dir=SCREAMING_SNAKE_CASE_ , force_download=SCREAMING_SNAKE_CASE_ , proxies=SCREAMING_SNAKE_CASE_ , resume_download=SCREAMING_SNAKE_CASE_ , local_files_only=SCREAMING_SNAKE_CASE_ , use_auth_token=SCREAMING_SNAKE_CASE_ , user_agent=SCREAMING_SNAKE_CASE_ , subfolder=SCREAMING_SNAKE_CASE_ , revision=revision or commit_hash , ) warnings.warn( F'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.''' , SCREAMING_SNAKE_CASE_ , ) return model_file except: # noqa: E722 warnings.warn( F'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}\' so that the correct variant file can be added.''' , SCREAMING_SNAKE_CASE_ , ) try: # 2. 
Load model file as usual _lowerCAmelCase = hf_hub_download( SCREAMING_SNAKE_CASE_ , filename=SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , force_download=SCREAMING_SNAKE_CASE_ , proxies=SCREAMING_SNAKE_CASE_ , resume_download=SCREAMING_SNAKE_CASE_ , local_files_only=SCREAMING_SNAKE_CASE_ , use_auth_token=SCREAMING_SNAKE_CASE_ , user_agent=SCREAMING_SNAKE_CASE_ , subfolder=SCREAMING_SNAKE_CASE_ , revision=revision or commit_hash , ) return model_file except RepositoryNotFoundError: raise EnvironmentError( F'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier ''' "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a " "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli " "login`." ) except RevisionNotFoundError: raise EnvironmentError( F'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for ''' "this model name. Check the model page at " F'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' ) except EntryNotFoundError: raise EnvironmentError( F'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' ) except HTTPError as err: raise EnvironmentError( F'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' ) except ValueError: raise EnvironmentError( F'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it''' F''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a''' F''' directory containing a file named {weights_name} or''' " \nCheckout your internet connection or see how to run the library in" " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." ) except EnvironmentError: raise EnvironmentError( F'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from ''' "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " F'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory ''' F'''containing a file named {weights_name}''' )
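# Illustrative sketch of the variant-filename rule implemented by `_add_variant`
# above: the variant tag is spliced in just before the file extension.
weights_name = "diffusion_pytorch_model.bin"  # hypothetical weights file
splits = weights_name.split(".")
splits = splits[:-1] + ["fp16"] + splits[-1:]
assert ".".join(splits) == "diffusion_pytorch_model.fp16.bin"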
'''simple docstring''' from typing import Dict, Iterable, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : int = ["pixel_values"] def __init__( self , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = PILImageResampling.BICUBIC , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = True , _lowerCAmelCase = 1 / 255 , _lowerCAmelCase = True , _lowerCAmelCase = IMAGENET_DEFAULT_MEAN , _lowerCAmelCase = IMAGENET_DEFAULT_STD , **_lowerCAmelCase , ) -> None: super().__init__(**_lowerCAmelCase ) _lowerCAmelCase = size if size is not None else {"shortest_edge": 224} _lowerCAmelCase = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase ) _lowerCAmelCase = crop_size if crop_size is not None else {"height": 224, "width": 224} _lowerCAmelCase = get_size_dict(_lowerCAmelCase , param_name="crop_size" ) _lowerCAmelCase = do_resize _lowerCAmelCase = size _lowerCAmelCase = resample _lowerCAmelCase = do_center_crop _lowerCAmelCase = crop_size _lowerCAmelCase = do_rescale _lowerCAmelCase = rescale_factor _lowerCAmelCase = do_normalize _lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN _lowerCAmelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = PILImageResampling.BICUBIC , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> np.ndarray: _lowerCAmelCase = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase ) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: _lowerCAmelCase = int((256 / 224) * size["shortest_edge"] ) _lowerCAmelCase = get_resize_output_image_size(_lowerCAmelCase , size=_lowerCAmelCase , default_to_square=_lowerCAmelCase ) _lowerCAmelCase = {"height": output_size[0], "width": output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( f'''Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}''' ) return resize( _lowerCAmelCase , size=(size_dict["height"], size_dict["width"]) , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> np.ndarray: _lowerCAmelCase = get_size_dict(_lowerCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''Size dict must have keys \'height\' and \'width\'. 
Got {size.keys()}''' ) return center_crop(_lowerCAmelCase , size=(size["height"], size["width"]) , data_format=_lowerCAmelCase , **_lowerCAmelCase ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> np.ndarray: return rescale(_lowerCAmelCase , scale=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> np.ndarray: return normalize(_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = ChannelDimension.FIRST , **_lowerCAmelCase , ) -> BatchFeature: _lowerCAmelCase = do_resize if do_resize is not None else self.do_resize _lowerCAmelCase = resample if resample is not None else self.resample _lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop _lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale _lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor _lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize _lowerCAmelCase = image_mean if image_mean is not None else self.image_mean _lowerCAmelCase = image_std if image_std is not None else self.image_std _lowerCAmelCase = size if size is not None else self.size _lowerCAmelCase = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase ) _lowerCAmelCase = crop_size if crop_size is not None else self.crop_size _lowerCAmelCase = get_size_dict(_lowerCAmelCase , param_name="crop_size" ) _lowerCAmelCase = make_list_of_images(_lowerCAmelCase ) if not valid_images(_lowerCAmelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. _lowerCAmelCase = [to_numpy_array(_lowerCAmelCase ) for image in images] if do_resize: _lowerCAmelCase = [self.resize(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) for image in images] if do_center_crop: _lowerCAmelCase = [self.center_crop(_lowerCAmelCase , _lowerCAmelCase ) for image in images] if do_rescale: _lowerCAmelCase = [self.rescale(_lowerCAmelCase , _lowerCAmelCase ) for image in images] if do_normalize: _lowerCAmelCase = [self.normalize(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) for image in images] _lowerCAmelCase = [to_channel_dimension_format(_lowerCAmelCase , _lowerCAmelCase ) for image in images] _lowerCAmelCase = {"pixel_values": images} return BatchFeature(data=_lowerCAmelCase , tensor_type=_lowerCAmelCase )
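# Usage sketch (hypothetical checkpoint id; assumes transformers and Pillow are
# installed): with the defaults above, preprocessing resizes so the shortest edge
# passes through the 256/224 scaling, center-crops to 224x224, rescales by 1/255
# and normalizes, returning tensors under the "pixel_values" key.
# processor = <image processor class above>.from_pretrained("org/vision-checkpoint")
# batch = processor(images=pil_image, return_tensors="pt")
# batch["pixel_values"].shape  # torch.Size([1, 3, 224, 224])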
'''simple docstring''' from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json", } class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : str = "layoutlmv3" def __init__( self , _lowerCAmelCase=50265 , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3072 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=512 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-5 , _lowerCAmelCase=1 , _lowerCAmelCase=0 , _lowerCAmelCase=2 , _lowerCAmelCase=1024 , _lowerCAmelCase=128 , _lowerCAmelCase=128 , _lowerCAmelCase=True , _lowerCAmelCase=32 , _lowerCAmelCase=128 , _lowerCAmelCase=64 , _lowerCAmelCase=256 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=224 , _lowerCAmelCase=3 , _lowerCAmelCase=16 , _lowerCAmelCase=None , **_lowerCAmelCase , ) -> Optional[Any]: super().__init__( vocab_size=_lowerCAmelCase , hidden_size=_lowerCAmelCase , num_hidden_layers=_lowerCAmelCase , num_attention_heads=_lowerCAmelCase , intermediate_size=_lowerCAmelCase , hidden_act=_lowerCAmelCase , hidden_dropout_prob=_lowerCAmelCase , attention_probs_dropout_prob=_lowerCAmelCase , max_position_embeddings=_lowerCAmelCase , type_vocab_size=_lowerCAmelCase , initializer_range=_lowerCAmelCase , layer_norm_eps=_lowerCAmelCase , pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase , ) _lowerCAmelCase = max_ad_position_embeddings _lowerCAmelCase = coordinate_size _lowerCAmelCase = shape_size _lowerCAmelCase = has_relative_attention_bias _lowerCAmelCase = rel_pos_bins _lowerCAmelCase = max_rel_pos _lowerCAmelCase = has_spatial_attention_bias _lowerCAmelCase = rel_ad_pos_bins _lowerCAmelCase = max_rel_ad_pos _lowerCAmelCase = text_embed _lowerCAmelCase = visual_embed _lowerCAmelCase = input_size _lowerCAmelCase = num_channels _lowerCAmelCase = patch_size _lowerCAmelCase = classifier_dropout class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : int = version.parse("1.12" ) @property def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]: # The order of inputs is different for question answering and sequence classification if self.task in ["question-answering", "sequence-classification"]: return OrderedDict( [ ("input_ids", {0: "batch", 1: "sequence"}), ("attention_mask", {0: "batch", 1: "sequence"}), ("bbox", {0: "batch", 1: "sequence"}), ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) else: return OrderedDict( [ ("input_ids", {0: "batch", 1: "sequence"}), ("bbox", {0: "batch", 1: "sequence"}), ("attention_mask", {0: "batch", 1: "sequence"}), ("pixel_values", {0: "batch", 1: "num_channels"}), ] ) @property def _snake_case ( self ) -> float: return 1E-5 @property def _snake_case ( self ) -> int: return 12 def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , _lowerCAmelCase = 3 , _lowerCAmelCase = 40 , _lowerCAmelCase = 40 
, ) -> Mapping[str, Any]: setattr(processor.image_processor , "apply_ocr" , _lowerCAmelCase ) # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX _lowerCAmelCase = compute_effective_axis_dimension( _lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX _lowerCAmelCase = processor.tokenizer.num_special_tokens_to_add(_lowerCAmelCase ) _lowerCAmelCase = compute_effective_axis_dimension( _lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowerCAmelCase ) # Generate dummy inputs according to compute batch and sequence _lowerCAmelCase = [[" ".join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size # Generate dummy bounding boxes _lowerCAmelCase = [[[48, 84, 73, 128]]] * batch_size # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) _lowerCAmelCase = self._generate_dummy_images(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) _lowerCAmelCase = dict( processor( _lowerCAmelCase , text=_lowerCAmelCase , boxes=_lowerCAmelCase , return_tensors=_lowerCAmelCase , ) ) return inputs
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
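# Worked check of the derived hidden_size above: with the default embed_dim=96 and
# depths=[2, 2, 6, 2] there are four stages, so the final channel dimension is
# 96 * 2**(4 - 1) = 768.
assert int(96 * 2 ** (4 - 1)) == 768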
'''simple docstring''' import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from ...test_tokenization_common import TokenizerTesterMixin _SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right _SCREAMING_SNAKE_CASE = 25_00_04 _SCREAMING_SNAKE_CASE = 25_00_20 @require_sentencepiece @require_tokenizers class lowerCAmelCase_ ( __magic_name__ ,unittest.TestCase ): __lowerCamelCase : Any = MBartaaTokenizer __lowerCamelCase : List[Any] = MBartaaTokenizerFast __lowerCamelCase : List[Any] = True __lowerCamelCase : str = True def _snake_case ( self ) -> Optional[int]: super().setUp() # We have a SentencePiece fixture for testing _lowerCAmelCase = MBartaaTokenizer(_lowerCAmelCase , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=_lowerCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) def _snake_case ( self ) -> Optional[Any]: _lowerCAmelCase = "<s>" _lowerCAmelCase = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCAmelCase ) , _lowerCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCAmelCase ) , _lowerCAmelCase ) def _snake_case ( self ) -> str: _lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "<mask>" ) self.assertEqual(len(_lowerCAmelCase ) , 1054 ) def _snake_case ( self ) -> int: self.assertEqual(self.get_tokenizer().vocab_size , 1054 ) def _snake_case ( self ) -> str: _lowerCAmelCase = MBartaaTokenizer(_lowerCAmelCase , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=_lowerCAmelCase ) _lowerCAmelCase = tokenizer.tokenize("This is a test" ) self.assertListEqual(_lowerCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) _lowerCAmelCase = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( _lowerCAmelCase , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , ) _lowerCAmelCase = tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) self.assertListEqual( _lowerCAmelCase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) _lowerCAmelCase = tokenizer.convert_ids_to_tokens(_lowerCAmelCase ) self.assertListEqual( _lowerCAmelCase , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , ) @slow def _snake_case ( self ) -> Tuple: # fmt: off _lowerCAmelCase = {"input_ids": [[250004, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [250004, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250004, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowerCAmelCase , model_name="facebook/mbart-large-50" 
, revision="d3913889c59cd5c9e456b269c376325eabad57e2" , ) def _snake_case ( self ) -> Dict: if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return _lowerCAmelCase = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase ) _lowerCAmelCase = self.tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase ) _lowerCAmelCase = tempfile.mkdtemp() _lowerCAmelCase = tokenizer_r.save_pretrained(_lowerCAmelCase ) _lowerCAmelCase = tokenizer_p.save_pretrained(_lowerCAmelCase ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) ) _lowerCAmelCase = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f ) self.assertSequenceEqual(_lowerCAmelCase , _lowerCAmelCase ) # Checks everything loads correctly in the same way _lowerCAmelCase = tokenizer_r.from_pretrained(_lowerCAmelCase ) _lowerCAmelCase = tokenizer_p.from_pretrained(_lowerCAmelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_lowerCAmelCase , _lowerCAmelCase ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(_lowerCAmelCase ) # Save tokenizer rust, legacy_format=True _lowerCAmelCase = tempfile.mkdtemp() _lowerCAmelCase = tokenizer_r.save_pretrained(_lowerCAmelCase , legacy_format=_lowerCAmelCase ) _lowerCAmelCase = tokenizer_p.save_pretrained(_lowerCAmelCase ) # Checks it save with the same files self.assertSequenceEqual(_lowerCAmelCase , _lowerCAmelCase ) # Checks everything loads correctly in the same way _lowerCAmelCase = tokenizer_r.from_pretrained(_lowerCAmelCase ) _lowerCAmelCase = tokenizer_p.from_pretrained(_lowerCAmelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_lowerCAmelCase , _lowerCAmelCase ) ) shutil.rmtree(_lowerCAmelCase ) # Save tokenizer rust, legacy_format=False _lowerCAmelCase = tempfile.mkdtemp() _lowerCAmelCase = tokenizer_r.save_pretrained(_lowerCAmelCase , legacy_format=_lowerCAmelCase ) _lowerCAmelCase = tokenizer_p.save_pretrained(_lowerCAmelCase ) # Checks it saved the tokenizer.json file self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way _lowerCAmelCase = tokenizer_r.from_pretrained(_lowerCAmelCase ) _lowerCAmelCase = tokenizer_p.from_pretrained(_lowerCAmelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_lowerCAmelCase , _lowerCAmelCase ) ) shutil.rmtree(_lowerCAmelCase ) @require_torch @require_sentencepiece @require_tokenizers class lowerCAmelCase_ ( unittest.TestCase ): __lowerCamelCase : List[str] = "facebook/mbart-large-50-one-to-many-mmt" __lowerCamelCase : int = [ " UN Chief Says There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and 
more weapons will only worsen the violence and misery for millions of people.", ] __lowerCamelCase : Dict = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] __lowerCamelCase : Optional[Any] = [EN_CODE, 8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2] @classmethod def _snake_case ( cls ) -> List[str]: _lowerCAmelCase = MBartaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO" ) _lowerCAmelCase = 1 return cls def _snake_case ( self ) -> Any: self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 250001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 250004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 250020 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"] , 250038 ) def _snake_case ( self ) -> Optional[Any]: _lowerCAmelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , _lowerCAmelCase ) def _snake_case ( self ) -> int: self.assertIn(_lowerCAmelCase , self.tokenizer.all_special_ids ) _lowerCAmelCase = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2] _lowerCAmelCase = self.tokenizer.decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase ) _lowerCAmelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) self.assertNotIn(self.tokenizer.eos_token , _lowerCAmelCase ) def _snake_case ( self ) -> List[Any]: _lowerCAmelCase = ["this is gunna be a long sentence " * 20] assert isinstance(src_text[0] , _lowerCAmelCase ) _lowerCAmelCase = 10 _lowerCAmelCase = self.tokenizer(_lowerCAmelCase , max_length=_lowerCAmelCase , truncation=_lowerCAmelCase ).input_ids[0] self.assertEqual(ids[0] , _lowerCAmelCase ) self.assertEqual(ids[-1] , 2 ) self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase ) def _snake_case ( self ) -> List[str]: self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [250053, 250001] ) def _snake_case ( self ) -> str: _lowerCAmelCase = tempfile.mkdtemp() _lowerCAmelCase = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(_lowerCAmelCase ) _lowerCAmelCase = MBartaaTokenizer.from_pretrained(_lowerCAmelCase ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _lowerCAmelCase ) @require_torch def _snake_case ( self ) -> str: _lowerCAmelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_lowerCAmelCase , return_tensors="pt" ) _lowerCAmelCase = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == RO_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE] @require_torch def _snake_case ( self ) -> Optional[Any]: _lowerCAmelCase = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , ) _lowerCAmelCase = shift_tokens_right(batch["labels"] , 
self.tokenizer.pad_token_id ) self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) _lowerCAmelCase = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , _lowerCAmelCase ) self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def _snake_case ( self ) -> str: _lowerCAmelCase = self.tokenizer(self.src_text , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=3 , return_tensors="pt" ) _lowerCAmelCase = self.tokenizer( text_target=self.tgt_text , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=10 , return_tensors="pt" ) _lowerCAmelCase = targets["input_ids"] _lowerCAmelCase = shift_tokens_right(_lowerCAmelCase , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def _snake_case ( self ) -> str: _lowerCAmelCase = self.tokenizer._build_translation_inputs( "A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR" ) self.assertEqual( nested_simplify(_lowerCAmelCase ) , { # en_XX, A, test, EOS "input_ids": [[250004, 62, 3034, 2]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 250001, } , )
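# Hedged sketch of the label/decoder-input relationship the tests above assert for
# MBart-50: `shift_tokens_right` wraps the final <eos> of the labels around to the
# front of the decoder inputs, so decoding starts with [eos, lang_code, ...].
# The ids below are illustrative.
labels = [250020, 884, 9019, 2]             # [ro_RO] tokens ... <eos>
decoder_input_ids = [2, 250020, 884, 9019]  # <eos> wrapped to the front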
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}


class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
'''simple docstring'''
from __future__ import annotations

from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(
    forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1
) -> bool:
    # summation of moments of each force about the origin
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps


if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
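# Hand check of the moment balance in the last example above (the force at x = 0
# contributes no moment): -1200*6 + 15600*10 - 12400*12 = -7200 + 156000 - 148800.
assert -1200 * 6 + 15600 * 10 - 12400 * 12 == 0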
'''simple docstring''' import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class lowerCAmelCase_ ( __magic_name__ ,__magic_name__ ,unittest.TestCase ): __lowerCamelCase : Optional[Any] = AutoencoderKL __lowerCamelCase : List[Any] = "sample" __lowerCamelCase : Tuple = 1e-2 @property def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = 4 _lowerCAmelCase = 3 _lowerCAmelCase = (32, 32) _lowerCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(_lowerCAmelCase ) return {"sample": image} @property def _snake_case ( self ) -> Any: return (3, 32, 32) @property def _snake_case ( self ) -> List[Any]: return (3, 32, 32) def _snake_case ( self ) -> str: _lowerCAmelCase = { "block_out_channels": [32, 64], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 4, } _lowerCAmelCase = self.dummy_input return init_dict, inputs_dict def _snake_case ( self ) -> Optional[int]: pass def _snake_case ( self ) -> Any: pass @unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" ) def _snake_case ( self ) -> str: # enable deterministic behavior for gradient checkpointing _lowerCAmelCase , _lowerCAmelCase = self.prepare_init_args_and_inputs_for_common() _lowerCAmelCase = self.model_class(**_lowerCAmelCase ) model.to(_lowerCAmelCase ) assert not model.is_gradient_checkpointing and model.training _lowerCAmelCase = model(**_lowerCAmelCase ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() _lowerCAmelCase = torch.randn_like(_lowerCAmelCase ) _lowerCAmelCase = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing _lowerCAmelCase = self.model_class(**_lowerCAmelCase ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(_lowerCAmelCase ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training _lowerCAmelCase = model_a(**_lowerCAmelCase ).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() _lowerCAmelCase = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1E-5 ) _lowerCAmelCase = dict(model.named_parameters() ) _lowerCAmelCase = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) ) def _snake_case ( self ) -> Optional[Any]: _lowerCAmelCase , _lowerCAmelCase = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) self.assertEqual(len(loading_info["missing_keys"] ) , 0 ) model.to(_lowerCAmelCase ) _lowerCAmelCase = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def _snake_case ( self ) -> Dict: _lowerCAmelCase = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" ) _lowerCAmelCase = model.to(_lowerCAmelCase ) model.eval() if torch_device == "mps": _lowerCAmelCase = torch.manual_seed(0 ) else: _lowerCAmelCase = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 ) _lowerCAmelCase = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) _lowerCAmelCase = image.to(_lowerCAmelCase ) with torch.no_grad(): _lowerCAmelCase = model(_lowerCAmelCase , sample_posterior=_lowerCAmelCase , generator=_lowerCAmelCase ).sample _lowerCAmelCase = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. if torch_device == "mps": _lowerCAmelCase = torch.tensor( [ -4.0078E-01, -3.8323E-04, -1.2681E-01, -1.1462E-01, 2.0095E-01, 1.0893E-01, -8.8247E-02, -3.0361E-01, -9.8644E-03, ] ) elif torch_device == "cpu": _lowerCAmelCase = torch.tensor( [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] ) else: _lowerCAmelCase = torch.tensor( [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] ) self.assertTrue(torch_all_close(_lowerCAmelCase , _lowerCAmelCase , rtol=1E-2 ) ) @slow class lowerCAmelCase_ ( unittest.TestCase ): def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]: return f'''gaussian_noise_s={seed}_shape={'_'.join([str(_lowerCAmelCase ) for s in shape] )}.npy''' def _snake_case ( self ) -> List[str]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self , _lowerCAmelCase=0 , _lowerCAmelCase=(4, 3, 512, 512) , _lowerCAmelCase=False ) -> Tuple: _lowerCAmelCase = torch.floataa if fpaa else torch.floataa _lowerCAmelCase = torch.from_numpy(load_hf_numpy(self.get_file_format(_lowerCAmelCase , _lowerCAmelCase ) ) ).to(_lowerCAmelCase ).to(_lowerCAmelCase ) return image def _snake_case ( self , _lowerCAmelCase="CompVis/stable-diffusion-v1-4" , _lowerCAmelCase=False ) -> Tuple: _lowerCAmelCase = "fp16" if fpaa else None _lowerCAmelCase = torch.floataa if fpaa else torch.floataa _lowerCAmelCase = AutoencoderKL.from_pretrained( _lowerCAmelCase , subfolder="vae" , torch_dtype=_lowerCAmelCase , revision=_lowerCAmelCase , ) model.to(_lowerCAmelCase ).eval() return model def _snake_case ( self , _lowerCAmelCase=0 ) -> str: if torch_device == "mps": return torch.manual_seed(_lowerCAmelCase ) return 
torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase ) @parameterized.expand( [ # fmt: off [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]], [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]], # fmt: on ] ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict: _lowerCAmelCase = self.get_sd_vae_model() _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase ) _lowerCAmelCase = self.get_generator(_lowerCAmelCase ) with torch.no_grad(): _lowerCAmelCase = model(_lowerCAmelCase , generator=_lowerCAmelCase , sample_posterior=_lowerCAmelCase ).sample assert sample.shape == image.shape _lowerCAmelCase = sample[-1, -2:, -2:, :2].flatten().float().cpu() _lowerCAmelCase = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=3E-3 ) @parameterized.expand( [ # fmt: off [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]], [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]], # fmt: on ] ) @require_torch_gpu def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: _lowerCAmelCase = self.get_sd_vae_model(fpaa=_lowerCAmelCase ) _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , fpaa=_lowerCAmelCase ) _lowerCAmelCase = self.get_generator(_lowerCAmelCase ) with torch.no_grad(): _lowerCAmelCase = model(_lowerCAmelCase , generator=_lowerCAmelCase , sample_posterior=_lowerCAmelCase ).sample assert sample.shape == image.shape _lowerCAmelCase = sample[-1, -2:, :2, -2:].flatten().float().cpu() _lowerCAmelCase = torch.tensor(_lowerCAmelCase ) assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1E-2 ) @parameterized.expand( [ # fmt: off [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]], [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]], # fmt: on ] ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Tuple: _lowerCAmelCase = self.get_sd_vae_model() _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase ) with torch.no_grad(): _lowerCAmelCase = model(_lowerCAmelCase ).sample assert sample.shape == image.shape _lowerCAmelCase = sample[-1, -2:, -2:, :2].flatten().float().cpu() _lowerCAmelCase = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=3E-3 ) @parameterized.expand( [ # fmt: off [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]], [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]], # fmt: on ] ) @require_torch_gpu def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: _lowerCAmelCase = self.get_sd_vae_model() _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , shape=(3, 4, 64, 64) ) with torch.no_grad(): _lowerCAmelCase = model.decode(_lowerCAmelCase ).sample assert list(sample.shape ) == [3, 3, 512, 512] _lowerCAmelCase = sample[-1, -2:, :2, -2:].flatten().cpu() _lowerCAmelCase = torch.tensor(_lowerCAmelCase ) assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) @parameterized.expand( [ # fmt: 
off [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]], [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]], # fmt: on ] ) @require_torch_gpu def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> str: _lowerCAmelCase = self.get_sd_vae_model(fpaa=_lowerCAmelCase ) _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , shape=(3, 4, 64, 64) , fpaa=_lowerCAmelCase ) with torch.no_grad(): _lowerCAmelCase = model.decode(_lowerCAmelCase ).sample assert list(sample.shape ) == [3, 3, 512, 512] _lowerCAmelCase = sample[-1, -2:, :2, -2:].flatten().float().cpu() _lowerCAmelCase = torch.tensor(_lowerCAmelCase ) assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=5E-3 ) @parameterized.expand([(13,), (16,), (27,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." ) def _snake_case ( self , _lowerCAmelCase ) -> List[str]: _lowerCAmelCase = self.get_sd_vae_model(fpaa=_lowerCAmelCase ) _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , shape=(3, 4, 64, 64) , fpaa=_lowerCAmelCase ) with torch.no_grad(): _lowerCAmelCase = model.decode(_lowerCAmelCase ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): _lowerCAmelCase = model.decode(_lowerCAmelCase ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1E-1 ) @parameterized.expand([(13,), (16,), (37,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." ) def _snake_case ( self , _lowerCAmelCase ) -> Any: _lowerCAmelCase = self.get_sd_vae_model() _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , shape=(3, 4, 64, 64) ) with torch.no_grad(): _lowerCAmelCase = model.decode(_lowerCAmelCase ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): _lowerCAmelCase = model.decode(_lowerCAmelCase ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1E-2 ) @parameterized.expand( [ # fmt: off [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]], [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]], # fmt: on ] ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> int: _lowerCAmelCase = self.get_sd_vae_model() _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase ) _lowerCAmelCase = self.get_generator(_lowerCAmelCase ) with torch.no_grad(): _lowerCAmelCase = model.encode(_lowerCAmelCase ).latent_dist _lowerCAmelCase = dist.sample(generator=_lowerCAmelCase ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] _lowerCAmelCase = sample[0, -1, -3:, -3:].flatten().cpu() _lowerCAmelCase = torch.tensor(_lowerCAmelCase ) _lowerCAmelCase = 3E-3 if torch_device != "mps" else 1E-2 assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=_lowerCAmelCase )
18
1
'''simple docstring''' import json import os from typing import Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = {"vocab_file": "vocab.json"} _SCREAMING_SNAKE_CASE = { "vocab_file": { "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json", } } _SCREAMING_SNAKE_CASE = {"mgp-str": 27} class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : List[Any] = VOCAB_FILES_NAMES __lowerCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , _lowerCAmelCase , _lowerCAmelCase="[GO]" , _lowerCAmelCase="[GO]" , _lowerCAmelCase="[s]" , _lowerCAmelCase="[GO]" , **_lowerCAmelCase ) -> Union[str, Any]: super().__init__( unk_token=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , **_lowerCAmelCase , ) with open(_lowerCAmelCase , encoding="utf-8" ) as vocab_handle: _lowerCAmelCase = json.load(_lowerCAmelCase ) _lowerCAmelCase = {v: k for k, v in self.vocab.items()} @property def _snake_case ( self ) -> Union[str, Any]: return len(self.vocab ) def _snake_case ( self ) -> int: return dict(self.vocab , **self.added_tokens_encoder ) def _snake_case ( self , _lowerCAmelCase ) -> Union[str, Any]: _lowerCAmelCase = [] for s in text: char_tokens.extend(_lowerCAmelCase ) return char_tokens def _snake_case ( self , _lowerCAmelCase ) -> Tuple: return self.vocab.get(_lowerCAmelCase , self.vocab.get(self.unk_token ) ) def _snake_case ( self , _lowerCAmelCase ) -> str: return self.decoder.get(_lowerCAmelCase ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> Tuple[str]: if not os.path.isdir(_lowerCAmelCase ): logger.error("Vocabulary path ({}) should be a directory".format(_lowerCAmelCase ) ) return _lowerCAmelCase = os.path.join( _lowerCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) with open(_lowerCAmelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.vocab , indent=2 , sort_keys=_lowerCAmelCase , ensure_ascii=_lowerCAmelCase ) + "\n" ) return (vocab_file,)
18
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json", } class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : str = "gpt_bigcode" __lowerCamelCase : Optional[int] = ["past_key_values"] __lowerCamelCase : List[str] = { "hidden_size": "n_embd", "max_position_embeddings": "n_positions", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , _lowerCAmelCase=50257 , _lowerCAmelCase=1024 , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=None , _lowerCAmelCase="gelu_pytorch_tanh" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=1E-5 , _lowerCAmelCase=0.02 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=50256 , _lowerCAmelCase=50256 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , **_lowerCAmelCase , ) -> List[Any]: _lowerCAmelCase = vocab_size _lowerCAmelCase = n_positions _lowerCAmelCase = n_embd _lowerCAmelCase = n_layer _lowerCAmelCase = n_head _lowerCAmelCase = n_inner _lowerCAmelCase = activation_function _lowerCAmelCase = resid_pdrop _lowerCAmelCase = embd_pdrop _lowerCAmelCase = attn_pdrop _lowerCAmelCase = layer_norm_epsilon _lowerCAmelCase = initializer_range _lowerCAmelCase = scale_attn_weights _lowerCAmelCase = use_cache _lowerCAmelCase = attention_softmax_in_fpaa _lowerCAmelCase = scale_attention_softmax_in_fpaa _lowerCAmelCase = multi_query _lowerCAmelCase = bos_token_id _lowerCAmelCase = eos_token_id super().__init__(bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
18
1
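For context, the configuration record above follows the usual `PretrainedConfig` pattern, including an `attribute_map` that aliases canonical names onto GPT-style ones. A hedged usage sketch follows; `GPTBigCodeConfig` is the public name transformers ships for this model, but the import and the sample values are assumptions, not part of the record:

```python
from transformers import GPTBigCodeConfig

config = GPTBigCodeConfig(n_layer=6, n_head=8, n_embd=512)
# attribute_map routes "hidden_size" to n_embd and "num_hidden_layers" to n_layer
print(config.hidden_size, config.num_hidden_layers)  # -> 512 6
```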
'''simple docstring''' import math def __a(SCREAMING_SNAKE_CASE_ : list , SCREAMING_SNAKE_CASE_ : int ): '''simple docstring''' _lowerCAmelCase = len(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = int(math.floor(math.sqrt(SCREAMING_SNAKE_CASE_ ) ) ) _lowerCAmelCase = 0 while arr[min(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) - 1] < x: _lowerCAmelCase = step step += int(math.floor(math.sqrt(SCREAMING_SNAKE_CASE_ ) ) ) if prev >= n: return -1 while arr[prev] < x: _lowerCAmelCase = prev + 1 if prev == min(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): return -1 if arr[prev] == x: return prev return -1 if __name__ == "__main__": _SCREAMING_SNAKE_CASE = input("Enter numbers separated by a comma:\n").strip() _SCREAMING_SNAKE_CASE = [int(item) for item in user_input.split(",")] _SCREAMING_SNAKE_CASE = int(input("Enter the number to be searched:\n")) _SCREAMING_SNAKE_CASE = jump_search(arr, x) if res == -1: print("Number not found!") else: print(f'''Number {x} is at index {res}''')
18
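The record above stores a flattened jump-search implementation with obfuscated bindings. As a readability aid, here is a minimal runnable sketch of the same algorithm; the names `jump_search`, `arr`, and `x` are illustrative rather than taken from the record:

```python
import math


def jump_search(arr: list, x: int) -> int:
    """Return the index of x in the sorted list arr, or -1 if x is absent."""
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))  # block size ~ sqrt(n) minimizes comparisons
    prev = 0
    # Jump ahead block by block until the current block could contain x.
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    # Linear scan inside the identified block.
    while arr[prev] < x:
        prev += 1
        if prev == min(step, n):
            return -1
    return prev if arr[prev] == x else -1


print(jump_search([0, 1, 2, 8, 13, 17, 19, 32, 42], 13))  # -> 4
```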
'''simple docstring''' import math from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json", # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio } class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : List[Any] = "data2vec-audio" def __init__( self , _lowerCAmelCase=32 , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3072 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-5 , _lowerCAmelCase="gelu" , _lowerCAmelCase=(512, 512, 512, 512, 512, 512, 512) , _lowerCAmelCase=(5, 2, 2, 2, 2, 2, 2) , _lowerCAmelCase=(10, 3, 3, 3, 3, 2, 2) , _lowerCAmelCase=False , _lowerCAmelCase=16 , _lowerCAmelCase=19 , _lowerCAmelCase=5 , _lowerCAmelCase=0.05 , _lowerCAmelCase=10 , _lowerCAmelCase=2 , _lowerCAmelCase=0.0 , _lowerCAmelCase=10 , _lowerCAmelCase=0 , _lowerCAmelCase="sum" , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=256 , _lowerCAmelCase=(512, 512, 512, 512, 1500) , _lowerCAmelCase=(5, 3, 3, 1, 1) , _lowerCAmelCase=(1, 2, 3, 1, 1) , _lowerCAmelCase=512 , _lowerCAmelCase=0 , _lowerCAmelCase=1 , _lowerCAmelCase=2 , _lowerCAmelCase=False , _lowerCAmelCase=3 , _lowerCAmelCase=2 , _lowerCAmelCase=3 , _lowerCAmelCase=None , **_lowerCAmelCase , ) -> Dict: super().__init__(**_lowerCAmelCase , pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase ) _lowerCAmelCase = hidden_size _lowerCAmelCase = feat_extract_activation _lowerCAmelCase = list(_lowerCAmelCase ) _lowerCAmelCase = list(_lowerCAmelCase ) _lowerCAmelCase = list(_lowerCAmelCase ) _lowerCAmelCase = conv_bias _lowerCAmelCase = num_conv_pos_embeddings _lowerCAmelCase = num_conv_pos_embedding_groups _lowerCAmelCase = conv_pos_kernel_size _lowerCAmelCase = len(self.conv_dim ) _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = intermediate_size _lowerCAmelCase = hidden_act _lowerCAmelCase = num_attention_heads _lowerCAmelCase = hidden_dropout _lowerCAmelCase = attention_dropout _lowerCAmelCase = activation_dropout _lowerCAmelCase = feat_proj_dropout _lowerCAmelCase = final_dropout _lowerCAmelCase = layerdrop _lowerCAmelCase = layer_norm_eps _lowerCAmelCase = initializer_range _lowerCAmelCase = vocab_size _lowerCAmelCase = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==" " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =" f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _lowerCAmelCase = mask_time_prob _lowerCAmelCase = mask_time_length _lowerCAmelCase = mask_time_min_masks _lowerCAmelCase = mask_feature_prob _lowerCAmelCase = mask_feature_length _lowerCAmelCase = mask_feature_min_masks # ctc loss _lowerCAmelCase = ctc_loss_reduction _lowerCAmelCase = ctc_zero_infinity # adapter _lowerCAmelCase = add_adapter _lowerCAmelCase = adapter_kernel_size _lowerCAmelCase = adapter_stride _lowerCAmelCase = num_adapter_layers _lowerCAmelCase = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. _lowerCAmelCase = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. _lowerCAmelCase = list(_lowerCAmelCase ) _lowerCAmelCase = list(_lowerCAmelCase ) _lowerCAmelCase = list(_lowerCAmelCase ) _lowerCAmelCase = xvector_output_dim @property def _snake_case ( self ) -> str: return math.prod(self.conv_stride )
18
1
'''simple docstring''' import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotSmallConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html _SCREAMING_SNAKE_CASE = "platform" import jax import jax.numpy as jnp from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import ( FlaxBlenderbotSmallForConditionalGeneration, FlaxBlenderbotSmallModel, shift_tokens_right, ) def __a(SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[int]=None , SCREAMING_SNAKE_CASE_ : Optional[int]=None , SCREAMING_SNAKE_CASE_ : Optional[int]=None , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None , SCREAMING_SNAKE_CASE_ : Dict=None , ): '''simple docstring''' if attention_mask is None: _lowerCAmelCase = np.where(input_ids != config.pad_token_id , 1 , 0 ) if decoder_attention_mask is None: _lowerCAmelCase = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 ) if head_mask is None: _lowerCAmelCase = np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _lowerCAmelCase = np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _lowerCAmelCase = np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class lowerCAmelCase_ : def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=99 , _lowerCAmelCase=16 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=4 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=32 , _lowerCAmelCase=2 , _lowerCAmelCase=1 , _lowerCAmelCase=0 , _lowerCAmelCase=0.02 , ) -> Optional[int]: _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = seq_length _lowerCAmelCase = is_training _lowerCAmelCase = use_labels _lowerCAmelCase = vocab_size _lowerCAmelCase = hidden_size _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = num_attention_heads _lowerCAmelCase = intermediate_size _lowerCAmelCase = hidden_act _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = max_position_embeddings _lowerCAmelCase = eos_token_id _lowerCAmelCase = pad_token_id _lowerCAmelCase = bos_token_id _lowerCAmelCase = initializer_range def _snake_case ( self ) -> Optional[int]: _lowerCAmelCase = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size ) _lowerCAmelCase = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 ) _lowerCAmelCase = shift_tokens_right(_lowerCAmelCase , 1 , 2 ) _lowerCAmelCase = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , 
encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_lowerCAmelCase , ) _lowerCAmelCase = prepare_blenderbot_inputs_dict(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) return config, inputs_dict def _snake_case ( self ) -> List[str]: _lowerCAmelCase , _lowerCAmelCase = self.prepare_config_and_inputs() return config, inputs_dict def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]: _lowerCAmelCase = 20 _lowerCAmelCase = model_class_name(_lowerCAmelCase ) _lowerCAmelCase = model.encode(inputs_dict["input_ids"] ) _lowerCAmelCase , _lowerCAmelCase = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) _lowerCAmelCase = model.init_cache(decoder_input_ids.shape[0] , _lowerCAmelCase , _lowerCAmelCase ) _lowerCAmelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" ) _lowerCAmelCase = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _lowerCAmelCase = model.decode( decoder_input_ids[:, :-1] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , ) _lowerCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" ) _lowerCAmelCase = model.decode( decoder_input_ids[:, -1:] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_lowerCAmelCase , ) _lowerCAmelCase = model.decode(_lowerCAmelCase , _lowerCAmelCase ) _lowerCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Any: _lowerCAmelCase = 20 _lowerCAmelCase = model_class_name(_lowerCAmelCase ) _lowerCAmelCase = model.encode(inputs_dict["input_ids"] ) _lowerCAmelCase , _lowerCAmelCase = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) _lowerCAmelCase = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) _lowerCAmelCase = model.init_cache(decoder_input_ids.shape[0] , _lowerCAmelCase , _lowerCAmelCase ) _lowerCAmelCase = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _lowerCAmelCase = model.decode( decoder_input_ids[:, :-1] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , ) _lowerCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" ) _lowerCAmelCase = model.decode( decoder_input_ids[:, -1:] , _lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , ) _lowerCAmelCase = model.decode(_lowerCAmelCase , _lowerCAmelCase , 
decoder_attention_mask=_lowerCAmelCase ) _lowerCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' ) @require_flax class lowerCAmelCase_ ( unittest.TestCase ): __lowerCamelCase : str = 99 def _snake_case ( self ) -> Dict: _lowerCAmelCase = np.array( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ] , dtype=np.intaa , ) _lowerCAmelCase = input_ids.shape[0] _lowerCAmelCase = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def _snake_case ( self ) -> Optional[Any]: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = self._get_config_and_data() _lowerCAmelCase = FlaxBlenderbotSmallForConditionalGeneration(_lowerCAmelCase ) _lowerCAmelCase = lm_model(input_ids=_lowerCAmelCase ) _lowerCAmelCase = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs["logits"].shape , _lowerCAmelCase ) def _snake_case ( self ) -> List[str]: _lowerCAmelCase = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , ) _lowerCAmelCase = FlaxBlenderbotSmallForConditionalGeneration(_lowerCAmelCase ) _lowerCAmelCase = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa ) _lowerCAmelCase = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa ) _lowerCAmelCase = lm_model(input_ids=_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase ) _lowerCAmelCase = (*summary.shape, config.vocab_size) self.assertEqual(outputs["logits"].shape , _lowerCAmelCase ) def _snake_case ( self ) -> List[Any]: _lowerCAmelCase = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa ) _lowerCAmelCase = shift_tokens_right(_lowerCAmelCase , 1 , 2 ) _lowerCAmelCase = np.equal(_lowerCAmelCase , 1 ).astype(np.floataa ).sum() _lowerCAmelCase = np.equal(_lowerCAmelCase , 1 ).astype(np.floataa ).sum() self.assertEqual(shifted.shape , input_ids.shape ) self.assertEqual(_lowerCAmelCase , n_pad_before - 1 ) self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() ) @require_flax class lowerCAmelCase_ ( __magic_name__ ,unittest.TestCase ,__magic_name__ ): __lowerCamelCase : List[Any] = True __lowerCamelCase : List[str] = ( ( FlaxBlenderbotSmallModel, FlaxBlenderbotSmallForConditionalGeneration, ) if is_flax_available() else () ) __lowerCamelCase : Optional[Any] = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else () def _snake_case ( self ) -> List[Any]: _lowerCAmelCase = FlaxBlenderbotSmallModelTester(self ) def _snake_case ( self ) -> List[str]: _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) def 
_snake_case ( self ) -> Dict: _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) def _snake_case ( self ) -> int: _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _lowerCAmelCase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) _lowerCAmelCase = model_class(_lowerCAmelCase ) @jax.jit def encode_jitted(_lowerCAmelCase , _lowerCAmelCase=None , **_lowerCAmelCase ): return model.encode(input_ids=_lowerCAmelCase , attention_mask=_lowerCAmelCase ) with self.subTest("JIT Enabled" ): _lowerCAmelCase = encode_jitted(**_lowerCAmelCase ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): _lowerCAmelCase = encode_jitted(**_lowerCAmelCase ).to_tuple() self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) ) for jitted_output, output in zip(_lowerCAmelCase , _lowerCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) def _snake_case ( self ) -> Any: _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _lowerCAmelCase = model_class(_lowerCAmelCase ) _lowerCAmelCase = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] ) _lowerCAmelCase = { "decoder_input_ids": inputs_dict["decoder_input_ids"], "decoder_attention_mask": inputs_dict["decoder_attention_mask"], "encoder_outputs": encoder_outputs, } @jax.jit def decode_jitted(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): return model.decode( decoder_input_ids=_lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , encoder_outputs=_lowerCAmelCase , ) with self.subTest("JIT Enabled" ): _lowerCAmelCase = decode_jitted(**_lowerCAmelCase ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): _lowerCAmelCase = decode_jitted(**_lowerCAmelCase ).to_tuple() self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) ) for jitted_output, output in zip(_lowerCAmelCase , _lowerCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def _snake_case ( self ) -> Dict: for model_class_name in self.all_model_classes: _lowerCAmelCase = model_class_name.from_pretrained("facebook/blenderbot_small-90M" ) # FlaxBlenderbotForSequenceClassification expects eos token in input_ids _lowerCAmelCase = np.ones((1, 1) ) * model.config.eos_token_id _lowerCAmelCase = model(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase )
18
'''simple docstring''' import torch from diffusers import DDPMParallelScheduler from .test_schedulers import SchedulerCommonTest class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : Any = (DDPMParallelScheduler,) def _snake_case ( self , **_lowerCAmelCase ) -> int: _lowerCAmelCase = { "num_train_timesteps": 1000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "variance_type": "fixed_small", "clip_sample": True, } config.update(**_lowerCAmelCase ) return config def _snake_case ( self ) -> List[Any]: for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=_lowerCAmelCase ) def _snake_case ( self ) -> List[Any]: for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=_lowerCAmelCase , beta_end=_lowerCAmelCase ) def _snake_case ( self ) -> Any: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_lowerCAmelCase ) def _snake_case ( self ) -> Optional[Any]: for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=_lowerCAmelCase ) def _snake_case ( self ) -> Optional[int]: for clip_sample in [True, False]: self.check_over_configs(clip_sample=_lowerCAmelCase ) def _snake_case ( self ) -> List[str]: self.check_over_configs(thresholding=_lowerCAmelCase ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=_lowerCAmelCase , prediction_type=_lowerCAmelCase , sample_max_value=_lowerCAmelCase , ) def _snake_case ( self ) -> int: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=_lowerCAmelCase ) def _snake_case ( self ) -> Dict: for t in [0, 500, 999]: self.check_over_forward(time_step=_lowerCAmelCase ) def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config() _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5 def _snake_case ( self ) -> Tuple: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config() _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) _lowerCAmelCase = len(_lowerCAmelCase ) _lowerCAmelCase = self.dummy_model() _lowerCAmelCase = self.dummy_sample_deter _lowerCAmelCase = self.dummy_sample_deter + 0.1 _lowerCAmelCase = self.dummy_sample_deter - 0.1 _lowerCAmelCase = samplea.shape[0] _lowerCAmelCase = torch.stack([samplea, samplea, samplea] , dim=0 ) _lowerCAmelCase = torch.arange(_lowerCAmelCase )[0:3, None].repeat(1 , _lowerCAmelCase ) _lowerCAmelCase = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) _lowerCAmelCase = scheduler.batch_step_no_noise(_lowerCAmelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) ) _lowerCAmelCase = torch.sum(torch.abs(_lowerCAmelCase ) ) _lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_sum.item() - 1153.1833 ) < 1E-2 assert abs(result_mean.item() - 0.5005 ) < 1E-3 def _snake_case ( self ) -> Dict: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config() _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) _lowerCAmelCase = len(_lowerCAmelCase ) _lowerCAmelCase = self.dummy_model() _lowerCAmelCase = self.dummy_sample_deter 
_lowerCAmelCase = torch.manual_seed(0 ) for t in reversed(range(_lowerCAmelCase ) ): # 1. predict noise residual _lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase ) # 2. predict previous mean of sample x_t-1 _lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample _lowerCAmelCase = pred_prev_sample _lowerCAmelCase = torch.sum(torch.abs(_lowerCAmelCase ) ) _lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_sum.item() - 258.9606 ) < 1E-2 assert abs(result_mean.item() - 0.3372 ) < 1E-3 def _snake_case ( self ) -> Optional[Any]: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config(prediction_type="v_prediction" ) _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) _lowerCAmelCase = len(_lowerCAmelCase ) _lowerCAmelCase = self.dummy_model() _lowerCAmelCase = self.dummy_sample_deter _lowerCAmelCase = torch.manual_seed(0 ) for t in reversed(range(_lowerCAmelCase ) ): # 1. predict noise residual _lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase ) # 2. predict previous mean of sample x_t-1 _lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample _lowerCAmelCase = pred_prev_sample _lowerCAmelCase = torch.sum(torch.abs(_lowerCAmelCase ) ) _lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_sum.item() - 202.0296 ) < 1E-2 assert abs(result_mean.item() - 0.2631 ) < 1E-3 def _snake_case ( self ) -> Dict: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config() _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) _lowerCAmelCase = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=_lowerCAmelCase ) _lowerCAmelCase = scheduler.timesteps for i, timestep in enumerate(_lowerCAmelCase ): if i == len(_lowerCAmelCase ) - 1: _lowerCAmelCase = -1 else: _lowerCAmelCase = timesteps[i + 1] _lowerCAmelCase = scheduler.previous_timestep(_lowerCAmelCase ) _lowerCAmelCase = prev_t.item() self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) def _snake_case ( self ) -> Any: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config() _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) _lowerCAmelCase = [100, 87, 50, 51, 0] with self.assertRaises(_lowerCAmelCase , msg="`custom_timesteps` must be in descending order." ): scheduler.set_timesteps(timesteps=_lowerCAmelCase ) def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config() _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) _lowerCAmelCase = [100, 87, 50, 1, 0] _lowerCAmelCase = len(_lowerCAmelCase ) with self.assertRaises(_lowerCAmelCase , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ): scheduler.set_timesteps(num_inference_steps=_lowerCAmelCase , timesteps=_lowerCAmelCase ) def _snake_case ( self ) -> Optional[int]: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config() _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) _lowerCAmelCase = [scheduler.config.num_train_timesteps] with self.assertRaises( _lowerCAmelCase , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ): scheduler.set_timesteps(timesteps=_lowerCAmelCase )
18
1
'''simple docstring''' import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class lowerCAmelCase_ ( nn.Module ): __lowerCamelCase : int __lowerCamelCase : int __lowerCamelCase : float = 0.0 __lowerCamelCase : int = 1 __lowerCamelCase : int = 1 __lowerCamelCase : bool = True __lowerCamelCase : bool = False __lowerCamelCase : bool = False __lowerCamelCase : bool = False __lowerCamelCase : jnp.dtype = jnp.floataa def _snake_case ( self ) -> Dict: _lowerCAmelCase = [] _lowerCAmelCase = [] for i in range(self.num_layers ): _lowerCAmelCase = self.in_channels if i == 0 else self.out_channels _lowerCAmelCase = FlaxResnetBlockaD( in_channels=_lowerCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_lowerCAmelCase ) _lowerCAmelCase = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_lowerCAmelCase ) _lowerCAmelCase = resnets _lowerCAmelCase = attentions if self.add_downsample: _lowerCAmelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=True ) -> Any: _lowerCAmelCase = () for resnet, attn in zip(self.resnets , self.attentions ): _lowerCAmelCase = resnet(_lowerCAmelCase , _lowerCAmelCase , deterministic=_lowerCAmelCase ) _lowerCAmelCase = attn(_lowerCAmelCase , _lowerCAmelCase , deterministic=_lowerCAmelCase ) output_states += (hidden_states,) if self.add_downsample: _lowerCAmelCase = self.downsamplers_a(_lowerCAmelCase ) output_states += (hidden_states,) return hidden_states, output_states class lowerCAmelCase_ ( nn.Module ): __lowerCamelCase : int __lowerCamelCase : int __lowerCamelCase : float = 0.0 __lowerCamelCase : int = 1 __lowerCamelCase : bool = True __lowerCamelCase : jnp.dtype = jnp.floataa def _snake_case ( self ) -> List[Any]: _lowerCAmelCase = [] for i in range(self.num_layers ): _lowerCAmelCase = self.in_channels if i == 0 else self.out_channels _lowerCAmelCase = FlaxResnetBlockaD( in_channels=_lowerCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_lowerCAmelCase ) _lowerCAmelCase = resnets if self.add_downsample: _lowerCAmelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=True ) -> str: _lowerCAmelCase = () for resnet in self.resnets: _lowerCAmelCase = resnet(_lowerCAmelCase , _lowerCAmelCase , deterministic=_lowerCAmelCase ) output_states += (hidden_states,) if self.add_downsample: _lowerCAmelCase = self.downsamplers_a(_lowerCAmelCase ) output_states += (hidden_states,) return hidden_states, output_states class lowerCAmelCase_ ( nn.Module ): __lowerCamelCase : int __lowerCamelCase : int __lowerCamelCase : int __lowerCamelCase : float = 0.0 __lowerCamelCase : int = 1 __lowerCamelCase : int = 1 __lowerCamelCase : bool = True __lowerCamelCase : bool = False __lowerCamelCase : bool = False __lowerCamelCase : bool = False __lowerCamelCase : jnp.dtype = jnp.floataa def _snake_case ( self ) -> int: _lowerCAmelCase = [] _lowerCAmelCase = [] for i in 
range(self.num_layers ): _lowerCAmelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels _lowerCAmelCase = self.prev_output_channel if i == 0 else self.out_channels _lowerCAmelCase = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_lowerCAmelCase ) _lowerCAmelCase = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_lowerCAmelCase ) _lowerCAmelCase = resnets _lowerCAmelCase = attentions if self.add_upsample: _lowerCAmelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=True ) -> Any: for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states _lowerCAmelCase = res_hidden_states_tuple[-1] _lowerCAmelCase = res_hidden_states_tuple[:-1] _lowerCAmelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) _lowerCAmelCase = resnet(_lowerCAmelCase , _lowerCAmelCase , deterministic=_lowerCAmelCase ) _lowerCAmelCase = attn(_lowerCAmelCase , _lowerCAmelCase , deterministic=_lowerCAmelCase ) if self.add_upsample: _lowerCAmelCase = self.upsamplers_a(_lowerCAmelCase ) return hidden_states class lowerCAmelCase_ ( nn.Module ): __lowerCamelCase : int __lowerCamelCase : int __lowerCamelCase : int __lowerCamelCase : float = 0.0 __lowerCamelCase : int = 1 __lowerCamelCase : bool = True __lowerCamelCase : jnp.dtype = jnp.floataa def _snake_case ( self ) -> Optional[Any]: _lowerCAmelCase = [] for i in range(self.num_layers ): _lowerCAmelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels _lowerCAmelCase = self.prev_output_channel if i == 0 else self.out_channels _lowerCAmelCase = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_lowerCAmelCase ) _lowerCAmelCase = resnets if self.add_upsample: _lowerCAmelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=True ) -> Optional[Any]: for resnet in self.resnets: # pop res hidden states _lowerCAmelCase = res_hidden_states_tuple[-1] _lowerCAmelCase = res_hidden_states_tuple[:-1] _lowerCAmelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) _lowerCAmelCase = resnet(_lowerCAmelCase , _lowerCAmelCase , deterministic=_lowerCAmelCase ) if self.add_upsample: _lowerCAmelCase = self.upsamplers_a(_lowerCAmelCase ) return hidden_states class lowerCAmelCase_ ( nn.Module ): __lowerCamelCase : int __lowerCamelCase : float = 0.0 __lowerCamelCase : int = 1 __lowerCamelCase : int = 1 __lowerCamelCase : bool = False __lowerCamelCase : bool = False __lowerCamelCase : jnp.dtype = jnp.floataa def _snake_case ( self ) -> str: # there is always at least one resnet _lowerCAmelCase = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] _lowerCAmelCase = [] for _ in range(self.num_layers ): _lowerCAmelCase = FlaxTransformeraDModel( 
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_lowerCAmelCase ) _lowerCAmelCase = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_lowerCAmelCase ) _lowerCAmelCase = resnets _lowerCAmelCase = attentions def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=True ) -> List[str]: _lowerCAmelCase = self.resnets[0](_lowerCAmelCase , _lowerCAmelCase ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): _lowerCAmelCase = attn(_lowerCAmelCase , _lowerCAmelCase , deterministic=_lowerCAmelCase ) _lowerCAmelCase = resnet(_lowerCAmelCase , _lowerCAmelCase , deterministic=_lowerCAmelCase ) return hidden_states
18
'''simple docstring''' import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = {"vocab_file": "spiece.model"} _SCREAMING_SNAKE_CASE = { "vocab_file": { "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model", } } class lowerCAmelCase_ ( __magic_name__ ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<sep>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase="<cls>" , _lowerCAmelCase="<mask>" , _lowerCAmelCase=["<eop>", "<eod>"] , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> None: _lowerCAmelCase = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else mask_token _lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=_lowerCAmelCase , remove_space=_lowerCAmelCase , keep_accents=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , ) _lowerCAmelCase = 3 _lowerCAmelCase = do_lower_case _lowerCAmelCase = remove_space _lowerCAmelCase = keep_accents _lowerCAmelCase = vocab_file _lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_lowerCAmelCase ) try: import jieba except ModuleNotFoundError as error: raise error.__class__( "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. " "See https://pypi.org/project/jieba/ for installation." 
) _lowerCAmelCase = jieba _lowerCAmelCase = str.maketrans(" \n" , "\u2582\u2583" ) @property # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size def _snake_case ( self ) -> Optional[int]: return len(self.sp_model ) def _snake_case ( self ) -> Optional[int]: _lowerCAmelCase = {self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> Tuple: _lowerCAmelCase = self.__dict__.copy() _lowerCAmelCase = None return state def __setstate__( self , _lowerCAmelCase ) -> Dict: _lowerCAmelCase = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): _lowerCAmelCase = {} _lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _snake_case ( self , _lowerCAmelCase ) -> str: if self.remove_space: _lowerCAmelCase = " ".join(inputs.strip().split() ) else: _lowerCAmelCase = inputs _lowerCAmelCase = outputs.replace("``" , "\"" ).replace("''" , "\"" ) if not self.keep_accents: _lowerCAmelCase = unicodedata.normalize("NFKD" , _lowerCAmelCase ) _lowerCAmelCase = "".join([c for c in outputs if not unicodedata.combining(_lowerCAmelCase )] ) if self.do_lower_case: _lowerCAmelCase = outputs.lower() return outputs def _snake_case ( self , _lowerCAmelCase ) -> List[str]: _lowerCAmelCase = self.preprocess_text(_lowerCAmelCase ) _lowerCAmelCase = self.sp_model.encode(_lowerCAmelCase , out_type=_lowerCAmelCase ) _lowerCAmelCase = [] for piece in pieces: if len(_lowerCAmelCase ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit(): _lowerCAmelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(_lowerCAmelCase , "" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: _lowerCAmelCase = cur_pieces[1:] else: _lowerCAmelCase = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(_lowerCAmelCase ) else: new_pieces.append(_lowerCAmelCase ) return new_pieces def _snake_case ( self , _lowerCAmelCase ) -> str: return self.sp_model.PieceToId(_lowerCAmelCase ) def _snake_case ( self , _lowerCAmelCase ) -> Optional[Any]: return self.sp_model.IdToPiece(_lowerCAmelCase ) def _snake_case ( self , _lowerCAmelCase ) -> Optional[Any]: _lowerCAmelCase = "".join(_lowerCAmelCase ).replace(_lowerCAmelCase , " " ).strip() return out_string def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> List[int]: _lowerCAmelCase = [self.sep_token_id] _lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase ) if token_ids_a is not None: return ([0] * len(_lowerCAmelCase )) + [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] return ([0] * len(_lowerCAmelCase )) + [1, 1] def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> List[int]: _lowerCAmelCase = [self.sep_token_id] _lowerCAmelCase = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> Tuple[str]: if not os.path.isdir(_lowerCAmelCase ): 
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _lowerCAmelCase = os.path.join( _lowerCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_lowerCAmelCase , "wb" ) as fi: _lowerCAmelCase = self.sp_model.serialized_model_proto() fi.write(_lowerCAmelCase ) return (out_vocab_file,) def _snake_case ( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: _lowerCAmelCase = super()._decode(*_lowerCAmelCase , **_lowerCAmelCase ) _lowerCAmelCase = text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" ) return text
18
1
'''simple docstring''' from collections import namedtuple _SCREAMING_SNAKE_CASE = namedtuple("from_to", "from_ to") _SCREAMING_SNAKE_CASE = { "cubicmeter": from_to(1, 1), "litre": from_to(0.001, 10_00), "kilolitre": from_to(1, 1), "gallon": from_to(0.0_0454, 264.172), "cubicyard": from_to(0.7_6455, 1.3_0795), "cubicfoot": from_to(0.028, 35.3147), "cup": from_to(0.0_0023_6588, 4226.75), } def __a(SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str ): '''simple docstring''' if from_type not in METRIC_CONVERSION: raise ValueError( F'''Invalid \'from_type\' value: {from_type!r} Supported values are:\n''' + ", ".join(METRIC_CONVERSION ) ) if to_type not in METRIC_CONVERSION: raise ValueError( F'''Invalid \'to_type\' value: {to_type!r}. Supported values are:\n''' + ", ".join(METRIC_CONVERSION ) ) return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to if __name__ == "__main__": import doctest doctest.testmod()
18
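For reference, the conversion table above works by scaling the value into cubic metres (the `from_` factor) and then out to the target unit (the `to` factor). A trimmed, runnable sketch; `volume_conversion` and the sample call are illustrative:

```python
from collections import namedtuple

from_to = namedtuple("from_to", "from_ to")

# (factor into cubic metres, factor out of cubic metres)
METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
}


def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    for unit in (from_type, to_type):
        if unit not in METRIC_CONVERSION:
            raise ValueError(f"Invalid unit {unit!r}. Supported: " + ", ".join(METRIC_CONVERSION))
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to


print(volume_conversion(5, "cubicmeter", "litre"))  # -> 5000.0
```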
'''simple docstring''' from sklearn.metrics import mean_squared_error import datasets _SCREAMING_SNAKE_CASE = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n" _SCREAMING_SNAKE_CASE = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n" _SCREAMING_SNAKE_CASE = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. 
])}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): def _snake_case ( self ) -> Dict: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[ "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html" ] , ) def _snake_case ( self ) -> Tuple: if self.config_name == "multilist": return { "predictions": datasets.Sequence(datasets.Value("float" ) ), "references": datasets.Sequence(datasets.Value("float" ) ), } else: return { "predictions": datasets.Value("float" ), "references": datasets.Value("float" ), } def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase="uniform_average" , _lowerCAmelCase=True ) -> Union[str, Any]: _lowerCAmelCase = mean_squared_error( _lowerCAmelCase , _lowerCAmelCase , sample_weight=_lowerCAmelCase , multioutput=_lowerCAmelCase , squared=_lowerCAmelCase ) return {"mse": mse}
18
1
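Because the metric record above simply delegates to scikit-learn, the numbers in its docstring can be reproduced directly. A short sketch using the same values (note sklearn takes ground truth first):

```python
from sklearn.metrics import mean_squared_error

predictions = [2.5, 0.0, 2, 8]
references = [3, -0.5, 2, 7]

print(mean_squared_error(references, predictions))                  # 0.375
print(mean_squared_error(references, predictions, squared=False))   # ~0.6124 (RMSE)
```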
'''simple docstring''' import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class lowerCAmelCase_ ( unittest.TestCase ): @property def _snake_case ( self ) -> Optional[Any]: torch.manual_seed(0 ) _lowerCAmelCase = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , ) return model @property def _snake_case ( self ) -> Optional[int]: torch.manual_seed(0 ) _lowerCAmelCase = VQModel( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , ) return model @property def _snake_case ( self ) -> Any: torch.manual_seed(0 ) _lowerCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModel(_lowerCAmelCase ) def _snake_case ( self ) -> List[Any]: _lowerCAmelCase = self.dummy_uncond_unet _lowerCAmelCase = DDIMScheduler() _lowerCAmelCase = self.dummy_vq_model _lowerCAmelCase = LDMPipeline(unet=_lowerCAmelCase , vqvae=_lowerCAmelCase , scheduler=_lowerCAmelCase ) ldm.to(_lowerCAmelCase ) ldm.set_progress_bar_config(disable=_lowerCAmelCase ) _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = ldm(generator=_lowerCAmelCase , num_inference_steps=2 , output_type="numpy" ).images _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = ldm(generator=_lowerCAmelCase , num_inference_steps=2 , output_type="numpy" , return_dict=_lowerCAmelCase )[0] _lowerCAmelCase = image[0, -3:, -3:, -1] _lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _lowerCAmelCase = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] ) _lowerCAmelCase = 1E-2 if torch_device != "mps" else 3E-2 assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance @slow @require_torch class lowerCAmelCase_ ( unittest.TestCase ): def _snake_case ( self ) -> List[str]: _lowerCAmelCase = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256" ) ldm.to(_lowerCAmelCase ) ldm.set_progress_bar_config(disable=_lowerCAmelCase ) _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = ldm(generator=_lowerCAmelCase , num_inference_steps=5 , output_type="numpy" ).images _lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) _lowerCAmelCase = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447] ) _lowerCAmelCase = 1E-2 if torch_device != "mps" else 3E-2 assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
18
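As a usage note, the slow test in the record above reduces to a single sampling call. This sketch reuses only the model id, seed, and arguments that appear verbatim in the record:

```python
import torch
from diffusers import LDMPipeline

ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
generator = torch.manual_seed(0)  # fixed seed so the expected slice is reproducible
images = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images
print(images.shape)  # the test asserts (1, 256, 256, 3)
```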
'''simple docstring''' def __a(num_a : int , num_b : int ): '''simple docstring''' return num_a ^ num_b < 0 if __name__ == "__main__": import doctest doctest.testmod()
18
1
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : Tuple = ( "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image." "It takes two arguments named `image` which should be the original image, and `label` which should be a text " "describing the elements what should be identified in the segmentation mask. The tool returns the mask." ) __lowerCamelCase : int = "CIDAS/clipseg-rd64-refined" __lowerCamelCase : Union[str, Any] = "image_segmenter" __lowerCamelCase : Dict = CLIPSegForImageSegmentation __lowerCamelCase : Tuple = ["image", "text"] __lowerCamelCase : Optional[int] = ["image"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: requires_backends(self , ["vision"] ) super().__init__(*_lowerCAmelCase , **_lowerCAmelCase ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> List[str]: return self.pre_processor(text=[label] , images=[image] , padding=_lowerCAmelCase , return_tensors="pt" ) def _snake_case ( self , _lowerCAmelCase ) -> Dict: with torch.no_grad(): _lowerCAmelCase = self.model(**_lowerCAmelCase ).logits return logits def _snake_case ( self , _lowerCAmelCase ) -> int: _lowerCAmelCase = outputs.cpu().detach().numpy() _lowerCAmelCase = 0 _lowerCAmelCase = 1 return Image.fromarray((array * 255).astype(np.uinta ) )
18
'''simple docstring''' from __future__ import annotations def __a(SCREAMING_SNAKE_CASE_ : int | float | str , SCREAMING_SNAKE_CASE_ : int | float | str ): '''simple docstring''' if nth_term == "": return [""] _lowerCAmelCase = int(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = int(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = [] for temp in range(int(SCREAMING_SNAKE_CASE_ ) ): series.append(F'''1 / {pow(temp + 1 , int(SCREAMING_SNAKE_CASE_ ) )}''' if series else "1" ) return series if __name__ == "__main__": import doctest doctest.testmod() _SCREAMING_SNAKE_CASE = int(input("Enter the last number (nth term) of the P-Series")) _SCREAMING_SNAKE_CASE = int(input("Enter the power for P-Series")) print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p") print(p_series(nth_term, power))
18
1
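The record above renders the P-series terms as strings. A compact sketch mirroring the record's logic with descriptive names (the variable names are mine):

```python
def p_series(nth_term: int, power: int) -> list:
    """Return ["1", "1 / 2^p", ...] for the first nth_term terms of the P-series."""
    series = []
    for term in range(nth_term):
        # First term is "1"; later terms are 1 / (term+1)^power.
        series.append(f"1 / {pow(term + 1, power)}" if series else "1")
    return series


print(p_series(5, 2))  # -> ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']
```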
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { "SCUT-DLVCLab/lilt-roberta-en-base": ( "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json" ), } class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : List[Any] = "lilt" def __init__( self , _lowerCAmelCase=30522 , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3072 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=512 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-12 , _lowerCAmelCase=0 , _lowerCAmelCase="absolute" , _lowerCAmelCase=None , _lowerCAmelCase=4 , _lowerCAmelCase=1024 , **_lowerCAmelCase , ) -> Dict: super().__init__(pad_token_id=_lowerCAmelCase , **_lowerCAmelCase ) _lowerCAmelCase = vocab_size _lowerCAmelCase = hidden_size _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = num_attention_heads _lowerCAmelCase = hidden_act _lowerCAmelCase = intermediate_size _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = max_position_embeddings _lowerCAmelCase = type_vocab_size _lowerCAmelCase = initializer_range _lowerCAmelCase = layer_norm_eps _lowerCAmelCase = position_embedding_type _lowerCAmelCase = classifier_dropout _lowerCAmelCase = channel_shrink_ratio _lowerCAmelCase = max_ad_position_embeddings
18
'''simple docstring'''

from ..utils import DummyObject, requires_backends


# The source module repeats the placeholder class below verbatim, once per
# torch-backed class (the individual class names did not survive in this copy),
# followed by a handful of module-level function stubs of the same shape.
class lowerCAmelCase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


def __a(*args, **kwargs):
    requires_backends(__a, ["torch"])
18
1
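A self-contained sketch of the failure mode those dummy classes encode (every name below is a hypothetical stand-in, since the real class names were lost in this copy): importing succeeds without torch, and the error only fires when the stub is actually used.

# Hypothetical, self-contained reproduction of the dummy-object pattern:
def requires_backends(obj, backends):
    # mirrors the helper's behavior: raise an informative ImportError on use
    name = obj.__name__ if hasattr(obj, "__name__") else type(obj).__name__
    raise ImportError(f"{name} requires the following backends: {', '.join(backends)}")

class SomeTorchModel:  # hypothetical stand-in for one of the stubbed classes
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

try:
    SomeTorchModel.from_pretrained("some/checkpoint")
except ImportError as err:
    print(err)  # SomeTorchModel requires the following backends: torch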
'''simple docstring'''

import inspect
import unittest
import warnings

from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
    require_accelerate,
    require_torch,
    require_torch_gpu,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_MAPPING,
        DeiTForImageClassification,
        DeiTForImageClassificationWithTeacher,
        DeiTForMaskedImageModeling,
        DeiTModel,
    )
    from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import DeiTImageProcessor


class DeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong with the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto"
        )
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
18
'''simple docstring'''

import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter

import requests


def get_job_links(workflow_run_id, token=None):
    """Extract job names and their job links in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download an artifact zip; the API URL only returns a redirect to the real file."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)


def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result


def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files in `artifact_dir`."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]

    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors


def reduce_by_error(logs, error_filter=None):
    """Count occurrences of each error, most common first."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def get_model(test):
    """Get the model name from a test path like `tests/models/<model>/...`."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        model = test.split("/")[2]
    else:
        model = None

    return model


def reduce_by_model(logs, error_filter=None):
    """Count errors per model, most affected model first."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)

    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)

    return "\n".join(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
18
1
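If you'd rather drive the helpers above from Python than via the CLI block, a sketch (the module name `get_ci_error_statistics` is an assumption about how the script is saved, and the `artifacts/` directory is a placeholder for previously downloaded zips):

# Hypothetical driver for the reconstructed helpers above.
from collections import Counter

from get_ci_error_statistics import get_all_errors, reduce_by_error  # assumed module name

errors = get_all_errors("artifacts/", job_links={})  # each e = [error_line, error, failed_test, job_link]
counter = Counter(e[1] for e in errors)
for error, count in counter.most_common(10):
    print(count, error[:100])

reduced = reduce_by_error(errors)  # dict keyed by error, sorted by count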
'''simple docstring'''

import inspect
import unittest

from transformers import ViTConfig
from transformers.testing_utils import (
    require_accelerate,
    require_torch,
    require_torch_gpu,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
    from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class ViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_interpolate_pos_encoding(self):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher resolution images.
        model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)

        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)

        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
18
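The comment in the interpolation test above explains `interpolate_pos_encoding`; here is a minimal sketch of the same call outside the test harness (assumes Hub access and the vision extras installed; the blank 480x480 image is a placeholder input):

import torch
from PIL import Image
from transformers import ViTImageProcessor, ViTModel

model = ViTModel.from_pretrained("facebook/dino-vits8")
processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)

image = Image.new("RGB", (480, 480))  # any image; resized/normalized to 480x480 by the processor
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    # interpolate_pos_encoding=True resizes the pre-trained position embeddings so the
    # 8x8-patch model accepts the larger input: (480/8)^2 + 1 [CLS] = 3601 tokens.
    outputs = model(inputs.pixel_values, interpolate_pos_encoding=True)

print(outputs.last_hidden_state.shape)  # torch.Size([1, 3601, 384])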
'''simple docstring'''

import tempfile

import torch

from diffusers import (
    DEISMultistepScheduler,
    DPMSolverMultistepScheduler,
    DPMSolverSinglestepScheduler,
    UniPCMultistepScheduler,
)

from .test_schedulers import SchedulerCommonTest


class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        # save/load round-trips are already exercised in check_over_configs above
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        # only build a default scheduler when the caller did not pass one
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2574) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="dpmsolver++",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.0649) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
18
1
'''simple docstring''' import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, WavaVecaConfig, WavaVecaFeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 _SCREAMING_SNAKE_CASE = get_tests_dir("fixtures") _SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/dummy_feature_extractor_config.json") _SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/dummy-config.json") class lowerCAmelCase_ ( unittest.TestCase ): def _snake_case ( self ) -> Optional[int]: _lowerCAmelCase = 0 def _snake_case ( self ) -> Optional[int]: _lowerCAmelCase = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h" ) self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase ) def _snake_case ( self ) -> Optional[int]: _lowerCAmelCase = AutoFeatureExtractor.from_pretrained(_lowerCAmelCase ) self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase ) def _snake_case ( self ) -> str: with tempfile.TemporaryDirectory() as tmpdirname: _lowerCAmelCase = WavaVecaConfig() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally _lowerCAmelCase = AutoFeatureExtractor.from_pretrained(_lowerCAmelCase ).to_dict() config_dict.pop("feature_extractor_type" ) _lowerCAmelCase = WavaVecaFeatureExtractor(**_lowerCAmelCase ) # save in new folder model_config.save_pretrained(_lowerCAmelCase ) config.save_pretrained(_lowerCAmelCase ) _lowerCAmelCase = AutoFeatureExtractor.from_pretrained(_lowerCAmelCase ) # make sure private variable is not incorrectly saved _lowerCAmelCase = json.loads(config.to_json_string() ) self.assertTrue("_processor_class" not in dict_as_saved ) self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase ) def _snake_case ( self ) -> str: _lowerCAmelCase = AutoFeatureExtractor.from_pretrained(_lowerCAmelCase ) self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase ) def _snake_case ( self ) -> Tuple: with self.assertRaisesRegex( _lowerCAmelCase , "bert-base is not a local folder and is not a valid model identifier" ): _lowerCAmelCase = AutoFeatureExtractor.from_pretrained("bert-base" ) def _snake_case ( self ) -> Union[str, Any]: with self.assertRaisesRegex( _lowerCAmelCase , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): _lowerCAmelCase = AutoFeatureExtractor.from_pretrained(_lowerCAmelCase , revision="aaaaaa" ) def _snake_case ( self ) -> Union[str, Any]: with self.assertRaisesRegex( _lowerCAmelCase , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ): _lowerCAmelCase = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model" ) def _snake_case ( self ) -> Any: # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(_lowerCAmelCase ): _lowerCAmelCase = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" ) # If remote code is disabled, we can't load this config. 
with self.assertRaises(_lowerCAmelCase ): _lowerCAmelCase = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=_lowerCAmelCase ) _lowerCAmelCase = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=_lowerCAmelCase ) self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) # Test feature extractor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(_lowerCAmelCase ) _lowerCAmelCase = AutoFeatureExtractor.from_pretrained(_lowerCAmelCase , trust_remote_code=_lowerCAmelCase ) self.assertEqual(reloaded_feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) def _snake_case ( self ) -> Union[str, Any]: try: AutoConfig.register("custom" , _lowerCAmelCase ) AutoFeatureExtractor.register(_lowerCAmelCase , _lowerCAmelCase ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_lowerCAmelCase ): AutoFeatureExtractor.register(_lowerCAmelCase , _lowerCAmelCase ) # Now that the config is registered, it can be used as any other config with the auto-API _lowerCAmelCase = CustomFeatureExtractor.from_pretrained(_lowerCAmelCase ) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(_lowerCAmelCase ) _lowerCAmelCase = AutoFeatureExtractor.from_pretrained(_lowerCAmelCase ) self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def _snake_case ( self ) -> Optional[int]: class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : int = True try: AutoConfig.register("custom" , _lowerCAmelCase ) AutoFeatureExtractor.register(_lowerCAmelCase , _lowerCAmelCase ) # If remote code is not set, the default is to use local _lowerCAmelCase = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" ) self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) self.assertTrue(feature_extractor.is_local ) # If remote code is disabled, we load the local one. _lowerCAmelCase = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=_lowerCAmelCase ) self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) self.assertTrue(feature_extractor.is_local ) # If remote is enabled, we load from the Hub _lowerCAmelCase = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=_lowerCAmelCase ) self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) self.assertTrue(not hasattr(_lowerCAmelCase , "is_local" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
18
'''simple docstring'''
from __future__ import annotations


def __a(SCREAMING_SNAKE_CASE_: list):
    '''simple docstring'''
    if not SCREAMING_SNAKE_CASE_:  # was `nums`, which is undefined here; use the parameter
        raise ValueError("List is empty")
    return sum(SCREAMING_SNAKE_CASE_) / len(SCREAMING_SNAKE_CASE_)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
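A minimal sanity check of the averaging helper above (the obfuscated name `__a` is kept exactly as it appears in the sample; the call itself is illustrative only):

# Illustrative usage of the mean helper defined above.
values = [1.0, 2.0, 3.0, 4.0]
assert __a(values) == 2.5  # 10.0 / 4
try:
    __a([])  # the guard above rejects empty input
except ValueError as err:
    print(err)  # List is empty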
18
1
'''simple docstring'''
from typing import Any


class Node:
    def __init__(self, data) -> Any:
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self) -> None:
        self.head = None

    def print_list(self) -> None:
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data) -> None:
        # Insert the new node at the head of the list.
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1, node_data_2) -> None:
        if node_data_1 == node_data_2:
            return
        # Locate both nodes by value, then swap their payloads in place.
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
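The sample swaps the two payloads in place rather than relinking nodes, which avoids tracking predecessors and any head/adjacent-node edge cases. For contrast, a hypothetical pointer-relinking variant (the `swap_by_relinking` helper below is not part of the sample) would look roughly like this:

def swap_by_relinking(ll: "LinkedList", a, b) -> None:
    """Hypothetical alternative: swap the nodes themselves, not their data."""
    if a == b:
        return
    prev_a = prev_b = None
    node_a = node_b = ll.head
    while node_a is not None and node_a.data != a:
        prev_a, node_a = node_a, node_a.next
    while node_b is not None and node_b.data != b:
        prev_b, node_b = node_b, node_b.next
    if node_a is None or node_b is None:
        return
    # Rewire each predecessor (or the head) to point at the other node...
    if prev_a is None:
        ll.head = node_b
    else:
        prev_a.next = node_b
    if prev_b is None:
        ll.head = node_a
    else:
        prev_b.next = node_a
    # ...and swap the successors (simultaneous assignment also handles adjacent nodes).
    node_a.next, node_b.next = node_b.next, node_a.next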
18
'''simple docstring''' import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class lowerCAmelCase_ ( unittest.TestCase ): def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _lowerCAmelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_lowerCAmelCase ) _lowerCAmelCase = -1 _lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCAmelCase ) _lowerCAmelCase = model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase ) _lowerCAmelCase = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: _lowerCAmelCase = TextStreamer(_lowerCAmelCase ) model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase , streamer=_lowerCAmelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _lowerCAmelCase = cs.out[:-1] self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _lowerCAmelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_lowerCAmelCase ) _lowerCAmelCase = -1 _lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCAmelCase ) _lowerCAmelCase = model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase ) _lowerCAmelCase = tokenizer.decode(greedy_ids[0] ) _lowerCAmelCase = TextIteratorStreamer(_lowerCAmelCase ) _lowerCAmelCase = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} _lowerCAmelCase = Thread(target=model.generate , kwargs=_lowerCAmelCase ) thread.start() _lowerCAmelCase = "" for new_text in streamer: streamer_text += new_text self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) def _snake_case ( self ) -> List[str]: _lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _lowerCAmelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_lowerCAmelCase ) _lowerCAmelCase = -1 _lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCAmelCase ) _lowerCAmelCase = model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase ) _lowerCAmelCase = greedy_ids[:, input_ids.shape[1] :] _lowerCAmelCase = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: _lowerCAmelCase = TextStreamer(_lowerCAmelCase , skip_prompt=_lowerCAmelCase ) model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase , streamer=_lowerCAmelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _lowerCAmelCase = cs.out[:-1] self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) def _snake_case ( self ) -> Dict: # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. 
Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them _lowerCAmelCase = AutoTokenizer.from_pretrained("distilgpt2" ) _lowerCAmelCase = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(_lowerCAmelCase ) _lowerCAmelCase = -1 _lowerCAmelCase = torch.ones((1, 5) , device=_lowerCAmelCase ).long() * model.config.bos_token_id with CaptureStdout() as cs: _lowerCAmelCase = TextStreamer(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase ) model.generate(_lowerCAmelCase , max_new_tokens=1 , do_sample=_lowerCAmelCase , streamer=_lowerCAmelCase ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token _lowerCAmelCase = cs.out[:-1] # Remove the final "\n" _lowerCAmelCase = tokenizer(_lowerCAmelCase , return_tensors="pt" ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _lowerCAmelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_lowerCAmelCase ) _lowerCAmelCase = -1 _lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCAmelCase ) _lowerCAmelCase = TextIteratorStreamer(_lowerCAmelCase , timeout=0.001 ) _lowerCAmelCase = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} _lowerCAmelCase = Thread(target=model.generate , kwargs=_lowerCAmelCase ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(_lowerCAmelCase ): _lowerCAmelCase = "" for new_text in streamer: streamer_text += new_text
18
1
'''simple docstring'''
import math


class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
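The triple loop relaxes every pair (i, j) through every intermediate vertex k, so one pass costs Θ(n³) time over a Θ(n²) distance table. A sketch of a hand-checked expectation for the demo graph above (the values are derived from the listed edges, not taken from the original source, so treat them as illustrative):

# Sanity check for the demo graph (hand-computed shortest paths).
g = Graph(5)
for u, v, w in [(0, 2, 9), (0, 4, 10), (1, 3, 5), (2, 3, 7), (3, 0, 10),
                (3, 1, 2), (3, 2, 1), (3, 4, 6), (4, 1, 3), (4, 2, 4), (4, 3, 9)]:
    g.add_edge(u, v, w)
g.floyd_warshall()
assert g.show_min(1, 4) == 11  # 1 -> 3 (5) -> 4 (6)
assert g.show_min(0, 3) == 16  # 0 -> 2 (9) -> 3 (7)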
18
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json", # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : Union[str, Any] = "blenderbot-small" __lowerCamelCase : Optional[Any] = ["past_key_values"] __lowerCamelCase : str = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self , _lowerCAmelCase=50265 , _lowerCAmelCase=512 , _lowerCAmelCase=8 , _lowerCAmelCase=2048 , _lowerCAmelCase=16 , _lowerCAmelCase=8 , _lowerCAmelCase=2048 , _lowerCAmelCase=16 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase="gelu" , _lowerCAmelCase=512 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1 , _lowerCAmelCase=False , _lowerCAmelCase=0 , _lowerCAmelCase=1 , _lowerCAmelCase=2 , _lowerCAmelCase=2 , **_lowerCAmelCase , ) -> Dict: _lowerCAmelCase = vocab_size _lowerCAmelCase = max_position_embeddings _lowerCAmelCase = d_model _lowerCAmelCase = encoder_ffn_dim _lowerCAmelCase = encoder_layers _lowerCAmelCase = encoder_attention_heads _lowerCAmelCase = decoder_ffn_dim _lowerCAmelCase = decoder_layers _lowerCAmelCase = decoder_attention_heads _lowerCAmelCase = dropout _lowerCAmelCase = attention_dropout _lowerCAmelCase = activation_dropout _lowerCAmelCase = activation_function _lowerCAmelCase = init_std _lowerCAmelCase = encoder_layerdrop _lowerCAmelCase = decoder_layerdrop _lowerCAmelCase = use_cache _lowerCAmelCase = encoder_layers _lowerCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , is_encoder_decoder=_lowerCAmelCase , decoder_start_token_id=_lowerCAmelCase , forced_eos_token_id=_lowerCAmelCase , **_lowerCAmelCase , ) class lowerCAmelCase_ ( __magic_name__ ): @property def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: _lowerCAmelCase = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: _lowerCAmelCase = {0: "batch"} _lowerCAmelCase = {0: "batch", 1: "past_decoder_sequence + sequence"} else: _lowerCAmelCase = {0: "batch", 1: "decoder_sequence"} _lowerCAmelCase = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(_lowerCAmelCase , direction="inputs" ) elif self.task == "causal-lm": # TODO: figure this case out. 
_lowerCAmelCase = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: _lowerCAmelCase , _lowerCAmelCase = self.num_layers for i in range(_lowerCAmelCase ): _lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"} _lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"} else: _lowerCAmelCase = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: _lowerCAmelCase = super().outputs else: _lowerCAmelCase = super(_lowerCAmelCase , self ).outputs if self.use_past: _lowerCAmelCase , _lowerCAmelCase = self.num_layers for i in range(_lowerCAmelCase ): _lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"} _lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"} return common_outputs def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]: _lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # Generate decoder inputs _lowerCAmelCase = seq_length if not self.use_past else 1 _lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) _lowerCAmelCase = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()} _lowerCAmelCase = dict(**_lowerCAmelCase , **_lowerCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch _lowerCAmelCase , _lowerCAmelCase = common_inputs["input_ids"].shape _lowerCAmelCase = common_inputs["decoder_input_ids"].shape[1] _lowerCAmelCase , _lowerCAmelCase = self.num_attention_heads _lowerCAmelCase = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) _lowerCAmelCase = decoder_seq_length + 3 _lowerCAmelCase = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) _lowerCAmelCase = torch.cat( [common_inputs["decoder_attention_mask"], torch.ones(_lowerCAmelCase , _lowerCAmelCase )] , dim=1 ) _lowerCAmelCase = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered _lowerCAmelCase , _lowerCAmelCase = self.num_layers _lowerCAmelCase = min(_lowerCAmelCase , _lowerCAmelCase ) _lowerCAmelCase = max(_lowerCAmelCase , _lowerCAmelCase ) - min_num_layers _lowerCAmelCase = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(_lowerCAmelCase ): common_inputs["past_key_values"].append( ( torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase ), ) ) # TODO: test this. 
_lowerCAmelCase = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(_lowerCAmelCase , _lowerCAmelCase ): common_inputs["past_key_values"].append((torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) ) return common_inputs def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]: _lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch _lowerCAmelCase , _lowerCAmelCase = common_inputs["input_ids"].shape # Not using the same length for past_key_values _lowerCAmelCase = seqlen + 2 _lowerCAmelCase , _lowerCAmelCase = self.num_layers _lowerCAmelCase , _lowerCAmelCase = self.num_attention_heads _lowerCAmelCase = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) _lowerCAmelCase = common_inputs["attention_mask"].dtype _lowerCAmelCase = torch.cat( [common_inputs["attention_mask"], torch.ones(_lowerCAmelCase , _lowerCAmelCase , dtype=_lowerCAmelCase )] , dim=1 ) _lowerCAmelCase = [ (torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) for _ in range(_lowerCAmelCase ) ] return common_inputs def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]: # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. 
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX _lowerCAmelCase = compute_effective_axis_dimension( _lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX _lowerCAmelCase = tokenizer.num_special_tokens_to_add(_lowerCAmelCase ) _lowerCAmelCase = compute_effective_axis_dimension( _lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowerCAmelCase ) # Generate dummy inputs according to compute batch and sequence _lowerCAmelCase = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size _lowerCAmelCase = dict(tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase ) ) return common_inputs def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]: if self.task in ["default", "seq2seq-lm"]: _lowerCAmelCase = self._generate_dummy_inputs_for_default_and_seqaseq_lm( _lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase ) elif self.task == "causal-lm": _lowerCAmelCase = self._generate_dummy_inputs_for_causal_lm( _lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase ) else: _lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase ) return common_inputs def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict: if self.task in ["default", "seq2seq-lm"]: _lowerCAmelCase = super()._flatten_past_key_values_(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) else: _lowerCAmelCase = super(_lowerCAmelCase , self )._flatten_past_key_values_( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
18
1
'''simple docstring''' import numpy as np from nltk.translate import meteor_score import datasets from datasets.config import importlib_metadata, version _SCREAMING_SNAKE_CASE = version.parse(importlib_metadata.version("nltk")) if NLTK_VERSION >= version.Version("3.6.4"): from nltk import word_tokenize _SCREAMING_SNAKE_CASE = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n" _SCREAMING_SNAKE_CASE = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n" _SCREAMING_SNAKE_CASE = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. 
default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): def _snake_case ( self ) -> Optional[int]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"] , reference_urls=[ "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score", "https://en.wikipedia.org/wiki/METEOR", ] , ) def _snake_case ( self , _lowerCAmelCase ) -> Optional[int]: import nltk nltk.download("wordnet" ) if NLTK_VERSION >= version.Version("3.6.5" ): nltk.download("punkt" ) if NLTK_VERSION >= version.Version("3.6.6" ): nltk.download("omw-1.4" ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=0.9 , _lowerCAmelCase=3 , _lowerCAmelCase=0.5 ) -> int: if NLTK_VERSION >= version.Version("3.6.5" ): _lowerCAmelCase = [ meteor_score.single_meteor_score( word_tokenize(_lowerCAmelCase ) , word_tokenize(_lowerCAmelCase ) , alpha=_lowerCAmelCase , beta=_lowerCAmelCase , gamma=_lowerCAmelCase ) for ref, pred in zip(_lowerCAmelCase , _lowerCAmelCase ) ] else: _lowerCAmelCase = [ meteor_score.single_meteor_score(_lowerCAmelCase , _lowerCAmelCase , alpha=_lowerCAmelCase , beta=_lowerCAmelCase , gamma=_lowerCAmelCase ) for ref, pred in zip(_lowerCAmelCase , _lowerCAmelCase ) ] return {"meteor": np.mean(_lowerCAmelCase )}
18
'''simple docstring''' import re import string import numpy as np import datasets _SCREAMING_SNAKE_CASE = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n" _SCREAMING_SNAKE_CASE = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n" _SCREAMING_SNAKE_CASE = "\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): def _snake_case ( self ) -> List[str]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , 
id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , reference_urls=[] , ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=False , ) -> str: if regexes_to_ignore is not None: for s in regexes_to_ignore: _lowerCAmelCase = np.array([re.sub(_lowerCAmelCase , "" , _lowerCAmelCase ) for x in predictions] ) _lowerCAmelCase = np.array([re.sub(_lowerCAmelCase , "" , _lowerCAmelCase ) for x in references] ) else: _lowerCAmelCase = np.asarray(_lowerCAmelCase ) _lowerCAmelCase = np.asarray(_lowerCAmelCase ) if ignore_case: _lowerCAmelCase = np.char.lower(_lowerCAmelCase ) _lowerCAmelCase = np.char.lower(_lowerCAmelCase ) if ignore_punctuation: _lowerCAmelCase = string.punctuation.maketrans("" , "" , string.punctuation ) _lowerCAmelCase = np.char.translate(_lowerCAmelCase , table=_lowerCAmelCase ) _lowerCAmelCase = np.char.translate(_lowerCAmelCase , table=_lowerCAmelCase ) if ignore_numbers: _lowerCAmelCase = string.digits.maketrans("" , "" , string.digits ) _lowerCAmelCase = np.char.translate(_lowerCAmelCase , table=_lowerCAmelCase ) _lowerCAmelCase = np.char.translate(_lowerCAmelCase , table=_lowerCAmelCase ) _lowerCAmelCase = predictions == references return {"exact_match": np.mean(_lowerCAmelCase ) * 100}
18
1
'''simple docstring'''
from __future__ import annotations


def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    '''simple docstring'''
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    print(p_series(nth_term, power))
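An illustrative call, reasoning from the loop above: the first element is the bare "1" because `series` is still empty on the first iteration.

# Illustrative: the P-series terms for n = 5, p = 2.
print(p_series(5, 2))
# ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']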
18
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_yolos import YolosImageProcessor


_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
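This is the usual deprecation-shim pattern: the old class survives as a thin subclass of its replacement and only adds a FutureWarning. A sketch of how one might confirm the warning fires, assuming the module (and its vision dependencies) is importable inside a transformers source tree:

import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    extractor = YolosFeatureExtractor()  # otherwise behaves exactly like YolosImageProcessor
assert any(issubclass(w.category, FutureWarning) for w in caught)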
18
1
'''simple docstring''' import asyncio import os import re import sys import tempfile import unittest from contextlib import contextmanager from copy import deepcopy from distutils.util import strtobool from enum import Enum from importlib.util import find_spec from pathlib import Path from unittest.mock import patch import pyarrow as pa import pytest import requests from packaging import version from datasets import config if config.PY_VERSION < version.parse("3.8"): import importlib_metadata else: import importlib.metadata as importlib_metadata def __a(SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Tuple=False ): '''simple docstring''' try: _lowerCAmelCase = os.environ[key] except KeyError: # KEY isn't set, default to `default`. _lowerCAmelCase = default else: # KEY is set, convert it to True or False. try: _lowerCAmelCase = strtobool(SCREAMING_SNAKE_CASE_ ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F'''If set, {key} must be yes or no.''' ) return _value _SCREAMING_SNAKE_CASE = parse_flag_from_env("RUN_SLOW", default=False) _SCREAMING_SNAKE_CASE = parse_flag_from_env("RUN_REMOTE", default=False) _SCREAMING_SNAKE_CASE = parse_flag_from_env("RUN_LOCAL", default=True) _SCREAMING_SNAKE_CASE = parse_flag_from_env("RUN_PACKAGED", default=True) # Compression _SCREAMING_SNAKE_CASE = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4") _SCREAMING_SNAKE_CASE = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr") _SCREAMING_SNAKE_CASE = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard") # Audio _SCREAMING_SNAKE_CASE = pytest.mark.skipif( # On Windows and OS X, soundfile installs sndfile find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"), reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ", ) # Beam _SCREAMING_SNAKE_CASE = pytest.mark.skipif( not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"), reason="test requires apache-beam and a compatible dill version", ) # Dill-cloudpickle compatibility _SCREAMING_SNAKE_CASE = pytest.mark.skipif( config.DILL_VERSION <= version.parse("0.3.2"), reason="test requires dill>0.3.2 for cloudpickle compatibility", ) # Windows _SCREAMING_SNAKE_CASE = pytest.mark.skipif( sys.platform == "win32", reason="test should not be run on Windows", ) def __a(SCREAMING_SNAKE_CASE_ : str ): '''simple docstring''' try: import faiss # noqa except ImportError: _lowerCAmelCase = unittest.skip("test requires faiss" )(SCREAMING_SNAKE_CASE_ ) return test_case def __a(SCREAMING_SNAKE_CASE_ : Any ): '''simple docstring''' try: import regex # noqa except ImportError: _lowerCAmelCase = unittest.skip("test requires regex" )(SCREAMING_SNAKE_CASE_ ) return test_case def __a(SCREAMING_SNAKE_CASE_ : Union[str, Any] ): '''simple docstring''' try: import elasticsearch # noqa except ImportError: _lowerCAmelCase = unittest.skip("test requires elasticsearch" )(SCREAMING_SNAKE_CASE_ ) return test_case def __a(SCREAMING_SNAKE_CASE_ : Optional[Any] ): '''simple docstring''' try: import sqlalchemy # noqa except ImportError: _lowerCAmelCase = unittest.skip("test requires sqlalchemy" )(SCREAMING_SNAKE_CASE_ ) return test_case def __a(SCREAMING_SNAKE_CASE_ : str ): '''simple docstring''' if not config.TORCH_AVAILABLE: _lowerCAmelCase = unittest.skip("test requires PyTorch" )(SCREAMING_SNAKE_CASE_ ) return test_case def __a(SCREAMING_SNAKE_CASE_ : 
Optional[Any] ): '''simple docstring''' if not config.TF_AVAILABLE: _lowerCAmelCase = unittest.skip("test requires TensorFlow" )(SCREAMING_SNAKE_CASE_ ) return test_case def __a(SCREAMING_SNAKE_CASE_ : Tuple ): '''simple docstring''' if not config.JAX_AVAILABLE: _lowerCAmelCase = unittest.skip("test requires JAX" )(SCREAMING_SNAKE_CASE_ ) return test_case def __a(SCREAMING_SNAKE_CASE_ : Dict ): '''simple docstring''' if not config.PIL_AVAILABLE: _lowerCAmelCase = unittest.skip("test requires Pillow" )(SCREAMING_SNAKE_CASE_ ) return test_case def __a(SCREAMING_SNAKE_CASE_ : int ): '''simple docstring''' try: import transformers # noqa F401 except ImportError: return unittest.skip("test requires transformers" )(SCREAMING_SNAKE_CASE_ ) else: return test_case def __a(SCREAMING_SNAKE_CASE_ : Any ): '''simple docstring''' try: import tiktoken # noqa F401 except ImportError: return unittest.skip("test requires tiktoken" )(SCREAMING_SNAKE_CASE_ ) else: return test_case def __a(SCREAMING_SNAKE_CASE_ : List[str] ): '''simple docstring''' try: import spacy # noqa F401 except ImportError: return unittest.skip("test requires spacy" )(SCREAMING_SNAKE_CASE_ ) else: return test_case def __a(SCREAMING_SNAKE_CASE_ : Optional[int] ): '''simple docstring''' def _require_spacy_model(SCREAMING_SNAKE_CASE_ : Union[str, Any] ): try: import spacy # noqa F401 spacy.load(SCREAMING_SNAKE_CASE_ ) except ImportError: return unittest.skip("test requires spacy" )(SCREAMING_SNAKE_CASE_ ) except OSError: return unittest.skip("test requires spacy model '{}'".format(SCREAMING_SNAKE_CASE_ ) )(SCREAMING_SNAKE_CASE_ ) else: return test_case return _require_spacy_model def __a(SCREAMING_SNAKE_CASE_ : Optional[int] ): '''simple docstring''' try: import pyspark # noqa F401 except ImportError: return unittest.skip("test requires pyspark" )(SCREAMING_SNAKE_CASE_ ) else: return test_case def __a(SCREAMING_SNAKE_CASE_ : Union[str, Any] ): '''simple docstring''' try: import joblibspark # noqa F401 except ImportError: return unittest.skip("test requires joblibspark" )(SCREAMING_SNAKE_CASE_ ) else: return test_case def __a(SCREAMING_SNAKE_CASE_ : Optional[int] ): '''simple docstring''' if not _run_slow_tests or _run_slow_tests == 0: _lowerCAmelCase = unittest.skip("test is slow" )(SCREAMING_SNAKE_CASE_ ) return test_case def __a(SCREAMING_SNAKE_CASE_ : Dict ): '''simple docstring''' if not _run_local_tests or _run_local_tests == 0: _lowerCAmelCase = unittest.skip("test is local" )(SCREAMING_SNAKE_CASE_ ) return test_case def __a(SCREAMING_SNAKE_CASE_ : Optional[Any] ): '''simple docstring''' if not _run_packaged_tests or _run_packaged_tests == 0: _lowerCAmelCase = unittest.skip("test is packaged" )(SCREAMING_SNAKE_CASE_ ) return test_case def __a(SCREAMING_SNAKE_CASE_ : Any ): '''simple docstring''' if not _run_remote_tests or _run_remote_tests == 0: _lowerCAmelCase = unittest.skip("test requires remote" )(SCREAMING_SNAKE_CASE_ ) return test_case def __a(*SCREAMING_SNAKE_CASE_ : Optional[Any] ): '''simple docstring''' def decorate(cls : Any ): for name, fn in cls.__dict__.items(): if callable(SCREAMING_SNAKE_CASE_ ) and name.startswith("test" ): for decorator in decorators: _lowerCAmelCase = decorator(SCREAMING_SNAKE_CASE_ ) setattr(cls , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return cls return decorate class lowerCAmelCase_ ( __magic_name__ ): pass class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : Any = 0 __lowerCamelCase : Any = 1 __lowerCamelCase : Optional[int] = 2 @contextmanager def __a(SCREAMING_SNAKE_CASE_ : 
List[str]=OfflineSimulationMode.CONNECTION_FAILS , SCREAMING_SNAKE_CASE_ : Dict=1e-16 ): '''simple docstring''' _lowerCAmelCase = requests.Session().request def timeout_request(SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ): # Change the url to an invalid url so that the connection hangs _lowerCAmelCase = "https://10.255.255.1" if kwargs.get("timeout" ) is None: raise RequestWouldHangIndefinitelyError( F'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''' ) _lowerCAmelCase = timeout try: return online_request(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) except Exception as e: # The following changes in the error are just here to make the offline timeout error prettier _lowerCAmelCase = url _lowerCAmelCase = e.args[0] _lowerCAmelCase = (max_retry_error.args[0].replace("10.255.255.1" , F'''OfflineMock[{url}]''' ),) _lowerCAmelCase = (max_retry_error,) raise def raise_connection_error(SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : int ): raise requests.ConnectionError("Offline mode is enabled." , request=SCREAMING_SNAKE_CASE_ ) if mode is OfflineSimulationMode.CONNECTION_FAILS: with patch("requests.Session.send" , SCREAMING_SNAKE_CASE_ ): yield elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT: # inspired from https://stackoverflow.com/a/904609 with patch("requests.Session.request" , SCREAMING_SNAKE_CASE_ ): yield elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: with patch("datasets.config.HF_DATASETS_OFFLINE" , SCREAMING_SNAKE_CASE_ ): yield else: raise ValueError("Please use a value from the OfflineSimulationMode enum." ) @contextmanager def __a(*SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Optional[int] ): '''simple docstring''' _lowerCAmelCase = str(Path().resolve() ) with tempfile.TemporaryDirectory(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) as tmp_dir: try: os.chdir(SCREAMING_SNAKE_CASE_ ) yield finally: os.chdir(SCREAMING_SNAKE_CASE_ ) @contextmanager def __a(): '''simple docstring''' import gc gc.collect() _lowerCAmelCase = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase." @contextmanager def __a(): '''simple docstring''' import gc gc.collect() _lowerCAmelCase = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase." 
def __a(SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Tuple ): '''simple docstring''' return deepcopy(SCREAMING_SNAKE_CASE_ ).integers(0 , 100 , 10 ).tolist() == deepcopy(SCREAMING_SNAKE_CASE_ ).integers(0 , 100 , 10 ).tolist() def __a(SCREAMING_SNAKE_CASE_ : Union[str, Any] ): '''simple docstring''' import decorator from requests.exceptions import HTTPError def _wrapper(SCREAMING_SNAKE_CASE_ : Dict , *SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ): try: return func(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) except HTTPError as err: if str(SCREAMING_SNAKE_CASE_ ).startswith("500" ) or str(SCREAMING_SNAKE_CASE_ ).startswith("502" ): pytest.xfail(str(SCREAMING_SNAKE_CASE_ ) ) raise err return decorator.decorator(_wrapper , SCREAMING_SNAKE_CASE_ ) class lowerCAmelCase_ : def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: _lowerCAmelCase = returncode _lowerCAmelCase = stdout _lowerCAmelCase = stderr async def __a(SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict ): '''simple docstring''' while True: _lowerCAmelCase = await stream.readline() if line: callback(SCREAMING_SNAKE_CASE_ ) else: break async def __a(SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any=None , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None , SCREAMING_SNAKE_CASE_ : Dict=None , SCREAMING_SNAKE_CASE_ : int=False , SCREAMING_SNAKE_CASE_ : Tuple=False ): '''simple docstring''' if echo: print("\nRunning: " , " ".join(SCREAMING_SNAKE_CASE_ ) ) _lowerCAmelCase = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=SCREAMING_SNAKE_CASE_ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=SCREAMING_SNAKE_CASE_ , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) _lowerCAmelCase = [] _lowerCAmelCase = [] def tee(SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : int="" ): _lowerCAmelCase = line.decode("utf-8" ).rstrip() sink.append(SCREAMING_SNAKE_CASE_ ) if not quiet: print(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , file=SCREAMING_SNAKE_CASE_ ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ _read_stream(p.stdout , lambda SCREAMING_SNAKE_CASE_ : tee(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , sys.stdout , label="stdout:" ) ), _read_stream(p.stderr , lambda SCREAMING_SNAKE_CASE_ : tee(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , sys.stderr , label="stderr:" ) ), ] , timeout=SCREAMING_SNAKE_CASE_ , ) return _RunOutput(await p.wait() , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def __a(SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None , SCREAMING_SNAKE_CASE_ : Tuple=None , SCREAMING_SNAKE_CASE_ : str=180 , SCREAMING_SNAKE_CASE_ : Optional[Any]=False , SCREAMING_SNAKE_CASE_ : int=True ): '''simple docstring''' _lowerCAmelCase = asyncio.get_event_loop() _lowerCAmelCase = loop.run_until_complete( _stream_subprocess(SCREAMING_SNAKE_CASE_ , env=SCREAMING_SNAKE_CASE_ , stdin=SCREAMING_SNAKE_CASE_ , timeout=SCREAMING_SNAKE_CASE_ , quiet=SCREAMING_SNAKE_CASE_ , echo=SCREAMING_SNAKE_CASE_ ) ) _lowerCAmelCase = " ".join(SCREAMING_SNAKE_CASE_ ) if result.returncode > 0: _lowerCAmelCase = "\n".join(result.stderr ) raise RuntimeError( F'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n''' F'''The combined stderr from workers follows:\n{stderr}''' ) # check that the subprocess actually did run and produced some output, should the test rely on # the remote side to do the testing if not result.stdout and not result.stderr: raise RuntimeError(F'''\'{cmd_str}\' produced no output.''' ) return result def __a(): '''simple docstring''' _lowerCAmelCase = os.environ.get("PYTEST_XDIST_WORKER" , "gw0" ) _lowerCAmelCase = re.sub(R"^gw" , "" , SCREAMING_SNAKE_CASE_ , 0 , re.M ) return int(SCREAMING_SNAKE_CASE_ ) def __a(): '''simple docstring''' _lowerCAmelCase = 29500 _lowerCAmelCase = pytest_xdist_worker_id() return port + uniq_delta
18
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)

_SCREAMING_SNAKE_CASE = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}


class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
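A minimal usage sketch under the naming assumptions of the reconstruction above (`FalconConfig`, `head_dim`, `rotary`): with the defaults shown, 4544 hidden dimensions split over 71 heads gives 64-dimensional heads, and the legacy `n_embed` kwarg still overrides `hidden_size`.

# Illustrative: the defaults reproduce Falcon-7B-style dimensions.
config = FalconConfig()
assert config.head_dim == 4544 // 71 == 64
assert config.rotary  # rotary embeddings apply whenever alibi=False
legacy = FalconConfig(n_embed=1024)  # legacy kwarg takes precedence over hidden_size
assert legacy.hidden_size == 1024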
18
1
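A minimal usage sketch for the async subprocess helpers in the first row above (assuming the de-obfuscated names `execute_subprocess_async` and `get_torch_dist_unique_port`; the command itself is only illustrative):

import sys

cmd = [sys.executable, "-c", "import sys; print('hello'); print('oops', file=sys.stderr)"]
result = execute_subprocess_async(cmd, timeout=60)
print(result.stdout)  # ["hello"]  (captured line by line by tee())
print(result.stderr)  # ["oops"]
print(get_torch_dist_unique_port())  # 29500 + pytest-xdist worker id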
'''simple docstring''' import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCAmelCase_ ( __magic_name__ ,unittest.TestCase ): __lowerCamelCase : Optional[int] = LEDTokenizer __lowerCamelCase : Union[str, Any] = LEDTokenizerFast __lowerCamelCase : int = True def _snake_case ( self ) -> Tuple: super().setUp() _lowerCAmelCase = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] _lowerCAmelCase = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) ) _lowerCAmelCase = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] _lowerCAmelCase = {"unk_token": "<unk>"} _lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) _lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(_lowerCAmelCase ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(_lowerCAmelCase ) ) def _snake_case ( self , **_lowerCAmelCase ) -> Optional[Any]: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCAmelCase ) def _snake_case ( self , **_lowerCAmelCase ) -> Any: kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCAmelCase ) def _snake_case ( self , _lowerCAmelCase ) -> Tuple: return "lower newer", "lower newer" @cached_property def _snake_case ( self ) -> Tuple: return LEDTokenizer.from_pretrained("allenai/led-base-16384" ) @cached_property def _snake_case ( self ) -> Tuple: return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" ) @require_torch def _snake_case ( self ) -> Dict: _lowerCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."] _lowerCAmelCase = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowerCAmelCase = tokenizer(_lowerCAmelCase , max_length=len(_lowerCAmelCase ) , padding=_lowerCAmelCase , return_tensors="pt" ) self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) _lowerCAmelCase = batch.input_ids.tolist()[0] self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase ) @require_torch def _snake_case ( self ) -> str: _lowerCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowerCAmelCase = tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="pt" ) self.assertIn("input_ids" , _lowerCAmelCase ) self.assertIn("attention_mask" , _lowerCAmelCase ) self.assertNotIn("labels" , _lowerCAmelCase ) self.assertNotIn("decoder_attention_mask" , _lowerCAmelCase ) @require_torch def _snake_case ( self ) -> Any: _lowerCAmelCase = [ "Summary of the text.", "Another summary.", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowerCAmelCase = tokenizer(text_target=_lowerCAmelCase , 
max_length=32 , padding="max_length" , return_tensors="pt" ) self.assertEqual(32 , targets["input_ids"].shape[1] ) @require_torch def _snake_case ( self ) -> Tuple: for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowerCAmelCase = tokenizer( ["I am a small frog" * 1024, "I am a small frog"] , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , return_tensors="pt" ) self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase ) self.assertEqual(batch.input_ids.shape , (2, 5122) ) @require_torch def _snake_case ( self ) -> List[Any]: _lowerCAmelCase = ["A long paragraph for summarization."] _lowerCAmelCase = [ "Summary of the text.", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowerCAmelCase = tokenizer(_lowerCAmelCase , return_tensors="pt" ) _lowerCAmelCase = tokenizer(text_target=_lowerCAmelCase , return_tensors="pt" ) _lowerCAmelCase = inputs["input_ids"] _lowerCAmelCase = targets["input_ids"] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def _snake_case ( self ) -> List[Any]: for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowerCAmelCase = ["Summary of the text.", "Another summary."] _lowerCAmelCase = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] _lowerCAmelCase = tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase ) _lowerCAmelCase = [[0] * len(_lowerCAmelCase ) for x in encoded_output["input_ids"]] _lowerCAmelCase = tokenizer.pad(_lowerCAmelCase ) self.assertSequenceEqual(outputs["global_attention_mask"] , _lowerCAmelCase ) def _snake_case ( self ) -> List[str]: pass def _snake_case ( self ) -> List[str]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase ) _lowerCAmelCase = self.tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase ) _lowerCAmelCase = "A, <mask> AllenNLP sentence." _lowerCAmelCase = tokenizer_r.encode_plus(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase ) _lowerCAmelCase = tokenizer_p.encode_plus(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase ) self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) ) self.assertEqual( sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , ) _lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] ) _lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] ) self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual( _lowerCAmelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) self.assertSequenceEqual( _lowerCAmelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
18
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { "facebook/deit-base-distilled-patch16-224": ( "https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json" ), # See all DeiT models at https://huggingface.co/models?filter=deit } class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : Optional[int] = "deit" def __init__( self , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3072 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-12 , _lowerCAmelCase=224 , _lowerCAmelCase=16 , _lowerCAmelCase=3 , _lowerCAmelCase=True , _lowerCAmelCase=16 , **_lowerCAmelCase , ) -> Dict: super().__init__(**_lowerCAmelCase ) _lowerCAmelCase = hidden_size _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = num_attention_heads _lowerCAmelCase = intermediate_size _lowerCAmelCase = hidden_act _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = initializer_range _lowerCAmelCase = layer_norm_eps _lowerCAmelCase = image_size _lowerCAmelCase = patch_size _lowerCAmelCase = num_channels _lowerCAmelCase = qkv_bias _lowerCAmelCase = encoder_stride class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : List[str] = version.parse("1.11" ) @property def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def _snake_case ( self ) -> float: return 1E-4
18
1
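A quick arithmetic check on the DeiT defaults in the config above; the two extra tokens are DeiT's class and distillation tokens (a worked sketch, not part of the row):

image_size, patch_size = 224, 16
num_patches = (image_size // patch_size) ** 2  # 14 * 14 = 196
seq_len = num_patches + 2                      # 198 tokens enter the encoder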
'''simple docstring''' from binascii import hexlify from hashlib import shaaaa from os import urandom # RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for # Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526 _SCREAMING_SNAKE_CASE = { # 1536-bit 5: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF", base=16, ), "generator": 2, }, # 2048-bit 14: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AACAA68FFFFFFFFFFFFFFFF", base=16, ), "generator": 2, }, # 3072-bit 15: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" + "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF", base=16, ), "generator": 2, }, # 4096-bit 16: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7" + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA" + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6" + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED" + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9" + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199" + "FFFFFFFFFFFFFFFF", base=16, ), "generator": 2, }, # 6144-bit 17: { "prime": int( 
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08" + "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B" + "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9" + "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6" + "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8" + "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C" + "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718" + "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D" + "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D" + "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226" + "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC" + "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26" + "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB" + "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2" + "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127" + "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492" + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406" + "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918" + "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151" + "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03" + "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F" + "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA" + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B" + "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632" + "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E" + "6DCC4024FFFFFFFFFFFFFFFF", base=16, ), "generator": 2, }, # 8192-bit 18: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7" + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA" + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6" + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED" + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9" + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492" + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD" + "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831" + "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B" + "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF" + "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6" + "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3" + "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA" + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328" + "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C" + "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE" + "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4" + "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300" + "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568" + 
"3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9" + "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B" + "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A" + "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36" + "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1" + "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92" + "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47" + "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71" + "60C980DD98EDD3DFFFFFFFFFFFFFFFFF", base=16, ), "generator": 2, }, } class lowerCAmelCase_ : def __init__( self , _lowerCAmelCase = 14 ) -> None: if group not in primes: raise ValueError("Unsupported Group" ) _lowerCAmelCase = primes[group]["prime"] _lowerCAmelCase = primes[group]["generator"] _lowerCAmelCase = int(hexlify(urandom(32 ) ) , base=16 ) def _snake_case ( self ) -> str: return hex(self.__private_key )[2:] def _snake_case ( self ) -> str: _lowerCAmelCase = pow(self.generator , self.__private_key , self.prime ) return hex(_lowerCAmelCase )[2:] def _snake_case ( self , _lowerCAmelCase ) -> bool: # check if the other public key is valid based on NIST SP800-56 return ( 2 <= key <= self.prime - 2 and pow(_lowerCAmelCase , (self.prime - 1) // 2 , self.prime ) == 1 ) def _snake_case ( self , _lowerCAmelCase ) -> str: _lowerCAmelCase = int(_lowerCAmelCase , base=16 ) if not self.is_valid_public_key(_lowerCAmelCase ): raise ValueError("Invalid public key" ) _lowerCAmelCase = pow(_lowerCAmelCase , self.__private_key , self.prime ) return shaaaa(str(_lowerCAmelCase ).encode() ).hexdigest() @staticmethod def _snake_case ( _lowerCAmelCase , _lowerCAmelCase ) -> bool: # check if the other public key is valid based on NIST SP800-56 return ( 2 <= remote_public_key_str <= prime - 2 and pow(_lowerCAmelCase , (prime - 1) // 2 , _lowerCAmelCase ) == 1 ) @staticmethod def _snake_case ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 14 ) -> str: _lowerCAmelCase = int(_lowerCAmelCase , base=16 ) _lowerCAmelCase = int(_lowerCAmelCase , base=16 ) _lowerCAmelCase = primes[group]["prime"] if not DiffieHellman.is_valid_public_key_static(_lowerCAmelCase , _lowerCAmelCase ): raise ValueError("Invalid public key" ) _lowerCAmelCase = pow(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) return shaaaa(str(_lowerCAmelCase ).encode() ).hexdigest() if __name__ == "__main__": import doctest doctest.testmod()
18
'''simple docstring''' from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _SCREAMING_SNAKE_CASE = { "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"], "feature_extraction_mctct": ["MCTCTFeatureExtractor"], "processing_mctct": ["MCTCTProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST", "MCTCTForCTC", "MCTCTModel", "MCTCTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
18
1
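An end-to-end sketch of the Diffie-Hellman class above. The method names are assumptions: the row's `_snake_case` placeholders presumably stand for `generate_public_key` and `generate_shared_key`, and `shaaaa` for hashlib's `sha256`:

alice = DiffieHellman(group=14)
bob = DiffieHellman(group=14)
shared_a = alice.generate_shared_key(bob.generate_public_key())
shared_b = bob.generate_shared_key(alice.generate_public_key())
assert shared_a == shared_b  # both sides derive the same SHA-256 hex digest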
from math import pow


def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
18
'''simple docstring''' from typing import Dict, Iterable, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : int = ["pixel_values"] def __init__( self , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = PILImageResampling.BICUBIC , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = True , _lowerCAmelCase = 1 / 255 , _lowerCAmelCase = True , _lowerCAmelCase = IMAGENET_DEFAULT_MEAN , _lowerCAmelCase = IMAGENET_DEFAULT_STD , **_lowerCAmelCase , ) -> None: super().__init__(**_lowerCAmelCase ) _lowerCAmelCase = size if size is not None else {"shortest_edge": 224} _lowerCAmelCase = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase ) _lowerCAmelCase = crop_size if crop_size is not None else {"height": 224, "width": 224} _lowerCAmelCase = get_size_dict(_lowerCAmelCase , param_name="crop_size" ) _lowerCAmelCase = do_resize _lowerCAmelCase = size _lowerCAmelCase = resample _lowerCAmelCase = do_center_crop _lowerCAmelCase = crop_size _lowerCAmelCase = do_rescale _lowerCAmelCase = rescale_factor _lowerCAmelCase = do_normalize _lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN _lowerCAmelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = PILImageResampling.BICUBIC , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> np.ndarray: _lowerCAmelCase = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase ) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: _lowerCAmelCase = int((256 / 224) * size["shortest_edge"] ) _lowerCAmelCase = get_resize_output_image_size(_lowerCAmelCase , size=_lowerCAmelCase , default_to_square=_lowerCAmelCase ) _lowerCAmelCase = {"height": output_size[0], "width": output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( f'''Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}''' ) return resize( _lowerCAmelCase , size=(size_dict["height"], size_dict["width"]) , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> np.ndarray: _lowerCAmelCase = get_size_dict(_lowerCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''Size dict must have keys \'height\' and \'width\'. 
Got {size.keys()}''' ) return center_crop(_lowerCAmelCase , size=(size["height"], size["width"]) , data_format=_lowerCAmelCase , **_lowerCAmelCase ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> np.ndarray: return rescale(_lowerCAmelCase , scale=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> np.ndarray: return normalize(_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = ChannelDimension.FIRST , **_lowerCAmelCase , ) -> BatchFeature: _lowerCAmelCase = do_resize if do_resize is not None else self.do_resize _lowerCAmelCase = resample if resample is not None else self.resample _lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop _lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale _lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor _lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize _lowerCAmelCase = image_mean if image_mean is not None else self.image_mean _lowerCAmelCase = image_std if image_std is not None else self.image_std _lowerCAmelCase = size if size is not None else self.size _lowerCAmelCase = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase ) _lowerCAmelCase = crop_size if crop_size is not None else self.crop_size _lowerCAmelCase = get_size_dict(_lowerCAmelCase , param_name="crop_size" ) _lowerCAmelCase = make_list_of_images(_lowerCAmelCase ) if not valid_images(_lowerCAmelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. _lowerCAmelCase = [to_numpy_array(_lowerCAmelCase ) for image in images] if do_resize: _lowerCAmelCase = [self.resize(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) for image in images] if do_center_crop: _lowerCAmelCase = [self.center_crop(_lowerCAmelCase , _lowerCAmelCase ) for image in images] if do_rescale: _lowerCAmelCase = [self.rescale(_lowerCAmelCase , _lowerCAmelCase ) for image in images] if do_normalize: _lowerCAmelCase = [self.normalize(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) for image in images] _lowerCAmelCase = [to_channel_dimension_format(_lowerCAmelCase , _lowerCAmelCase ) for image in images] _lowerCAmelCase = {"pixel_values": images} return BatchFeature(data=_lowerCAmelCase , tensor_type=_lowerCAmelCase )
18
1
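Two worked calls for the power-sum solver above (counts follow from enumerating sums of distinct natural numbers raised to `power`):

solve(13, 2)  # 1, since 13 = 2**2 + 3**2 is the only decomposition
solve(10, 2)  # 1, since 10 = 1**2 + 3**2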
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, is_vision_available, ) _SCREAMING_SNAKE_CASE = {"processing_layoutxlm": ["LayoutXLMProcessor"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = ["LayoutXLMTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = ["LayoutXLMTokenizerFast"] if TYPE_CHECKING: from .processing_layoutxlm import LayoutXLMProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm import LayoutXLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
18
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json", # See all Donut models at https://huggingface.co/models?filter=donut-swin } class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : Union[str, Any] = "donut-swin" __lowerCamelCase : int = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self , _lowerCAmelCase=224 , _lowerCAmelCase=4 , _lowerCAmelCase=3 , _lowerCAmelCase=96 , _lowerCAmelCase=[2, 2, 6, 2] , _lowerCAmelCase=[3, 6, 12, 24] , _lowerCAmelCase=7 , _lowerCAmelCase=4.0 , _lowerCAmelCase=True , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.1 , _lowerCAmelCase="gelu" , _lowerCAmelCase=False , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-5 , **_lowerCAmelCase , ) -> Optional[Any]: super().__init__(**_lowerCAmelCase ) _lowerCAmelCase = image_size _lowerCAmelCase = patch_size _lowerCAmelCase = num_channels _lowerCAmelCase = embed_dim _lowerCAmelCase = depths _lowerCAmelCase = len(_lowerCAmelCase ) _lowerCAmelCase = num_heads _lowerCAmelCase = window_size _lowerCAmelCase = mlp_ratio _lowerCAmelCase = qkv_bias _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = drop_path_rate _lowerCAmelCase = hidden_act _lowerCAmelCase = use_absolute_embeddings _lowerCAmelCase = layer_norm_eps _lowerCAmelCase = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _lowerCAmelCase = int(embed_dim * 2 ** (len(_lowerCAmelCase ) - 1) )
18
1
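The donut-swin config above derives its final channel dimension from `embed_dim` and the number of stages; checking the defaults by hand:

embed_dim, depths = 96, [2, 2, 6, 2]
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))  # 96 * 2**3 = 768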
import math
import os
import sys


def read_file_binary(file_path: str) -> str:
    """Read a file and return its contents as a string of '0'/'1' bits."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(
    lexicon: dict[str, str], curr_string: str, index: int, last_match_id: str
) -> None:
    """Grow the LZW lexicon with the two one-bit extensions of curr_string."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id
    if math.log2(index).is_integer():
        # A new bit is needed for the codes; left-pad every existing code.
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]
    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    """LZW-compress a bit string into a bit string."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result


def add_file_length(source_path: str, compressed: str) -> str:
    """Prepend the (unary-prefixed) original file length to the compressed bits."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    """Pad the bit string to whole bytes and write it out."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
18
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { "microsoft/swinv2-tiny-patch4-window8-256": ( "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json" ), } class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : Union[str, Any] = "swinv2" __lowerCamelCase : int = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self , _lowerCAmelCase=224 , _lowerCAmelCase=4 , _lowerCAmelCase=3 , _lowerCAmelCase=96 , _lowerCAmelCase=[2, 2, 6, 2] , _lowerCAmelCase=[3, 6, 12, 24] , _lowerCAmelCase=7 , _lowerCAmelCase=4.0 , _lowerCAmelCase=True , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.1 , _lowerCAmelCase="gelu" , _lowerCAmelCase=False , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-5 , _lowerCAmelCase=32 , **_lowerCAmelCase , ) -> Tuple: super().__init__(**_lowerCAmelCase ) _lowerCAmelCase = image_size _lowerCAmelCase = patch_size _lowerCAmelCase = num_channels _lowerCAmelCase = embed_dim _lowerCAmelCase = depths _lowerCAmelCase = len(_lowerCAmelCase ) _lowerCAmelCase = num_heads _lowerCAmelCase = window_size _lowerCAmelCase = mlp_ratio _lowerCAmelCase = qkv_bias _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = drop_path_rate _lowerCAmelCase = hidden_act _lowerCAmelCase = use_absolute_embeddings _lowerCAmelCase = layer_norm_eps _lowerCAmelCase = initializer_range _lowerCAmelCase = encoder_stride # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _lowerCAmelCase = int(embed_dim * 2 ** (len(_lowerCAmelCase ) - 1) ) _lowerCAmelCase = (0, 0, 0, 0)
18
1
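A minimal driver call for the LZW compressor above (file names are placeholders; `compress` is the de-obfuscated entry point):

compress("input.bin", "output.lzw")  # read bits, LZW-encode, prepend length header, write bytes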
'''simple docstring''' import argparse import math import os from copy import deepcopy import torch from audio_diffusion.models import DiffusionAttnUnetaD from diffusion import sampling from torch import nn from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel _SCREAMING_SNAKE_CASE = { "gwf-440k": { "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt", "sample_rate": 4_80_00, "sample_size": 6_55_36, }, "jmann-small-190k": { "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt", "sample_rate": 4_80_00, "sample_size": 6_55_36, }, "jmann-large-580k": { "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt", "sample_rate": 4_80_00, "sample_size": 13_10_72, }, "maestro-uncond-150k": { "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt", "sample_rate": 1_60_00, "sample_size": 6_55_36, }, "unlocked-uncond-250k": { "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt", "sample_rate": 1_60_00, "sample_size": 6_55_36, }, "honk-140k": { "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt", "sample_rate": 1_60_00, "sample_size": 6_55_36, }, } def __a(SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Any ): '''simple docstring''' return torch.atana(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) / math.pi * 2 def __a(SCREAMING_SNAKE_CASE_ : int ): '''simple docstring''' _lowerCAmelCase = torch.sin(t * math.pi / 2 ) ** 2 _lowerCAmelCase = (1 - sigma**2) ** 0.5 return alpha_sigma_to_t(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) class lowerCAmelCase_ ( __magic_name__ ): pass class lowerCAmelCase_ ( nn.Module ): def __init__( self , _lowerCAmelCase ) -> Any: super().__init__() _lowerCAmelCase = DiffusionAttnUnetaD(_lowerCAmelCase , n_attn_layers=4 ) _lowerCAmelCase = deepcopy(self.diffusion ) _lowerCAmelCase = torch.quasirandom.SobolEngine(1 , scramble=_lowerCAmelCase ) def __a(SCREAMING_SNAKE_CASE_ : List[str] ): '''simple docstring''' _lowerCAmelCase = MODELS_MAP[model_name]["url"] os.system(F'''wget {url} ./''' ) return F'''./{model_name}.ckpt''' _SCREAMING_SNAKE_CASE = { "1": "resnets.0", "2": "attentions.0", "3": "resnets.1", "4": "attentions.1", "5": "resnets.2", "6": "attentions.2", } _SCREAMING_SNAKE_CASE = { "8": "resnets.0", "9": "attentions.0", "10": "resnets.1", "11": "attentions.1", "12": "resnets.2", "13": "attentions.2", } _SCREAMING_SNAKE_CASE = { "1": "resnets.0", "2": "attentions.0", "3": "resnets.1", "4": "attentions.1", "5": "resnets.2", "6": "attentions.2", "8": "resnets.3", "9": "attentions.3", "10": "resnets.4", "11": "attentions.4", "12": "resnets.5", "13": "attentions.5", } _SCREAMING_SNAKE_CASE = { "0": "resnets.0", "1": "resnets.1", "2": "resnets.2", "4": "resnets.0", "5": "resnets.1", "6": "resnets.2", } _SCREAMING_SNAKE_CASE = { "skip": "conv_skip", "main.0": "conv_1", "main.1": "group_norm_1", "main.3": "conv_2", "main.4": "group_norm_2", } _SCREAMING_SNAKE_CASE = { "norm": "group_norm", "qkv_proj": ["query", "key", "value"], "out_proj": ["proj_attn"], } def __a(SCREAMING_SNAKE_CASE_ : int ): '''simple docstring''' if name.startswith("skip" ): return name.replace("skip" , RES_CONV_MAP["skip"] ) # name has to be of format main.{digit} if not name.startswith("main." 
): raise ValueError(F'''ResConvBlock error with {name}''' ) return name.replace(name[:6] , RES_CONV_MAP[name[:6]] ) def __a(SCREAMING_SNAKE_CASE_ : int ): '''simple docstring''' for key, value in ATTN_MAP.items(): if name.startswith(SCREAMING_SNAKE_CASE_ ) and not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): return name.replace(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) elif name.startswith(SCREAMING_SNAKE_CASE_ ): return [name.replace(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for v in value] raise ValueError(F'''Attn error with {name}''' ) def __a(SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : str=13 ): '''simple docstring''' _lowerCAmelCase = input_string if string.split("." )[0] == "timestep_embed": return string.replace("timestep_embed" , "time_proj" ) _lowerCAmelCase = 0 if string.startswith("net.3." ): depth += 1 _lowerCAmelCase = string[6:] elif string.startswith("net." ): _lowerCAmelCase = string[4:] while string.startswith("main.7." ): depth += 1 _lowerCAmelCase = string[7:] if string.startswith("main." ): _lowerCAmelCase = string[5:] # mid block if string[:2].isdigit(): _lowerCAmelCase = string[:2] _lowerCAmelCase = string[2:] else: _lowerCAmelCase = string[0] _lowerCAmelCase = string[1:] if depth == max_depth: _lowerCAmelCase = MID_NUM_TO_LAYER[layer_num] _lowerCAmelCase = "mid_block" elif depth > 0 and int(SCREAMING_SNAKE_CASE_ ) < 7: _lowerCAmelCase = DOWN_NUM_TO_LAYER[layer_num] _lowerCAmelCase = F'''down_blocks.{depth}''' elif depth > 0 and int(SCREAMING_SNAKE_CASE_ ) > 7: _lowerCAmelCase = UP_NUM_TO_LAYER[layer_num] _lowerCAmelCase = F'''up_blocks.{max_depth - depth - 1}''' elif depth == 0: _lowerCAmelCase = DEPTH_0_TO_LAYER[layer_num] _lowerCAmelCase = F'''up_blocks.{max_depth - 1}''' if int(SCREAMING_SNAKE_CASE_ ) > 3 else "down_blocks.0" if not string_left.startswith("." ): raise ValueError(F'''Naming error with {input_string} and string_left: {string_left}.''' ) _lowerCAmelCase = string_left[1:] if "resnets" in new_layer: _lowerCAmelCase = convert_resconv_naming(SCREAMING_SNAKE_CASE_ ) elif "attentions" in new_layer: _lowerCAmelCase = convert_attn_naming(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = new_string_left if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): _lowerCAmelCase = prefix + "." + new_layer + "." + string_left else: _lowerCAmelCase = [prefix + "." + new_layer + "." 
+ s for s in string_left] return new_string def __a(SCREAMING_SNAKE_CASE_ : Optional[Any] ): '''simple docstring''' _lowerCAmelCase = {} for k, v in state_dict.items(): if k.endswith("kernel" ): # up- and downsample layers, don't have trainable weights continue _lowerCAmelCase = rename(SCREAMING_SNAKE_CASE_ ) # check if we need to transform from Conv => Linear for attention if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): _lowerCAmelCase = transform_conv_attns(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else: _lowerCAmelCase = v return new_state_dict def __a(SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any] ): '''simple docstring''' if len(SCREAMING_SNAKE_CASE_ ) == 1: if len(v.shape ) == 3: # weight _lowerCAmelCase = v[:, :, 0] else: # bias _lowerCAmelCase = v else: # qkv matrices _lowerCAmelCase = v.shape[0] _lowerCAmelCase = trippled_shape // 3 for i in range(3 ): if len(v.shape ) == 3: _lowerCAmelCase = v[i * single_shape : (i + 1) * single_shape, :, 0] else: _lowerCAmelCase = v[i * single_shape : (i + 1) * single_shape] return new_state_dict def __a(SCREAMING_SNAKE_CASE_ : Optional[Any] ): '''simple docstring''' _lowerCAmelCase = torch.device("cuda" if torch.cuda.is_available() else "cpu" ) _lowerCAmelCase = args.model_path.split("/" )[-1].split("." )[0] if not os.path.isfile(args.model_path ): assert ( model_name == args.model_path ), F'''Make sure to provide one of the official model names {MODELS_MAP.keys()}''' _lowerCAmelCase = download(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = MODELS_MAP[model_name]["sample_rate"] _lowerCAmelCase = MODELS_MAP[model_name]["sample_size"] _lowerCAmelCase = Object() _lowerCAmelCase = sample_size _lowerCAmelCase = sample_rate _lowerCAmelCase = 0 _lowerCAmelCase = UNetaDModel(sample_size=SCREAMING_SNAKE_CASE_ , sample_rate=SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = diffusers_model.state_dict() _lowerCAmelCase = DiffusionUncond(SCREAMING_SNAKE_CASE_ ) orig_model.load_state_dict(torch.load(args.model_path , map_location=SCREAMING_SNAKE_CASE_ )["state_dict"] ) _lowerCAmelCase = orig_model.diffusion_ema.eval() _lowerCAmelCase = orig_model.state_dict() _lowerCAmelCase = rename_orig_weights(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() ) _lowerCAmelCase = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() ) assert len(SCREAMING_SNAKE_CASE_ ) == 0, F'''Problem with {renamed_minus_diffusers}''' assert all(k.endswith("kernel" ) for k in list(SCREAMING_SNAKE_CASE_ ) ), F'''Problem with {diffusers_minus_renamed}''' for key, value in renamed_state_dict.items(): assert ( diffusers_state_dict[key].squeeze().shape == value.squeeze().shape ), F'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. 
{value.shape}''' if key == "time_proj.weight": _lowerCAmelCase = value.squeeze() _lowerCAmelCase = value diffusers_model.load_state_dict(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = 100 _lowerCAmelCase = 33 _lowerCAmelCase = IPNDMScheduler(num_train_timesteps=SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = torch.manual_seed(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = torch.randn([1, 2, config.sample_size] , generator=SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = torch.linspace(1 , 0 , steps + 1 , device=SCREAMING_SNAKE_CASE_ )[:-1] _lowerCAmelCase = get_crash_schedule(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = DanceDiffusionPipeline(unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = torch.manual_seed(33 ) _lowerCAmelCase = pipe(num_inference_steps=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ ).audios _lowerCAmelCase = sampling.iplms_sample(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , {} ) _lowerCAmelCase = generated.clamp(-1 , 1 ) _lowerCAmelCase = (generated - audio).abs().sum() _lowerCAmelCase = (generated - audio).abs().max() if args.save: pipe.save_pretrained(args.checkpoint_path ) print("Diff sum" , SCREAMING_SNAKE_CASE_ ) print("Diff max" , SCREAMING_SNAKE_CASE_ ) assert diff_max < 1e-3, F'''Diff max: {diff_max} is too much :-/''' print(F'''Conversion for {model_name} successful!''' ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE = argparse.ArgumentParser() parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.") parser.add_argument( "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not." ) parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.") _SCREAMING_SNAKE_CASE = parser.parse_args() main(args)
18
'''simple docstring''' import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class lowerCAmelCase_ ( __magic_name__ ,__magic_name__ ,unittest.TestCase ): __lowerCamelCase : Optional[Any] = AutoencoderKL __lowerCamelCase : List[Any] = "sample" __lowerCamelCase : Tuple = 1e-2 @property def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = 4 _lowerCAmelCase = 3 _lowerCAmelCase = (32, 32) _lowerCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(_lowerCAmelCase ) return {"sample": image} @property def _snake_case ( self ) -> Any: return (3, 32, 32) @property def _snake_case ( self ) -> List[Any]: return (3, 32, 32) def _snake_case ( self ) -> str: _lowerCAmelCase = { "block_out_channels": [32, 64], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 4, } _lowerCAmelCase = self.dummy_input return init_dict, inputs_dict def _snake_case ( self ) -> Optional[int]: pass def _snake_case ( self ) -> Any: pass @unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" ) def _snake_case ( self ) -> str: # enable deterministic behavior for gradient checkpointing _lowerCAmelCase , _lowerCAmelCase = self.prepare_init_args_and_inputs_for_common() _lowerCAmelCase = self.model_class(**_lowerCAmelCase ) model.to(_lowerCAmelCase ) assert not model.is_gradient_checkpointing and model.training _lowerCAmelCase = model(**_lowerCAmelCase ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() _lowerCAmelCase = torch.randn_like(_lowerCAmelCase ) _lowerCAmelCase = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing _lowerCAmelCase = self.model_class(**_lowerCAmelCase ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(_lowerCAmelCase ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training _lowerCAmelCase = model_a(**_lowerCAmelCase ).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() _lowerCAmelCase = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1E-5 ) _lowerCAmelCase = dict(model.named_parameters() ) _lowerCAmelCase = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) ) def _snake_case ( self ) -> Optional[Any]: _lowerCAmelCase , _lowerCAmelCase = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) self.assertEqual(len(loading_info["missing_keys"] ) , 0 ) model.to(_lowerCAmelCase ) _lowerCAmelCase = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def _snake_case ( self ) -> Dict: _lowerCAmelCase = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" ) _lowerCAmelCase = model.to(_lowerCAmelCase ) model.eval() if torch_device == "mps": _lowerCAmelCase = torch.manual_seed(0 ) else: _lowerCAmelCase = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 ) _lowerCAmelCase = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) _lowerCAmelCase = image.to(_lowerCAmelCase ) with torch.no_grad(): _lowerCAmelCase = model(_lowerCAmelCase , sample_posterior=_lowerCAmelCase , generator=_lowerCAmelCase ).sample _lowerCAmelCase = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. if torch_device == "mps": _lowerCAmelCase = torch.tensor( [ -4.0078E-01, -3.8323E-04, -1.2681E-01, -1.1462E-01, 2.0095E-01, 1.0893E-01, -8.8247E-02, -3.0361E-01, -9.8644E-03, ] ) elif torch_device == "cpu": _lowerCAmelCase = torch.tensor( [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] ) else: _lowerCAmelCase = torch.tensor( [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] ) self.assertTrue(torch_all_close(_lowerCAmelCase , _lowerCAmelCase , rtol=1E-2 ) ) @slow class lowerCAmelCase_ ( unittest.TestCase ): def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]: return f'''gaussian_noise_s={seed}_shape={'_'.join([str(_lowerCAmelCase ) for s in shape] )}.npy''' def _snake_case ( self ) -> List[str]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self , _lowerCAmelCase=0 , _lowerCAmelCase=(4, 3, 512, 512) , _lowerCAmelCase=False ) -> Tuple: _lowerCAmelCase = torch.floataa if fpaa else torch.floataa _lowerCAmelCase = torch.from_numpy(load_hf_numpy(self.get_file_format(_lowerCAmelCase , _lowerCAmelCase ) ) ).to(_lowerCAmelCase ).to(_lowerCAmelCase ) return image def _snake_case ( self , _lowerCAmelCase="CompVis/stable-diffusion-v1-4" , _lowerCAmelCase=False ) -> Tuple: _lowerCAmelCase = "fp16" if fpaa else None _lowerCAmelCase = torch.floataa if fpaa else torch.floataa _lowerCAmelCase = AutoencoderKL.from_pretrained( _lowerCAmelCase , subfolder="vae" , torch_dtype=_lowerCAmelCase , revision=_lowerCAmelCase , ) model.to(_lowerCAmelCase ).eval() return model def _snake_case ( self , _lowerCAmelCase=0 ) -> str: if torch_device == "mps": return torch.manual_seed(_lowerCAmelCase ) return 
torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase ) @parameterized.expand( [ # fmt: off [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]], [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]], # fmt: on ] ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict: _lowerCAmelCase = self.get_sd_vae_model() _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase ) _lowerCAmelCase = self.get_generator(_lowerCAmelCase ) with torch.no_grad(): _lowerCAmelCase = model(_lowerCAmelCase , generator=_lowerCAmelCase , sample_posterior=_lowerCAmelCase ).sample assert sample.shape == image.shape _lowerCAmelCase = sample[-1, -2:, -2:, :2].flatten().float().cpu() _lowerCAmelCase = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=3E-3 ) @parameterized.expand( [ # fmt: off [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]], [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]], # fmt: on ] ) @require_torch_gpu def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: _lowerCAmelCase = self.get_sd_vae_model(fpaa=_lowerCAmelCase ) _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , fpaa=_lowerCAmelCase ) _lowerCAmelCase = self.get_generator(_lowerCAmelCase ) with torch.no_grad(): _lowerCAmelCase = model(_lowerCAmelCase , generator=_lowerCAmelCase , sample_posterior=_lowerCAmelCase ).sample assert sample.shape == image.shape _lowerCAmelCase = sample[-1, -2:, :2, -2:].flatten().float().cpu() _lowerCAmelCase = torch.tensor(_lowerCAmelCase ) assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1E-2 ) @parameterized.expand( [ # fmt: off [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]], [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]], # fmt: on ] ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Tuple: _lowerCAmelCase = self.get_sd_vae_model() _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase ) with torch.no_grad(): _lowerCAmelCase = model(_lowerCAmelCase ).sample assert sample.shape == image.shape _lowerCAmelCase = sample[-1, -2:, -2:, :2].flatten().float().cpu() _lowerCAmelCase = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=3E-3 ) @parameterized.expand( [ # fmt: off [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]], [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]], # fmt: on ] ) @require_torch_gpu def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: _lowerCAmelCase = self.get_sd_vae_model() _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , shape=(3, 4, 64, 64) ) with torch.no_grad(): _lowerCAmelCase = model.decode(_lowerCAmelCase ).sample assert list(sample.shape ) == [3, 3, 512, 512] _lowerCAmelCase = sample[-1, -2:, :2, -2:].flatten().cpu() _lowerCAmelCase = torch.tensor(_lowerCAmelCase ) assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) @parameterized.expand( [ # fmt: 
off [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]], [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]], # fmt: on ] ) @require_torch_gpu def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> str: _lowerCAmelCase = self.get_sd_vae_model(fpaa=_lowerCAmelCase ) _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , shape=(3, 4, 64, 64) , fpaa=_lowerCAmelCase ) with torch.no_grad(): _lowerCAmelCase = model.decode(_lowerCAmelCase ).sample assert list(sample.shape ) == [3, 3, 512, 512] _lowerCAmelCase = sample[-1, -2:, :2, -2:].flatten().float().cpu() _lowerCAmelCase = torch.tensor(_lowerCAmelCase ) assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=5E-3 ) @parameterized.expand([(13,), (16,), (27,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." ) def _snake_case ( self , _lowerCAmelCase ) -> List[str]: _lowerCAmelCase = self.get_sd_vae_model(fpaa=_lowerCAmelCase ) _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , shape=(3, 4, 64, 64) , fpaa=_lowerCAmelCase ) with torch.no_grad(): _lowerCAmelCase = model.decode(_lowerCAmelCase ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): _lowerCAmelCase = model.decode(_lowerCAmelCase ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1E-1 ) @parameterized.expand([(13,), (16,), (37,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." ) def _snake_case ( self , _lowerCAmelCase ) -> Any: _lowerCAmelCase = self.get_sd_vae_model() _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , shape=(3, 4, 64, 64) ) with torch.no_grad(): _lowerCAmelCase = model.decode(_lowerCAmelCase ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): _lowerCAmelCase = model.decode(_lowerCAmelCase ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1E-2 ) @parameterized.expand( [ # fmt: off [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]], [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]], # fmt: on ] ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> int: _lowerCAmelCase = self.get_sd_vae_model() _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase ) _lowerCAmelCase = self.get_generator(_lowerCAmelCase ) with torch.no_grad(): _lowerCAmelCase = model.encode(_lowerCAmelCase ).latent_dist _lowerCAmelCase = dist.sample(generator=_lowerCAmelCase ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] _lowerCAmelCase = sample[0, -1, -3:, -3:].flatten().cpu() _lowerCAmelCase = torch.tensor(_lowerCAmelCase ) _lowerCAmelCase = 3E-3 if torch_device != "mps" else 1E-2 assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=_lowerCAmelCase )
18
1
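A minimal, self-contained sketch of the assertion pattern the VAE tests above repeat: seed a generator, flatten a small slice of the model output, and compare it to pinned reference values within an absolute tolerance. `expected_slice` is a placeholder here, not a real checkpoint value, and `torch.allclose` stands in for the tests' `torch_all_close` helper.

import torch

generator = torch.Generator().manual_seed(33)  # deterministic sampling
sample = torch.randn(1, 3, 8, 8, generator=generator)  # stand-in for a model output

actual = sample[-1, -2:, -2:, :2].flatten().float().cpu()
expected_slice = actual.clone()  # stand-in for the hard-coded reference values

assert torch.allclose(actual, expected_slice, atol=3e-3)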
'''simple docstring'''


def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Recursive two-pointer linear search; returns the index of ``key`` or -1.

    The original row renamed the function to ``__a`` and collapsed all four
    parameters to one name, which broke both the signature and the recursive
    call; the body itself still references ``list_data``, ``key``, ``left``
    and ``right``, so those names are restored here.
    """
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
18
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
18
1
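A quick usage sketch for the recursive two-pointer search in the row above; the list and expected indices are made up for illustration.

data = [5, 1, 12, 0, 11, 9]

assert search(data, 12) == 2   # hit found by the advancing left pointer
assert search(data, 9) == 5    # hit found immediately at the right pointer
assert search(data, 7) == -1   # absent keys return -1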
'''simple docstring'''


def different_signs(num1: int, num2: int) -> bool:
    """Return True iff the two integers have opposite signs.

    The obfuscated row collapsed both parameters into one name and left the
    body referencing an undefined ``numa``; the intended expression is
    ``num1 ^ num2 < 0``.
    """
    return num1 ^ num2 < 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
18
'''simple docstring''' import math from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json", # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio } class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : List[Any] = "data2vec-audio" def __init__( self , _lowerCAmelCase=32 , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3072 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-5 , _lowerCAmelCase="gelu" , _lowerCAmelCase=(512, 512, 512, 512, 512, 512, 512) , _lowerCAmelCase=(5, 2, 2, 2, 2, 2, 2) , _lowerCAmelCase=(10, 3, 3, 3, 3, 2, 2) , _lowerCAmelCase=False , _lowerCAmelCase=16 , _lowerCAmelCase=19 , _lowerCAmelCase=5 , _lowerCAmelCase=0.05 , _lowerCAmelCase=10 , _lowerCAmelCase=2 , _lowerCAmelCase=0.0 , _lowerCAmelCase=10 , _lowerCAmelCase=0 , _lowerCAmelCase="sum" , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=256 , _lowerCAmelCase=(512, 512, 512, 512, 1500) , _lowerCAmelCase=(5, 3, 3, 1, 1) , _lowerCAmelCase=(1, 2, 3, 1, 1) , _lowerCAmelCase=512 , _lowerCAmelCase=0 , _lowerCAmelCase=1 , _lowerCAmelCase=2 , _lowerCAmelCase=False , _lowerCAmelCase=3 , _lowerCAmelCase=2 , _lowerCAmelCase=3 , _lowerCAmelCase=None , **_lowerCAmelCase , ) -> Dict: super().__init__(**_lowerCAmelCase , pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase ) _lowerCAmelCase = hidden_size _lowerCAmelCase = feat_extract_activation _lowerCAmelCase = list(_lowerCAmelCase ) _lowerCAmelCase = list(_lowerCAmelCase ) _lowerCAmelCase = list(_lowerCAmelCase ) _lowerCAmelCase = conv_bias _lowerCAmelCase = num_conv_pos_embeddings _lowerCAmelCase = num_conv_pos_embedding_groups _lowerCAmelCase = conv_pos_kernel_size _lowerCAmelCase = len(self.conv_dim ) _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = intermediate_size _lowerCAmelCase = hidden_act _lowerCAmelCase = num_attention_heads _lowerCAmelCase = hidden_dropout _lowerCAmelCase = attention_dropout _lowerCAmelCase = activation_dropout _lowerCAmelCase = feat_proj_dropout _lowerCAmelCase = final_dropout _lowerCAmelCase = layerdrop _lowerCAmelCase = layer_norm_eps _lowerCAmelCase = initializer_range _lowerCAmelCase = vocab_size _lowerCAmelCase = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==" " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =" f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _lowerCAmelCase = mask_time_prob _lowerCAmelCase = mask_time_length _lowerCAmelCase = mask_time_min_masks _lowerCAmelCase = mask_feature_prob _lowerCAmelCase = mask_feature_length _lowerCAmelCase = mask_feature_min_masks # ctc loss _lowerCAmelCase = ctc_loss_reduction _lowerCAmelCase = ctc_zero_infinity # adapter _lowerCAmelCase = add_adapter _lowerCAmelCase = adapter_kernel_size _lowerCAmelCase = adapter_stride _lowerCAmelCase = num_adapter_layers _lowerCAmelCase = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. _lowerCAmelCase = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. _lowerCAmelCase = list(_lowerCAmelCase ) _lowerCAmelCase = list(_lowerCAmelCase ) _lowerCAmelCase = list(_lowerCAmelCase ) _lowerCAmelCase = xvector_output_dim @property def _snake_case ( self ) -> str: return math.prod(self.conv_stride )
18
1
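Why `num1 ^ num2 < 0` works: in two's complement the most significant bit is the sign bit, XOR sets a bit exactly where the operands differ, and Python parses the expression as `(num1 ^ num2) < 0` because comparisons bind more loosely than `^`. A few spot checks:

assert (5 ^ -3) < 0        # opposite signs -> XOR has the sign bit set
assert not (5 ^ 3) < 0     # both positive
assert not (-5 ^ -3) < 0   # both negative
assert not (0 ^ 7) < 0     # zero counts as non-negative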
'''simple docstring''' import copy from typing import Dict, List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING _SCREAMING_SNAKE_CASE = { "facebook/mask2former-swin-small-coco-instance": ( "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json" ) # See all Mask2Former models at https://huggingface.co/models?filter=mask2former } _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : Union[str, Any] = "mask2former" __lowerCamelCase : Optional[Any] = ["swin"] __lowerCamelCase : str = {"hidden_size": "hidden_dim"} def __init__( self , _lowerCAmelCase = None , _lowerCAmelCase = 256 , _lowerCAmelCase = 256 , _lowerCAmelCase = 256 , _lowerCAmelCase = 1024 , _lowerCAmelCase = "relu" , _lowerCAmelCase = 6 , _lowerCAmelCase = 10 , _lowerCAmelCase = 8 , _lowerCAmelCase = 0.0 , _lowerCAmelCase = 2048 , _lowerCAmelCase = False , _lowerCAmelCase = False , _lowerCAmelCase = 4 , _lowerCAmelCase = 255 , _lowerCAmelCase = 100 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 2.0 , _lowerCAmelCase = 5.0 , _lowerCAmelCase = 5.0 , _lowerCAmelCase = 12544 , _lowerCAmelCase = 3.0 , _lowerCAmelCase = 0.75 , _lowerCAmelCase = 0.02 , _lowerCAmelCase = 1.0 , _lowerCAmelCase = True , _lowerCAmelCase = [4, 8, 16, 32] , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> List[Any]: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone." ) _lowerCAmelCase = CONFIG_MAPPING["swin"]( image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=_lowerCAmelCase , out_features=["stage1", "stage2", "stage3", "stage4"] , ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = backbone_config.pop("model_type" ) _lowerCAmelCase = CONFIG_MAPPING[backbone_model_type] _lowerCAmelCase = config_class.from_dict(_lowerCAmelCase ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. 
''' f'''Supported model types: {','.join(self.backbones_supported )}''' ) _lowerCAmelCase = backbone_config _lowerCAmelCase = feature_size _lowerCAmelCase = mask_feature_size _lowerCAmelCase = hidden_dim _lowerCAmelCase = encoder_feedforward_dim _lowerCAmelCase = activation_function _lowerCAmelCase = encoder_layers _lowerCAmelCase = decoder_layers _lowerCAmelCase = num_attention_heads _lowerCAmelCase = dropout _lowerCAmelCase = dim_feedforward _lowerCAmelCase = pre_norm _lowerCAmelCase = enforce_input_projection _lowerCAmelCase = common_stride _lowerCAmelCase = ignore_value _lowerCAmelCase = num_queries _lowerCAmelCase = no_object_weight _lowerCAmelCase = class_weight _lowerCAmelCase = mask_weight _lowerCAmelCase = dice_weight _lowerCAmelCase = train_num_points _lowerCAmelCase = oversample_ratio _lowerCAmelCase = importance_sample_ratio _lowerCAmelCase = init_std _lowerCAmelCase = init_xavier_std _lowerCAmelCase = use_auxiliary_loss _lowerCAmelCase = feature_strides _lowerCAmelCase = output_auxiliary_logits _lowerCAmelCase = decoder_layers super().__init__(**_lowerCAmelCase ) @classmethod def _snake_case ( cls , _lowerCAmelCase , **_lowerCAmelCase ) -> Dict: return cls( backbone_config=_lowerCAmelCase , **_lowerCAmelCase , ) def _snake_case ( self ) -> Dict[str, any]: _lowerCAmelCase = copy.deepcopy(self.__dict__ ) _lowerCAmelCase = self.backbone_config.to_dict() _lowerCAmelCase = self.__class__.model_type return output
18
'''simple docstring''' import torch from diffusers import DDPMParallelScheduler from .test_schedulers import SchedulerCommonTest class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : Any = (DDPMParallelScheduler,) def _snake_case ( self , **_lowerCAmelCase ) -> int: _lowerCAmelCase = { "num_train_timesteps": 1000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "variance_type": "fixed_small", "clip_sample": True, } config.update(**_lowerCAmelCase ) return config def _snake_case ( self ) -> List[Any]: for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=_lowerCAmelCase ) def _snake_case ( self ) -> List[Any]: for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=_lowerCAmelCase , beta_end=_lowerCAmelCase ) def _snake_case ( self ) -> Any: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_lowerCAmelCase ) def _snake_case ( self ) -> Optional[Any]: for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=_lowerCAmelCase ) def _snake_case ( self ) -> Optional[int]: for clip_sample in [True, False]: self.check_over_configs(clip_sample=_lowerCAmelCase ) def _snake_case ( self ) -> List[str]: self.check_over_configs(thresholding=_lowerCAmelCase ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=_lowerCAmelCase , prediction_type=_lowerCAmelCase , sample_max_value=_lowerCAmelCase , ) def _snake_case ( self ) -> int: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=_lowerCAmelCase ) def _snake_case ( self ) -> Dict: for t in [0, 500, 999]: self.check_over_forward(time_step=_lowerCAmelCase ) def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config() _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5 def _snake_case ( self ) -> Tuple: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config() _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) _lowerCAmelCase = len(_lowerCAmelCase ) _lowerCAmelCase = self.dummy_model() _lowerCAmelCase = self.dummy_sample_deter _lowerCAmelCase = self.dummy_sample_deter + 0.1 _lowerCAmelCase = self.dummy_sample_deter - 0.1 _lowerCAmelCase = samplea.shape[0] _lowerCAmelCase = torch.stack([samplea, samplea, samplea] , dim=0 ) _lowerCAmelCase = torch.arange(_lowerCAmelCase )[0:3, None].repeat(1 , _lowerCAmelCase ) _lowerCAmelCase = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) _lowerCAmelCase = scheduler.batch_step_no_noise(_lowerCAmelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) ) _lowerCAmelCase = torch.sum(torch.abs(_lowerCAmelCase ) ) _lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_sum.item() - 1153.1833 ) < 1E-2 assert abs(result_mean.item() - 0.5005 ) < 1E-3 def _snake_case ( self ) -> Dict: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config() _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) _lowerCAmelCase = len(_lowerCAmelCase ) _lowerCAmelCase = self.dummy_model() _lowerCAmelCase = self.dummy_sample_deter 
_lowerCAmelCase = torch.manual_seed(0 ) for t in reversed(range(_lowerCAmelCase ) ): # 1. predict noise residual _lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase ) # 2. predict previous mean of sample x_t-1 _lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample _lowerCAmelCase = pred_prev_sample _lowerCAmelCase = torch.sum(torch.abs(_lowerCAmelCase ) ) _lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_sum.item() - 258.9606 ) < 1E-2 assert abs(result_mean.item() - 0.3372 ) < 1E-3 def _snake_case ( self ) -> Optional[Any]: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config(prediction_type="v_prediction" ) _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) _lowerCAmelCase = len(_lowerCAmelCase ) _lowerCAmelCase = self.dummy_model() _lowerCAmelCase = self.dummy_sample_deter _lowerCAmelCase = torch.manual_seed(0 ) for t in reversed(range(_lowerCAmelCase ) ): # 1. predict noise residual _lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase ) # 2. predict previous mean of sample x_t-1 _lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample _lowerCAmelCase = pred_prev_sample _lowerCAmelCase = torch.sum(torch.abs(_lowerCAmelCase ) ) _lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_sum.item() - 202.0296 ) < 1E-2 assert abs(result_mean.item() - 0.2631 ) < 1E-3 def _snake_case ( self ) -> Dict: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config() _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) _lowerCAmelCase = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=_lowerCAmelCase ) _lowerCAmelCase = scheduler.timesteps for i, timestep in enumerate(_lowerCAmelCase ): if i == len(_lowerCAmelCase ) - 1: _lowerCAmelCase = -1 else: _lowerCAmelCase = timesteps[i + 1] _lowerCAmelCase = scheduler.previous_timestep(_lowerCAmelCase ) _lowerCAmelCase = prev_t.item() self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) def _snake_case ( self ) -> Any: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config() _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) _lowerCAmelCase = [100, 87, 50, 51, 0] with self.assertRaises(_lowerCAmelCase , msg="`custom_timesteps` must be in descending order." ): scheduler.set_timesteps(timesteps=_lowerCAmelCase ) def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config() _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) _lowerCAmelCase = [100, 87, 50, 1, 0] _lowerCAmelCase = len(_lowerCAmelCase ) with self.assertRaises(_lowerCAmelCase , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ): scheduler.set_timesteps(num_inference_steps=_lowerCAmelCase , timesteps=_lowerCAmelCase ) def _snake_case ( self ) -> Optional[int]: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config() _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) _lowerCAmelCase = [scheduler.config.num_train_timesteps] with self.assertRaises( _lowerCAmelCase , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ): scheduler.set_timesteps(timesteps=_lowerCAmelCase )
18
1
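The scheduler tests above all exercise the same reverse-diffusion loop. A stripped-down sketch of that loop using the plain (non-parallel) `DDPMScheduler` from diffusers, with random noise standing in for a trained UNet's prediction:

import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(50)  # run 50 denoising steps instead of all 1000

generator = torch.manual_seed(0)
sample = torch.randn(1, 3, 8, 8, generator=generator)

for t in scheduler.timesteps:
    noise_pred = torch.randn_like(sample)  # placeholder for model(sample, t)
    sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample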
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# The obfuscated row assigned every structure (and the submodule lists) to the
# same `_SCREAMING_SNAKE_CASE` name; the trailing `_LazyModule(...,
# _import_structure, ...)` call shows the intended variable is `_import_structure`,
# restored here, along with the `sys.modules[__name__]` assignment it feeds.
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
18
'''simple docstring''' import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = {"vocab_file": "spiece.model"} _SCREAMING_SNAKE_CASE = { "vocab_file": { "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model", } } class lowerCAmelCase_ ( __magic_name__ ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<sep>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase="<cls>" , _lowerCAmelCase="<mask>" , _lowerCAmelCase=["<eop>", "<eod>"] , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> None: _lowerCAmelCase = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else mask_token _lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=_lowerCAmelCase , remove_space=_lowerCAmelCase , keep_accents=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , ) _lowerCAmelCase = 3 _lowerCAmelCase = do_lower_case _lowerCAmelCase = remove_space _lowerCAmelCase = keep_accents _lowerCAmelCase = vocab_file _lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_lowerCAmelCase ) try: import jieba except ModuleNotFoundError as error: raise error.__class__( "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. " "See https://pypi.org/project/jieba/ for installation." 
) _lowerCAmelCase = jieba _lowerCAmelCase = str.maketrans(" \n" , "\u2582\u2583" ) @property # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size def _snake_case ( self ) -> Optional[int]: return len(self.sp_model ) def _snake_case ( self ) -> Optional[int]: _lowerCAmelCase = {self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> Tuple: _lowerCAmelCase = self.__dict__.copy() _lowerCAmelCase = None return state def __setstate__( self , _lowerCAmelCase ) -> Dict: _lowerCAmelCase = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): _lowerCAmelCase = {} _lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _snake_case ( self , _lowerCAmelCase ) -> str: if self.remove_space: _lowerCAmelCase = " ".join(inputs.strip().split() ) else: _lowerCAmelCase = inputs _lowerCAmelCase = outputs.replace("``" , "\"" ).replace("''" , "\"" ) if not self.keep_accents: _lowerCAmelCase = unicodedata.normalize("NFKD" , _lowerCAmelCase ) _lowerCAmelCase = "".join([c for c in outputs if not unicodedata.combining(_lowerCAmelCase )] ) if self.do_lower_case: _lowerCAmelCase = outputs.lower() return outputs def _snake_case ( self , _lowerCAmelCase ) -> List[str]: _lowerCAmelCase = self.preprocess_text(_lowerCAmelCase ) _lowerCAmelCase = self.sp_model.encode(_lowerCAmelCase , out_type=_lowerCAmelCase ) _lowerCAmelCase = [] for piece in pieces: if len(_lowerCAmelCase ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit(): _lowerCAmelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(_lowerCAmelCase , "" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: _lowerCAmelCase = cur_pieces[1:] else: _lowerCAmelCase = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(_lowerCAmelCase ) else: new_pieces.append(_lowerCAmelCase ) return new_pieces def _snake_case ( self , _lowerCAmelCase ) -> str: return self.sp_model.PieceToId(_lowerCAmelCase ) def _snake_case ( self , _lowerCAmelCase ) -> Optional[Any]: return self.sp_model.IdToPiece(_lowerCAmelCase ) def _snake_case ( self , _lowerCAmelCase ) -> Optional[Any]: _lowerCAmelCase = "".join(_lowerCAmelCase ).replace(_lowerCAmelCase , " " ).strip() return out_string def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> List[int]: _lowerCAmelCase = [self.sep_token_id] _lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase ) if token_ids_a is not None: return ([0] * len(_lowerCAmelCase )) + [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] return ([0] * len(_lowerCAmelCase )) + [1, 1] def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> List[int]: _lowerCAmelCase = [self.sep_token_id] _lowerCAmelCase = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> Tuple[str]: if not os.path.isdir(_lowerCAmelCase ): 
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _lowerCAmelCase = os.path.join( _lowerCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_lowerCAmelCase , "wb" ) as fi: _lowerCAmelCase = self.sp_model.serialized_model_proto() fi.write(_lowerCAmelCase ) return (out_vocab_file,) def _snake_case ( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: _lowerCAmelCase = super()._decode(*_lowerCAmelCase , **_lowerCAmelCase ) _lowerCAmelCase = text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" ) return text
18
1
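The DistilBERT `__init__` in the row above gates every framework-specific import behind an availability check so that importing the package never fails outright. A hand-rolled sketch of the same guard, without the transformers-internal `_LazyModule` machinery:

import importlib.util


def is_available(package: str) -> bool:
    """Return True if `package` can be imported in this environment."""
    return importlib.util.find_spec(package) is not None


if is_available("torch"):
    import torch  # noqa: F401  # torch-backed symbols would be registered here
else:
    torch = None  # callers are expected to check before use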
'''simple docstring''' import datasets from .evaluate import evaluate _SCREAMING_SNAKE_CASE = "\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n" _SCREAMING_SNAKE_CASE = "\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n" _SCREAMING_SNAKE_CASE = "\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the SQuAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]\n >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]\n >>> squad_metric = datasets.load_metric(\"squad\")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): def _snake_case ( self ) -> List[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": {"id": datasets.Value("string" ), "prediction_text": datasets.Value("string" )}, "references": { "id": datasets.Value("string" ), "answers": datasets.features.Sequence( { "text": datasets.Value("string" ), "answer_start": datasets.Value("int32" ), } ), }, } ) , codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> int: _lowerCAmelCase = {prediction["id"]: prediction["prediction_text"] for prediction in predictions} _lowerCAmelCase = [ { "paragraphs": [ { "qas": [ { "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]], "id": ref["id"], } for ref in references ] } ] } ] _lowerCAmelCase = evaluate(dataset=_lowerCAmelCase , predictions=_lowerCAmelCase ) return score
18
'''simple docstring''' from sklearn.metrics import mean_squared_error import datasets _SCREAMING_SNAKE_CASE = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n" _SCREAMING_SNAKE_CASE = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n" _SCREAMING_SNAKE_CASE = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. 
])}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): def _snake_case ( self ) -> Dict: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[ "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html" ] , ) def _snake_case ( self ) -> Tuple: if self.config_name == "multilist": return { "predictions": datasets.Sequence(datasets.Value("float" ) ), "references": datasets.Sequence(datasets.Value("float" ) ), } else: return { "predictions": datasets.Value("float" ), "references": datasets.Value("float" ), } def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase="uniform_average" , _lowerCAmelCase=True ) -> Union[str, Any]: _lowerCAmelCase = mean_squared_error( _lowerCAmelCase , _lowerCAmelCase , sample_weight=_lowerCAmelCase , multioutput=_lowerCAmelCase , squared=_lowerCAmelCase ) return {"mse": mse}
18
1
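The numbers below reproduce the worked examples embedded in the MSE metric's docstring above; `squared=False` switches sklearn to RMSE, exactly as the metric's wrapper forwards it.

from sklearn.metrics import mean_squared_error

predictions = [2.5, 0.0, 2, 8]
references = [3, -0.5, 2, 7]

mse = mean_squared_error(references, predictions)                  # 0.375
rmse = mean_squared_error(references, predictions, squared=False)  # ~0.6124

assert abs(mse - 0.375) < 1e-12
assert abs(rmse - 0.375**0.5) < 1e-12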
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}


class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
18
'''simple docstring'''


def different_signs(num1: int, num2: int) -> bool:
    """Return True iff the two integers have opposite signs (same fix as the
    identical row earlier: the body's undefined ``numa`` is ``num1 ^ num2``)."""
    return num1 ^ num2 < 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
18
1
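Every config class in these rows follows one convention: each `__init__` keyword is stored as a same-named attribute so the whole object can round-trip through a dict. A framework-free sketch of that pattern (class and field names are illustrative only):

class TinyConfig:
    model_type = "tiny"

    def __init__(self, vocab_size: int = 50267, hidden_size: int = 768):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size

    def to_dict(self) -> dict:
        return dict(self.__dict__, model_type=self.model_type)


config = TinyConfig(hidden_size=1024)
assert config.to_dict() == {"vocab_size": 50267, "hidden_size": 1024, "model_type": "tiny"}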
'''simple docstring'''
from typing import Callable, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/xprophetnet-large-wiki100-cased": (
        "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
    ),
}


class XLMProphetNetConfig(PretrainedConfig):
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 30522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`."
        )
18
'''simple docstring'''
from __future__ import annotations


def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    """Return the first ``nth_term`` terms of the P-series 1 + 1/2^p + 1/3^p + ...

    The obfuscated row gave both parameters the same name and reassigned a
    single local three times; the bottom-of-file call ``p_series(nth_term,
    power)`` fixes the intended names.
    """
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    print(p_series(nth_term, power))
18
1
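A small numeric aside on the P-series the helper above formats as strings: for p = 2 the partial sums converge to pi^2 / 6 (the Basel problem), which makes a handy sanity check:

import math

partial_sum = sum(1 / n**2 for n in range(1, 10_001))
assert abs(partial_sum - math.pi**2 / 6) < 1e-3  # the tail beyond n=10_000 is ~1e-4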
'''simple docstring''' import copy import json import os import tempfile from transformers import is_torch_available from .test_configuration_utils import config_common_kwargs class lowerCAmelCase_ ( __magic_name__ ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase=None , **_lowerCAmelCase ) -> Optional[int]: _lowerCAmelCase = parent _lowerCAmelCase = config_class _lowerCAmelCase = has_text_modality _lowerCAmelCase = kwargs _lowerCAmelCase = common_properties def _snake_case ( self ) -> Dict: _lowerCAmelCase = self.config_class(**self.inputs_dict ) _lowerCAmelCase = ( ["hidden_size", "num_attention_heads", "num_hidden_layers"] if self.common_properties is None else self.common_properties ) # Add common fields for text models if self.has_text_modality: common_properties.extend(["vocab_size"] ) # Test that config has the common properties as getters for prop in common_properties: self.parent.assertTrue(hasattr(_lowerCAmelCase , _lowerCAmelCase ) , msg=f'''`{prop}` does not exist''' ) # Test that config has the common properties as setter for idx, name in enumerate(_lowerCAmelCase ): try: setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) self.parent.assertEqual( getattr(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase , msg=f'''`{name} value {idx} expected, but was {getattr(_lowerCAmelCase , _lowerCAmelCase )}''' ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass # Test if config class can be called with Config(prop_name=..) for idx, name in enumerate(_lowerCAmelCase ): try: _lowerCAmelCase = self.config_class(**{name: idx} ) self.parent.assertEqual( getattr(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase , msg=f'''`{name} value {idx} expected, but was {getattr(_lowerCAmelCase , _lowerCAmelCase )}''' ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass def _snake_case ( self ) -> Tuple: _lowerCAmelCase = self.config_class(**self.inputs_dict ) _lowerCAmelCase = json.loads(config.to_json_string() ) for key, value in self.inputs_dict.items(): self.parent.assertEqual(obj[key] , _lowerCAmelCase ) def _snake_case ( self ) -> List[str]: _lowerCAmelCase = self.config_class(**self.inputs_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _lowerCAmelCase = os.path.join(_lowerCAmelCase , "config.json" ) config_first.to_json_file(_lowerCAmelCase ) _lowerCAmelCase = self.config_class.from_json_file(_lowerCAmelCase ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def _snake_case ( self ) -> Optional[int]: _lowerCAmelCase = self.config_class(**self.inputs_dict ) with tempfile.TemporaryDirectory() as tmpdirname: config_first.save_pretrained(_lowerCAmelCase ) _lowerCAmelCase = self.config_class.from_pretrained(_lowerCAmelCase ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def _snake_case ( self ) -> Optional[int]: _lowerCAmelCase = self.config_class(**self.inputs_dict ) _lowerCAmelCase = "test" with tempfile.TemporaryDirectory() as tmpdirname: _lowerCAmelCase = os.path.join(_lowerCAmelCase , _lowerCAmelCase ) config_first.save_pretrained(_lowerCAmelCase ) _lowerCAmelCase = self.config_class.from_pretrained(_lowerCAmelCase , subfolder=_lowerCAmelCase ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def _snake_case ( self ) -> Dict: 
_lowerCAmelCase = self.config_class(**self.inputs_dict , num_labels=5 ) self.parent.assertEqual(len(config.idalabel ) , 5 ) self.parent.assertEqual(len(config.labelaid ) , 5 ) _lowerCAmelCase = 3 self.parent.assertEqual(len(config.idalabel ) , 3 ) self.parent.assertEqual(len(config.labelaid ) , 3 ) def _snake_case ( self ) -> str: if self.config_class.is_composition: return _lowerCAmelCase = self.config_class() self.parent.assertIsNotNone(_lowerCAmelCase ) def _snake_case ( self ) -> List[str]: _lowerCAmelCase = copy.deepcopy(_lowerCAmelCase ) _lowerCAmelCase = self.config_class(**_lowerCAmelCase ) _lowerCAmelCase = [] for key, value in config_common_kwargs.items(): if key == "torch_dtype": if not is_torch_available(): continue else: import torch if config.torch_dtype != torch.floataa: wrong_values.append(("torch_dtype", config.torch_dtype, torch.floataa) ) elif getattr(_lowerCAmelCase , _lowerCAmelCase ) != value: wrong_values.append((key, getattr(_lowerCAmelCase , _lowerCAmelCase ), value) ) if len(_lowerCAmelCase ) > 0: _lowerCAmelCase = "\n".join([f'''- {v[0]}: got {v[1]} instead of {v[2]}''' for v in wrong_values] ) raise ValueError(f'''The following keys were not properly set in the config:\n{errors}''' ) def _snake_case ( self ) -> Dict: self.create_and_test_config_common_properties() self.create_and_test_config_to_json_string() self.create_and_test_config_to_json_file() self.create_and_test_config_from_and_save_pretrained() self.create_and_test_config_from_and_save_pretrained_subfolder() self.create_and_test_config_with_num_labels() self.check_config_can_be_init_without_params() self.check_config_arguments_init()
18
'''simple docstring''' from ..utils import DummyObject, requires_backends class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : int = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : int = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Dict = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : int = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Optional[int] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: 
requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Union[str, Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Optional[int] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Union[str, Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(cls , ["torch"] ) def __a(*SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : int ): '''simple docstring''' requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] ) def __a(*SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : str ): '''simple docstring''' requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] ) def __a(*SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : List[Any] ): '''simple docstring''' requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] ) def __a(*SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : List[str] ): '''simple docstring''' requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] ) def __a(*SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : List[str] ): '''simple docstring''' requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] ) def __a(*SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : str ): '''simple docstring''' requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] ) def __a(*SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Dict ): '''simple docstring''' requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Dict = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Union[str, Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(cls , ["torch"] ) class 
lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Optional[Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Any = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Optional[int] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[str] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Any = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[str] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , 
**_lowerCAmelCase ) -> Any: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[str] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Optional[int] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Union[str, Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Optional[int] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : str = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Tuple = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Union[str, Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(cls , ["torch"] ) 
@classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[str] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Any = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Union[str, Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Union[str, Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : int = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Tuple = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : str = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Dict = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: 
requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Any = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[str] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : int = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Dict = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : str = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : int = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(self , ["torch"] ) @classmethod def 
_snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : List[str] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Tuple = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Union[str, Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : str = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(cls , ["torch"] ) class lowerCAmelCase_ ( metaclass=__magic_name__ ): __lowerCamelCase : Union[str, Any] = ["torch"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(self , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str: requires_backends(cls , ["torch"] ) @classmethod def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict: requires_backends(cls , ["torch"] )
18
1
'''simple docstring'''
from typing import List, Optional, Tuple, Union

import torch

from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
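# Illustrative usage sketch, not part of the original file: running the pipeline
# defined above end to end. The export name `DanceDiffusionPipeline` and the
# "harmonai/maestro-150k" checkpoint id are assumptions used for demonstration.
from diffusers import DanceDiffusionPipeline

pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
output = pipe(batch_size=1, num_inference_steps=50, audio_length_in_s=4.0)
waveform = output.audios[0]  # numpy array of shape (channels, sample_size)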
18
'''simple docstring''' import argparse import json import math import os import time import traceback import zipfile from collections import Counter import requests def __a(SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Tuple=None ): '''simple docstring''' _lowerCAmelCase = None if token is not None: _lowerCAmelCase = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''} _lowerCAmelCase = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100''' _lowerCAmelCase = requests.get(SCREAMING_SNAKE_CASE_ , headers=SCREAMING_SNAKE_CASE_ ).json() _lowerCAmelCase = {} try: job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} ) _lowerCAmelCase = math.ceil((result["total_count"] - 100) / 100 ) for i in range(SCREAMING_SNAKE_CASE_ ): _lowerCAmelCase = requests.get(url + F'''&page={i + 2}''' , headers=SCREAMING_SNAKE_CASE_ ).json() job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} ) return job_links except Exception: print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' ) return {} def __a(SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict=None ): '''simple docstring''' _lowerCAmelCase = None if token is not None: _lowerCAmelCase = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''} _lowerCAmelCase = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100''' _lowerCAmelCase = requests.get(SCREAMING_SNAKE_CASE_ , headers=SCREAMING_SNAKE_CASE_ ).json() _lowerCAmelCase = {} try: artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} ) _lowerCAmelCase = math.ceil((result["total_count"] - 100) / 100 ) for i in range(SCREAMING_SNAKE_CASE_ ): _lowerCAmelCase = requests.get(url + F'''&page={i + 2}''' , headers=SCREAMING_SNAKE_CASE_ ).json() artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} ) return artifacts except Exception: print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' ) return {} def __a(SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Dict ): '''simple docstring''' _lowerCAmelCase = None if token is not None: _lowerCAmelCase = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''} _lowerCAmelCase = requests.get(SCREAMING_SNAKE_CASE_ , headers=SCREAMING_SNAKE_CASE_ , allow_redirects=SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = result.headers["Location"] _lowerCAmelCase = requests.get(SCREAMING_SNAKE_CASE_ , allow_redirects=SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , F'''{artifact_name}.zip''' ) with open(SCREAMING_SNAKE_CASE_ , "wb" ) as fp: fp.write(response.content ) def __a(SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any]=None ): '''simple docstring''' _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = None with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ ) as z: for filename in z.namelist(): if not os.path.isdir(SCREAMING_SNAKE_CASE_ ): # read the file if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]: with z.open(SCREAMING_SNAKE_CASE_ ) as f: for line in f: _lowerCAmelCase = line.decode("UTF-8" ).strip() if filename == "failures_line.txt": try: # `error_line` is the place where `error` occurs _lowerCAmelCase = line[: line.index(": " 
)] _lowerCAmelCase = line[line.index(": " ) + len(": " ) :] errors.append([error_line, error] ) except Exception: # skip un-related lines pass elif filename == "summary_short.txt" and line.startswith("FAILED " ): # `test` is the test method that failed _lowerCAmelCase = line[len("FAILED " ) :] failed_tests.append(SCREAMING_SNAKE_CASE_ ) elif filename == "job_name.txt": _lowerCAmelCase = line if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ): raise ValueError( F'''`errors` and `failed_tests` should have the same number of elements. Got {len(SCREAMING_SNAKE_CASE_ )} for `errors` ''' F'''and {len(SCREAMING_SNAKE_CASE_ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some''' " problem." ) _lowerCAmelCase = None if job_name and job_links: _lowerCAmelCase = job_links.get(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # A list with elements of the form (line of error, error, failed test) _lowerCAmelCase = [x + [y] + [job_link] for x, y in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )] return result def __a(SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple=None ): '''simple docstring''' _lowerCAmelCase = [] _lowerCAmelCase = [os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for p in os.listdir(SCREAMING_SNAKE_CASE_ ) if p.endswith(".zip" )] for p in paths: errors.extend(get_errors_from_single_artifact(SCREAMING_SNAKE_CASE_ , job_links=SCREAMING_SNAKE_CASE_ ) ) return errors def __a(SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str=None ): '''simple docstring''' _lowerCAmelCase = Counter() counter.update([x[1] for x in logs] ) _lowerCAmelCase = counter.most_common() _lowerCAmelCase = {} for error, count in counts: if error_filter is None or error not in error_filter: _lowerCAmelCase = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]} _lowerCAmelCase = dict(sorted(r.items() , key=lambda SCREAMING_SNAKE_CASE_ : item[1]["count"] , reverse=SCREAMING_SNAKE_CASE_ ) ) return r def __a(SCREAMING_SNAKE_CASE_ : List[str] ): '''simple docstring''' _lowerCAmelCase = test.split("::" )[0] if test.startswith("tests/models/" ): _lowerCAmelCase = test.split("/" )[2] else: _lowerCAmelCase = None return test def __a(SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple=None ): '''simple docstring''' _lowerCAmelCase = [(x[0], x[1], get_model(x[2] )) for x in logs] _lowerCAmelCase = [x for x in logs if x[2] is not None] _lowerCAmelCase = {x[2] for x in logs} _lowerCAmelCase = {} for test in tests: _lowerCAmelCase = Counter() # count by errors in `test` counter.update([x[1] for x in logs if x[2] == test] ) _lowerCAmelCase = counter.most_common() _lowerCAmelCase = {error: count for error, count in counts if (error_filter is None or error not in error_filter)} _lowerCAmelCase = sum(error_counts.values() ) if n_errors > 0: _lowerCAmelCase = {"count": n_errors, "errors": error_counts} _lowerCAmelCase = dict(sorted(r.items() , key=lambda SCREAMING_SNAKE_CASE_ : item[1]["count"] , reverse=SCREAMING_SNAKE_CASE_ ) ) return r def __a(SCREAMING_SNAKE_CASE_ : Optional[int] ): '''simple docstring''' _lowerCAmelCase = "| no. 
| error | status |" _lowerCAmelCase = "|-:|:-|:-|" _lowerCAmelCase = [header, sep] for error in reduced_by_error: _lowerCAmelCase = reduced_by_error[error]["count"] _lowerCAmelCase = F'''| {count} | {error[:100]} | |''' lines.append(SCREAMING_SNAKE_CASE_ ) return "\n".join(SCREAMING_SNAKE_CASE_ ) def __a(SCREAMING_SNAKE_CASE_ : Optional[int] ): '''simple docstring''' _lowerCAmelCase = "| model | no. of errors | major error | count |" _lowerCAmelCase = "|-:|-:|-:|-:|" _lowerCAmelCase = [header, sep] for model in reduced_by_model: _lowerCAmelCase = reduced_by_model[model]["count"] _lowerCAmelCase , _lowerCAmelCase = list(reduced_by_model[model]["errors"].items() )[0] _lowerCAmelCase = F'''| {model} | {count} | {error[:60]} | {_count} |''' lines.append(SCREAMING_SNAKE_CASE_ ) return "\n".join(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE = argparse.ArgumentParser() # Required parameters parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.") parser.add_argument( "--output_dir", type=str, required=True, help="Where to store the downloaded artifacts and other result files.", ) parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.") _SCREAMING_SNAKE_CASE = parser.parse_args() os.makedirs(args.output_dir, exist_ok=True) _SCREAMING_SNAKE_CASE = get_job_links(args.workflow_run_id, token=args.token) _SCREAMING_SNAKE_CASE = {} # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee. # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`. if _job_links: for k, v in _job_links.items(): # This is how GitHub actions combine job names. if " / " in k: _SCREAMING_SNAKE_CASE = k.find(" / ") _SCREAMING_SNAKE_CASE = k[index + len(" / ") :] _SCREAMING_SNAKE_CASE = v with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp: json.dump(job_links, fp, ensure_ascii=False, indent=4) _SCREAMING_SNAKE_CASE = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) for idx, (name, url) in enumerate(artifacts.items()): download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) _SCREAMING_SNAKE_CASE = get_all_errors(args.output_dir, job_links=job_links) # `e[1]` is the error _SCREAMING_SNAKE_CASE = Counter() counter.update([e[1] for e in errors]) # print the top 30 most common test errors _SCREAMING_SNAKE_CASE = counter.most_common(30) for item in most_common: print(item) with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp: json.dump(errors, fp, ensure_ascii=False, indent=4) _SCREAMING_SNAKE_CASE = reduce_by_error(errors) _SCREAMING_SNAKE_CASE = reduce_by_model(errors) _SCREAMING_SNAKE_CASE = make_github_table(reduced_by_error) _SCREAMING_SNAKE_CASE = make_github_table_per_model(reduced_by_model) with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp: fp.write(sa) with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp: fp.write(sa)
18
1
'''simple docstring'''
import logging

from transformers.configuration_utils import PretrainedConfig


logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """A BERT-like configuration with additional parameters for pruning/masking (pruning method, mask init and scale)."""

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
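# Illustrative sketch, not part of the original file: instantiating the
# configuration defined above. The keyword values below are examples only.
config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
print(config.pruning_method)  # -> "topK"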
18
'''simple docstring''' import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : List[str] = (DPMSolverSinglestepScheduler,) __lowerCamelCase : int = (("num_inference_steps", 25),) def _snake_case ( self , **_lowerCAmelCase ) -> Any: _lowerCAmelCase = { "num_train_timesteps": 1000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "solver_order": 2, "prediction_type": "epsilon", "thresholding": False, "sample_max_value": 1.0, "algorithm_type": "dpmsolver++", "solver_type": "midpoint", "lambda_min_clipped": -float("inf" ), "variance_type": None, } config.update(**_lowerCAmelCase ) return config def _snake_case ( self , _lowerCAmelCase=0 , **_lowerCAmelCase ) -> List[Any]: _lowerCAmelCase = dict(self.forward_default_kwargs ) _lowerCAmelCase = kwargs.pop("num_inference_steps" , _lowerCAmelCase ) _lowerCAmelCase = self.dummy_sample _lowerCAmelCase = 0.1 * sample _lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: _lowerCAmelCase = self.get_scheduler_config(**_lowerCAmelCase ) _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) scheduler.set_timesteps(_lowerCAmelCase ) # copy over dummy past residuals _lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_lowerCAmelCase ) _lowerCAmelCase = scheduler_class.from_pretrained(_lowerCAmelCase ) new_scheduler.set_timesteps(_lowerCAmelCase ) # copy over dummy past residuals _lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] _lowerCAmelCase , _lowerCAmelCase = sample, sample for t in range(_lowerCAmelCase , time_step + scheduler.config.solver_order + 1 ): _lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample _lowerCAmelCase = new_scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def _snake_case ( self ) -> int: pass def _snake_case ( self , _lowerCAmelCase=0 , **_lowerCAmelCase ) -> Optional[int]: _lowerCAmelCase = dict(self.forward_default_kwargs ) _lowerCAmelCase = kwargs.pop("num_inference_steps" , _lowerCAmelCase ) _lowerCAmelCase = self.dummy_sample _lowerCAmelCase = 0.1 * sample _lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: _lowerCAmelCase = self.get_scheduler_config() _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) scheduler.set_timesteps(_lowerCAmelCase ) # copy over dummy past residuals (must be after setting timesteps) _lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_lowerCAmelCase ) _lowerCAmelCase = scheduler_class.from_pretrained(_lowerCAmelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(_lowerCAmelCase ) # copy over dummy past residual (must be after setting timesteps) _lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] _lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample _lowerCAmelCase = new_scheduler.step(_lowerCAmelCase , _lowerCAmelCase , 
_lowerCAmelCase , **_lowerCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def _snake_case ( self , _lowerCAmelCase=None , **_lowerCAmelCase ) -> Tuple: if scheduler is None: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config(**_lowerCAmelCase ) _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config(**_lowerCAmelCase ) _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) _lowerCAmelCase = 10 _lowerCAmelCase = self.dummy_model() _lowerCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(_lowerCAmelCase ) for i, t in enumerate(scheduler.timesteps ): _lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase ) _lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample return sample def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) _lowerCAmelCase = 50 _lowerCAmelCase = self.dummy_model() _lowerCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(_lowerCAmelCase ) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:] ): _lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase ) _lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample _lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2574 ) < 1E-3 def _snake_case ( self ) -> Optional[Any]: for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=_lowerCAmelCase ) def _snake_case ( self ) -> List[Any]: # make sure that iterating over schedulers with same config names gives same results # for defaults _lowerCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) _lowerCAmelCase = self.full_loop(scheduler=_lowerCAmelCase ) _lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2791 ) < 1E-3 _lowerCAmelCase = DEISMultistepScheduler.from_config(scheduler.config ) _lowerCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config ) _lowerCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config ) _lowerCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config ) _lowerCAmelCase = self.full_loop(scheduler=_lowerCAmelCase ) _lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2791 ) < 1E-3 def _snake_case ( self ) -> str: self.check_over_configs(thresholding=_lowerCAmelCase ) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=_lowerCAmelCase , prediction_type=_lowerCAmelCase , sample_max_value=_lowerCAmelCase , algorithm_type="dpmsolver++" , solver_order=_lowerCAmelCase , solver_type=_lowerCAmelCase , ) def _snake_case ( self ) -> Dict: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_lowerCAmelCase ) def _snake_case ( self ) -> Union[str, Any]: for algorithm_type in ["dpmsolver", "dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=_lowerCAmelCase , solver_type=_lowerCAmelCase , prediction_type=_lowerCAmelCase , algorithm_type=_lowerCAmelCase , ) _lowerCAmelCase = self.full_loop( 
solver_order=_lowerCAmelCase , solver_type=_lowerCAmelCase , prediction_type=_lowerCAmelCase , algorithm_type=_lowerCAmelCase , ) assert not torch.isnan(_lowerCAmelCase ).any(), "Samples have nan numbers" def _snake_case ( self ) -> Optional[Any]: self.check_over_configs(lower_order_final=_lowerCAmelCase ) self.check_over_configs(lower_order_final=_lowerCAmelCase ) def _snake_case ( self ) -> Optional[Any]: self.check_over_configs(lambda_min_clipped=-float("inf" ) ) self.check_over_configs(lambda_min_clipped=-5.1 ) def _snake_case ( self ) -> str: self.check_over_configs(variance_type=_lowerCAmelCase ) self.check_over_configs(variance_type="learned_range" ) def _snake_case ( self ) -> int: for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=_lowerCAmelCase , time_step=0 ) def _snake_case ( self ) -> Any: _lowerCAmelCase = self.full_loop() _lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2791 ) < 1E-3 def _snake_case ( self ) -> List[str]: _lowerCAmelCase = self.full_loop(use_karras_sigmas=_lowerCAmelCase ) _lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2248 ) < 1E-3 def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = self.full_loop(prediction_type="v_prediction" ) _lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_mean.item() - 0.1453 ) < 1E-3 def _snake_case ( self ) -> Any: _lowerCAmelCase = self.full_loop(prediction_type="v_prediction" , use_karras_sigmas=_lowerCAmelCase ) _lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_mean.item() - 0.0649 ) < 1E-3 def _snake_case ( self ) -> List[Any]: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config(thresholding=_lowerCAmelCase , dynamic_thresholding_ratio=0 ) _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) _lowerCAmelCase = 10 _lowerCAmelCase = self.dummy_model() _lowerCAmelCase = self.dummy_sample_deter.half() scheduler.set_timesteps(_lowerCAmelCase ) for i, t in enumerate(scheduler.timesteps ): _lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase ) _lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample assert sample.dtype == torch.floataa
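# Illustrative sketch, not part of the original test file: building the default
# scheduler configuration that the tests above exercise. The keyword names mirror
# the `get_scheduler_config` dictionary in the test class.
from diffusers import DPMSolverSinglestepScheduler

scheduler = DPMSolverSinglestepScheduler(
    num_train_timesteps=1000,
    beta_start=0.0001,
    beta_end=0.02,
    beta_schedule="linear",
    solver_order=2,
    prediction_type="epsilon",
    algorithm_type="dpmsolver++",
    solver_type="midpoint",
)
scheduler.set_timesteps(25)  # matches the ("num_inference_steps", 25) default above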
18
1
'''simple docstring'''
import copy
import random

from transformers import CLIPTokenizer


class MultiTokenCLIPTokenizer(CLIPTokenizer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent"
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
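# Illustrative sketch, not part of the original file: registering a multi-vector
# placeholder token and encoding a prompt with it. The CLIP checkpoint id is an
# assumption; any CLIPTokenizer-compatible checkpoint works.
tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
encoding = tokenizer("a photo of <cat-toy>", vector_shuffle=True)
# "<cat-toy>" is expanded to "<cat-toy>_0 ... <cat-toy>_3" before tokenization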
18
'''simple docstring'''
from __future__ import annotations


def mean(nums: list) -> float:
    """
    Return the arithmetic mean of a list of numbers.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    >>> mean([5, 10, 15, 20, 25, 30, 35])
    20.0
    >>> mean([1, 2, 3, 4, 5, 6, 7, 8])
    4.5
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
18
1
'''simple docstring'''
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PyTorch weight names to the corresponding Flax names and reshape the tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert the PyTorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
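# Illustrative sketch, not part of the original file: `rename_key` rewrites
# PyTorch-style indexed module names into the underscore form Flax expects,
# before the tuple-key renaming and reshaping step runs.
print(rename_key("down_blocks.0.resnets.1.conv1.weight"))
# -> "down_blocks_0.resnets_1.conv1.weight"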
18
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread

from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers import AutoModelForCausalLM


@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
18
1
'''simple docstring''' import argparse import json import os import re import torch from transformers import BloomConfig, BloomModel from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME from transformers.utils import logging logging.set_verbosity_info() _SCREAMING_SNAKE_CASE = [ "word_embeddings_layernorm.weight", "word_embeddings_layernorm.bias", "input_layernorm.weight", "input_layernorm.bias", "post_attention_layernorm.weight", "post_attention_layernorm.bias", "self_attention.dense.bias", "mlp.dense_4h_to_h.bias", "ln_f.weight", "ln_f.bias", ] _SCREAMING_SNAKE_CASE = [ "mlp.dense_4h_to_h.weight", "self_attention.dense.weight", ] def __a(SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any ): '''simple docstring''' _lowerCAmelCase = { "word_embeddings.weight": "word_embeddings.weight", "word_embeddings.norm.weight": "word_embeddings_layernorm.weight", "word_embeddings.norm.bias": "word_embeddings_layernorm.bias", "weight": "ln_f.weight", "bias": "ln_f.bias", } if key in layer_rename_map: return layer_rename_map[key] # Handle transformer blocks _lowerCAmelCase = int(re.match(R".*layer_(\d*).*" , SCREAMING_SNAKE_CASE_ )[1] ) layer_number -= 3 return F'''h.{layer_number}.''' + key def __a(SCREAMING_SNAKE_CASE_ : Dict ): '''simple docstring''' if dtype == torch.bool: return 1 / 8 _lowerCAmelCase = re.search(R"[^\d](\d+)$" , str(SCREAMING_SNAKE_CASE_ ) ) if bit_search is None: raise ValueError(F'''`dtype` is not a valid dtype: {dtype}.''' ) _lowerCAmelCase = int(bit_search.groups()[0] ) return bit_size // 8 def __a(SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] ): '''simple docstring''' if bloom_config_file == "": _lowerCAmelCase = BloomConfig() else: _lowerCAmelCase = BloomConfig.from_json_file(SCREAMING_SNAKE_CASE_ ) if shard_model: _lowerCAmelCase = os.listdir(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = sorted(filter(lambda SCREAMING_SNAKE_CASE_ : s.startswith("layer" ) and "model_00" in s , SCREAMING_SNAKE_CASE_ ) ) _lowerCAmelCase = {"weight_map": {}, "metadata": {}} _lowerCAmelCase = 0 _lowerCAmelCase = None _lowerCAmelCase = BloomConfig() for j, file in enumerate(SCREAMING_SNAKE_CASE_ ): print("Processing file: {}".format(SCREAMING_SNAKE_CASE_ ) ) _lowerCAmelCase = None for i in range(SCREAMING_SNAKE_CASE_ ): # load all TP files _lowerCAmelCase = file.replace("model_00" , F'''model_0{i}''' ) _lowerCAmelCase = torch.load(os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , map_location="cpu" ) # Rename keys in the transformers names _lowerCAmelCase = list(temp.keys() ) for key in keys: _lowerCAmelCase = temp.pop(SCREAMING_SNAKE_CASE_ ) if tensors is None: _lowerCAmelCase = temp else: for key in tensors.keys(): if any(key.endswith(SCREAMING_SNAKE_CASE_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425) tensors[key] += temp[key] else: # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel _lowerCAmelCase = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0 # We concatenate these weights accross TP ranks _lowerCAmelCase = torch.cat([tensors[key], temp[key]] , dim=SCREAMING_SNAKE_CASE_ ) # Divide by the number of TP the weights we want to average for key in tensors.keys(): if 
any(key.endswith(SCREAMING_SNAKE_CASE_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): _lowerCAmelCase = tensors[key] / pretraining_tp torch.save( SCREAMING_SNAKE_CASE_ , os.path.join( SCREAMING_SNAKE_CASE_ , "pytorch_model_{}-of-{}.bin".format(str(j + 1 ).zfill(5 ) , str(len(SCREAMING_SNAKE_CASE_ ) ).zfill(5 ) ) , ) , ) for key in tensors.keys(): _lowerCAmelCase = tensors[key] total_size += value.numel() * get_dtype_size(value.dtype ) if key not in index_dict["weight_map"]: _lowerCAmelCase = "pytorch_model_{}-of-{}.bin".format( str(j + 1 ).zfill(5 ) , str(len(SCREAMING_SNAKE_CASE_ ) ).zfill(5 ) ) _lowerCAmelCase = BloomConfig() _lowerCAmelCase = pytorch_dump_folder_path + "/" + CONFIG_NAME _lowerCAmelCase = total_size with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" ) as f: f.write(config.to_json_string() ) with open(os.path.join(SCREAMING_SNAKE_CASE_ , WEIGHTS_NAME + ".index.json" ) , "w" , encoding="utf-8" ) as f: _lowerCAmelCase = json.dumps(SCREAMING_SNAKE_CASE_ , indent=2 , sort_keys=SCREAMING_SNAKE_CASE_ ) + "\n" f.write(SCREAMING_SNAKE_CASE_ ) else: _lowerCAmelCase = BloomModel(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = os.listdir(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = sorted(filter(lambda SCREAMING_SNAKE_CASE_ : s.startswith("layer" ) and "model_00" in s , SCREAMING_SNAKE_CASE_ ) ) _lowerCAmelCase = None for i, file in enumerate(SCREAMING_SNAKE_CASE_ ): _lowerCAmelCase = None for i in range(SCREAMING_SNAKE_CASE_ ): # load all TP files _lowerCAmelCase = file.replace("model_00" , F'''model_0{i}''' ) _lowerCAmelCase = torch.load(os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , map_location="cpu" ) # Rename keys in the transformers names _lowerCAmelCase = list(temp.keys() ) for key in keys: _lowerCAmelCase = temp.pop(SCREAMING_SNAKE_CASE_ ) if tensors is None: _lowerCAmelCase = temp else: for key in tensors.keys(): # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425) if any(key.endswith(SCREAMING_SNAKE_CASE_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): tensors[key] += temp[key] else: # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel _lowerCAmelCase = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0 # We concatenate these weights accross TP ranks _lowerCAmelCase = torch.cat([tensors[key], temp[key]] , dim=SCREAMING_SNAKE_CASE_ ) # Divide by the number of TP the weights we want to average for key in tensors.keys(): if any(key.endswith(SCREAMING_SNAKE_CASE_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): _lowerCAmelCase = tensors[key] / pretraining_tp _lowerCAmelCase = model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ ) assert not other_keys.unexpected_keys, F'''The keys {other_keys.unexpected_keys} are unexpected''' if missing_keys is None: _lowerCAmelCase = set(other_keys.missing_keys ) else: _lowerCAmelCase = missing_keys.intersection(set(other_keys.missing_keys ) ) assert not missing_keys, F'''The keys {missing_keys} are missing''' # Save pytorch-model os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = pytorch_dump_folder_path + "/" + WEIGHTS_NAME _lowerCAmelCase = pytorch_dump_folder_path + "/" + CONFIG_NAME print(F'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''' ) if config.torch_dtype is not None: _lowerCAmelCase = model.to(config.torch_dtype ) 
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE_ ) print(F'''Save configuration file to {pytorch_config_dump_path}''' ) with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE = argparse.ArgumentParser() # Required parameters parser.add_argument( "--bloom_checkpoint_path", default=None, type=str, required=True, help="Path to the Megatron-LM checkpoint path.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--bloom_config_file", default="", type=str, help=( "An optional config json file corresponding to the pre-trained model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--shard_model", action="store_true", help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint", ) parser.add_argument( "--pretraining_tp", default=4, type=int, help="Pretraining TP rank that has been used when training the model in Megatron-LM \n", ) _SCREAMING_SNAKE_CASE = parser.parse_args() convert_bloom_checkpoint_to_pytorch( args.bloom_checkpoint_path, args.bloom_config_file, args.pytorch_dump_folder_path, args.shard_model, args.pretraining_tp, )
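# Illustrative invocation sketch, not part of the original file. The script
# filename and all paths below are placeholders; the flags mirror the argparse
# definitions above.
#
#   python convert_bloom_checkpoint.py \
#       --bloom_checkpoint_path /path/to/megatron_checkpoints \
#       --pytorch_dump_folder_path /path/to/output \
#       --shard_model \
#       --pretraining_tp 4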
18
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json", # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : Union[str, Any] = "blenderbot-small" __lowerCamelCase : Optional[Any] = ["past_key_values"] __lowerCamelCase : str = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self , _lowerCAmelCase=50265 , _lowerCAmelCase=512 , _lowerCAmelCase=8 , _lowerCAmelCase=2048 , _lowerCAmelCase=16 , _lowerCAmelCase=8 , _lowerCAmelCase=2048 , _lowerCAmelCase=16 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase="gelu" , _lowerCAmelCase=512 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1 , _lowerCAmelCase=False , _lowerCAmelCase=0 , _lowerCAmelCase=1 , _lowerCAmelCase=2 , _lowerCAmelCase=2 , **_lowerCAmelCase , ) -> Dict: _lowerCAmelCase = vocab_size _lowerCAmelCase = max_position_embeddings _lowerCAmelCase = d_model _lowerCAmelCase = encoder_ffn_dim _lowerCAmelCase = encoder_layers _lowerCAmelCase = encoder_attention_heads _lowerCAmelCase = decoder_ffn_dim _lowerCAmelCase = decoder_layers _lowerCAmelCase = decoder_attention_heads _lowerCAmelCase = dropout _lowerCAmelCase = attention_dropout _lowerCAmelCase = activation_dropout _lowerCAmelCase = activation_function _lowerCAmelCase = init_std _lowerCAmelCase = encoder_layerdrop _lowerCAmelCase = decoder_layerdrop _lowerCAmelCase = use_cache _lowerCAmelCase = encoder_layers _lowerCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , is_encoder_decoder=_lowerCAmelCase , decoder_start_token_id=_lowerCAmelCase , forced_eos_token_id=_lowerCAmelCase , **_lowerCAmelCase , ) class lowerCAmelCase_ ( __magic_name__ ): @property def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: _lowerCAmelCase = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: _lowerCAmelCase = {0: "batch"} _lowerCAmelCase = {0: "batch", 1: "past_decoder_sequence + sequence"} else: _lowerCAmelCase = {0: "batch", 1: "decoder_sequence"} _lowerCAmelCase = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(_lowerCAmelCase , direction="inputs" ) elif self.task == "causal-lm": # TODO: figure this case out. 
_lowerCAmelCase = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: _lowerCAmelCase , _lowerCAmelCase = self.num_layers for i in range(_lowerCAmelCase ): _lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"} _lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"} else: _lowerCAmelCase = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: _lowerCAmelCase = super().outputs else: _lowerCAmelCase = super(_lowerCAmelCase , self ).outputs if self.use_past: _lowerCAmelCase , _lowerCAmelCase = self.num_layers for i in range(_lowerCAmelCase ): _lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"} _lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"} return common_outputs def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]: _lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # Generate decoder inputs _lowerCAmelCase = seq_length if not self.use_past else 1 _lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) _lowerCAmelCase = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()} _lowerCAmelCase = dict(**_lowerCAmelCase , **_lowerCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch _lowerCAmelCase , _lowerCAmelCase = common_inputs["input_ids"].shape _lowerCAmelCase = common_inputs["decoder_input_ids"].shape[1] _lowerCAmelCase , _lowerCAmelCase = self.num_attention_heads _lowerCAmelCase = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) _lowerCAmelCase = decoder_seq_length + 3 _lowerCAmelCase = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) _lowerCAmelCase = torch.cat( [common_inputs["decoder_attention_mask"], torch.ones(_lowerCAmelCase , _lowerCAmelCase )] , dim=1 ) _lowerCAmelCase = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered _lowerCAmelCase , _lowerCAmelCase = self.num_layers _lowerCAmelCase = min(_lowerCAmelCase , _lowerCAmelCase ) _lowerCAmelCase = max(_lowerCAmelCase , _lowerCAmelCase ) - min_num_layers _lowerCAmelCase = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(_lowerCAmelCase ): common_inputs["past_key_values"].append( ( torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase ), ) ) # TODO: test this. 
_lowerCAmelCase = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(_lowerCAmelCase , _lowerCAmelCase ): common_inputs["past_key_values"].append((torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) ) return common_inputs def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]: _lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch _lowerCAmelCase , _lowerCAmelCase = common_inputs["input_ids"].shape # Not using the same length for past_key_values _lowerCAmelCase = seqlen + 2 _lowerCAmelCase , _lowerCAmelCase = self.num_layers _lowerCAmelCase , _lowerCAmelCase = self.num_attention_heads _lowerCAmelCase = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) _lowerCAmelCase = common_inputs["attention_mask"].dtype _lowerCAmelCase = torch.cat( [common_inputs["attention_mask"], torch.ones(_lowerCAmelCase , _lowerCAmelCase , dtype=_lowerCAmelCase )] , dim=1 ) _lowerCAmelCase = [ (torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) for _ in range(_lowerCAmelCase ) ] return common_inputs def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]: # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. 
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX _lowerCAmelCase = compute_effective_axis_dimension( _lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX _lowerCAmelCase = tokenizer.num_special_tokens_to_add(_lowerCAmelCase ) _lowerCAmelCase = compute_effective_axis_dimension( _lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowerCAmelCase ) # Generate dummy inputs according to compute batch and sequence _lowerCAmelCase = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size _lowerCAmelCase = dict(tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase ) ) return common_inputs def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]: if self.task in ["default", "seq2seq-lm"]: _lowerCAmelCase = self._generate_dummy_inputs_for_default_and_seqaseq_lm( _lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase ) elif self.task == "causal-lm": _lowerCAmelCase = self._generate_dummy_inputs_for_causal_lm( _lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase ) else: _lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase ) return common_inputs def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict: if self.task in ["default", "seq2seq-lm"]: _lowerCAmelCase = super()._flatten_past_key_values_(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) else: _lowerCAmelCase = super(_lowerCAmelCase , self )._flatten_past_key_values_( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
18
1
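The ONNX helpers above hinge on compute_effective_axis_dimension: a dynamic axis, encoded as -1, is swapped for a small fixed size (2 for batch, 8 for tokens by default) so the exporter cannot constant-fold the traced graph around one particular shape. A minimal standalone sketch of that logic, under the assumption that it mirrors the helper's behavior:

def effective_axis_dimension(dimension: int, fixed_dimension: int, num_token_to_add: int = 0) -> int:
    # A dynamic axis is conventionally passed as -1 (or 0); substitute a small
    # fixed size so the ONNX tracer still sees a concrete shape.
    if dimension <= 0:
        dimension = fixed_dimension
    # Leave room for the special tokens the tokenizer will add on top.
    return dimension - num_token_to_add

batch_size = effective_axis_dimension(-1, fixed_dimension=2)                      # -> 2
seq_length = effective_axis_dimension(-1, fixed_dimension=8, num_token_to_add=2)  # -> 6
print(batch_size, seq_length)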
'''simple docstring''' def fibonacci(n: int ) -> int: '''simple docstring''' if n == 1 or not isinstance(n , int ): return 0 elif n == 2: return 1 else: sequence = [0, 1] for i in range(2 , n + 1 ): sequence.append(sequence[i - 1] + sequence[i - 2] ) return sequence[n] def fibonacci_digits_index(n: int ) -> int: '''simple docstring''' digits = 0 index = 2 while digits < n: index += 1 digits = len(str(fibonacci(index ) ) ) return index def solution(n: int = 1000 ) -> int: '''simple docstring''' return fibonacci_digits_index(n ) if __name__ == "__main__": print(solution(int(str(input()).strip())))
18
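The digit search above rebuilds the whole Fibonacci list for every candidate index, which is quadratic overall. Carrying just the last two terms gives the same Project Euler 25 answer in linear time; a sketch:

def fibonacci_digits_index_fast(n: int) -> int:
    # Incremental variant: keep only the last two terms instead of the full list.
    a, b = 1, 1        # F(1), F(2), consistent with the sequence built above
    index = 2
    while len(str(b)) < n:
        a, b = b, a + b
        index += 1
    return index

assert fibonacci_digits_index_fast(3) == 12   # F(12) = 144 is the first 3-digit term
print(fibonacci_digits_index_fast(1000))      # Project Euler 25: 4782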
'''simple docstring''' import re import string import numpy as np import datasets _SCREAMING_SNAKE_CASE = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n" _SCREAMING_SNAKE_CASE = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n" _SCREAMING_SNAKE_CASE = "\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): def _snake_case ( self ) -> List[str]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , 
id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , reference_urls=[] , ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=False , ) -> str: if regexes_to_ignore is not None: for s in regexes_to_ignore: _lowerCAmelCase = np.array([re.sub(_lowerCAmelCase , "" , _lowerCAmelCase ) for x in predictions] ) _lowerCAmelCase = np.array([re.sub(_lowerCAmelCase , "" , _lowerCAmelCase ) for x in references] ) else: _lowerCAmelCase = np.asarray(_lowerCAmelCase ) _lowerCAmelCase = np.asarray(_lowerCAmelCase ) if ignore_case: _lowerCAmelCase = np.char.lower(_lowerCAmelCase ) _lowerCAmelCase = np.char.lower(_lowerCAmelCase ) if ignore_punctuation: _lowerCAmelCase = string.punctuation.maketrans("" , "" , string.punctuation ) _lowerCAmelCase = np.char.translate(_lowerCAmelCase , table=_lowerCAmelCase ) _lowerCAmelCase = np.char.translate(_lowerCAmelCase , table=_lowerCAmelCase ) if ignore_numbers: _lowerCAmelCase = string.digits.maketrans("" , "" , string.digits ) _lowerCAmelCase = np.char.translate(_lowerCAmelCase , table=_lowerCAmelCase ) _lowerCAmelCase = np.char.translate(_lowerCAmelCase , table=_lowerCAmelCase ) _lowerCAmelCase = predictions == references return {"exact_match": np.mean(_lowerCAmelCase ) * 100}
18
1
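Under the hood the metric above is a normalization pipeline followed by a mean, and the order matters: the ignore-regexes are applied before case, punctuation and digit handling. A pure-Python restatement (these function names are illustrative, not part of the metric's API):

import re
import string

def normalize(text, regexes_to_ignore=None, ignore_case=False,
              ignore_punctuation=False, ignore_numbers=False):
    # Same order of operations as the metric: regex removal happens first.
    for pattern in regexes_to_ignore or []:
        text = re.sub(pattern, "", text)
    if ignore_case:
        text = text.lower()
    if ignore_punctuation:
        text = text.translate(str.maketrans("", "", string.punctuation))
    if ignore_numbers:
        text = text.translate(str.maketrans("", "", string.digits))
    return text

def exact_match(predictions, references, **kwargs):
    matches = [normalize(p, **kwargs) == normalize(r, **kwargs)
               for p, r in zip(predictions, references)]
    return 100.0 * sum(matches) / len(matches)

print(exact_match(["cat?", "theater"], ["the cat", "theater"]))   # 50.0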
'''simple docstring''' import torch def main(): '''simple docstring''' if torch.cuda.is_available(): num_gpus = torch.cuda.device_count() else: num_gpus = 0 print(F'''Successfully ran on {num_gpus} GPUs''' ) if __name__ == "__main__": main()
18
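A slightly richer variant of the same check that also names each visible device; torch.cuda.get_device_name is part of the stable torch API:

import torch

def describe_devices() -> None:
    # Same availability check as above, plus the device name for each GPU.
    if not torch.cuda.is_available():
        print("Successfully ran on 0 GPUs (CPU only)")
        return
    for i in range(torch.cuda.device_count()):
        print(f"cuda:{i} -> {torch.cuda.get_device_name(i)}")

if __name__ == "__main__":
    describe_devices()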
'''simple docstring''' import warnings from ...utils import logging from .image_processing_yolos import YolosImageProcessor logger = logging.get_logger(__name__) class YolosFeatureExtractor( YolosImageProcessor ): def __init__( self , *args , **kwargs ) -> None: warnings.warn( "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use YolosImageProcessor instead." , FutureWarning , ) super().__init__(*args , **kwargs )
18
1
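The file above is the standard transformers deprecation shim: keep the old class importable as a thin subclass of its replacement and emit a FutureWarning on construction. The same pattern in miniature, with hypothetical class names:

import warnings

class NewImageProcessor:
    def __init__(self, size: int = 224) -> None:
        self.size = size

class OldFeatureExtractor(NewImageProcessor):
    # Deprecation shim: identical behavior, plus a warning at build time.
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

extractor = OldFeatureExtractor(size=512)   # warns once, then works as before
print(extractor.size)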
'''simple docstring''' import timeit import numpy as np import datasets from datasets.arrow_writer import ArrowWriter from datasets.features.features import _ArrayXD def get_duration(func ): '''simple docstring''' def wrapper(*args , **kwargs ): starttime = timeit.default_timer() func(*args , **kwargs ) delta = timeit.default_timer() - starttime return delta wrapper.__name__ = func.__name__ return wrapper def generate_examples(features: dict , num_examples=100 , seq_shapes=None ): '''simple docstring''' dummy_data = [] seq_shapes = seq_shapes or {} for i in range(num_examples ): example = {} for col_id, (k, v) in enumerate(features.items() ): if isinstance(v , _ArrayXD ): data = np.random.rand(*v.shape ).astype(v.dtype ) elif isinstance(v , datasets.Value ): if v.dtype == "string": data = "The small grey turtle was surprisingly fast when challenged." else: data = np.random.randint(10 , size=1 ).astype(v.dtype ).item() elif isinstance(v , datasets.Sequence ): while isinstance(v , datasets.Sequence ): v = v.feature shape = seq_shapes[k] data = np.random.rand(*shape ).astype(v.dtype ) example[k] = data dummy_data.append((i, example) ) return dummy_data def generate_example_dataset(dataset_path , features , num_examples=100 , seq_shapes=None ): '''simple docstring''' dummy_data = generate_examples(features , num_examples=num_examples , seq_shapes=seq_shapes ) with ArrowWriter(features=features , path=dataset_path ) as writer: for key, record in dummy_data: example = features.encode_example(record ) writer.write(example ) num_final_examples, num_bytes = writer.finalize() if not num_final_examples == num_examples: raise ValueError( f'''Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.''' ) dataset = datasets.Dataset.from_file(filename=dataset_path , info=datasets.DatasetInfo(features=features ) ) return dataset
18
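A usage sketch for generate_example_dataset above; the output path, feature spec and sequence shapes are arbitrary choices for illustration:

import datasets

features = datasets.Features(
    {
        "text": datasets.Value("string"),
        "label": datasets.Value("int64"),
        "vec": datasets.Sequence(datasets.Value("float32")),
    }
)
# Writes 50 synthetic rows to an Arrow file, then reloads them as a Dataset.
dataset = generate_example_dataset(
    "/tmp/bench.arrow", features, num_examples=50, seq_shapes={"vec": (8,)}
)
print(dataset)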
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = { "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json", "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json", } class FalconConfig( PretrainedConfig ): model_type = "falcon" keys_to_ignore_at_inference = ["past_key_values"] def __init__( self , vocab_size=65024 , hidden_size=4544 , num_hidden_layers=32 , num_attention_heads=71 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , use_cache=True , hidden_dropout=0.0 , attention_dropout=0.0 , num_kv_heads=None , alibi=False , new_decoder_architecture=False , multi_query=True , parallel_attn=True , bias=False , bos_token_id=11 , eos_token_id=11 , **kwargs , ): self.vocab_size = vocab_size # Backward compatibility with n_embed kwarg n_embed = kwargs.pop("n_embed" , None ) self.hidden_size = hidden_size if n_embed is None else n_embed self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.layer_norm_epsilon = layer_norm_epsilon self.initializer_range = initializer_range self.use_cache = use_cache self.hidden_dropout = hidden_dropout self.attention_dropout = attention_dropout self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads self.alibi = alibi self.new_decoder_architecture = new_decoder_architecture self.multi_query = multi_query # Ignored when new_decoder_architecture is True self.parallel_attn = parallel_attn self.bias = bias super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs ) @property def head_dim( self ): return self.hidden_size // self.num_attention_heads @property def rotary( self ): return not self.alibi
18
1
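The two properties at the end of the config above reduce to simple arithmetic: the per-head width is the hidden size split evenly across attention heads, and rotary position embeddings are active exactly when ALiBi is not. With the 7B-style defaults shown:

# Plain arithmetic mirror of head_dim and rotary for the defaults above.
hidden_size = 4544
num_attention_heads = 71
alibi = False

head_dim = hidden_size // num_attention_heads   # 4544 // 71 = 64
rotary = not alibi                              # rotary embeddings unless ALiBi is on
print(head_dim, rotary)                         # 64 True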
'''simple docstring''' from importlib import import_module from .logging import get_logger _SCREAMING_SNAKE_CASE = get_logger(__name__) class lowerCAmelCase_ : def __init__( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> int: _lowerCAmelCase = attrs or [] if module is not None: for key in module.__dict__: if key in attrs or not key.startswith("__" ): setattr(self , _lowerCAmelCase , getattr(_lowerCAmelCase , _lowerCAmelCase ) ) _lowerCAmelCase = module._original_module if isinstance(_lowerCAmelCase , _PatchedModuleObj ) else module class lowerCAmelCase_ : __lowerCamelCase : Dict = [] def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None ) -> Union[str, Any]: _lowerCAmelCase = obj _lowerCAmelCase = target _lowerCAmelCase = new _lowerCAmelCase = target.split("." )[0] _lowerCAmelCase = {} _lowerCAmelCase = attrs or [] def __enter__( self ) -> Optional[int]: *_lowerCAmelCase , _lowerCAmelCase = self.target.split("." ) # Patch modules: # it's used to patch attributes of submodules like "os.path.join"; # in this case we need to patch "os" and "os.path" for i in range(len(_lowerCAmelCase ) ): try: _lowerCAmelCase = import_module(".".join(submodules[: i + 1] ) ) except ModuleNotFoundError: continue # We iterate over all the globals in self.obj in case we find "os" or "os.path" for attr in self.obj.__dir__(): _lowerCAmelCase = getattr(self.obj , _lowerCAmelCase ) # We don't check for the name of the global, but rather if its value *is* "os" or "os.path". # This allows to patch renamed modules like "from os import path as ospath". if obj_attr is submodule or ( (isinstance(_lowerCAmelCase , _PatchedModuleObj ) and obj_attr._original_module is submodule) ): _lowerCAmelCase = obj_attr # patch at top level setattr(self.obj , _lowerCAmelCase , _PatchedModuleObj(_lowerCAmelCase , attrs=self.attrs ) ) _lowerCAmelCase = getattr(self.obj , _lowerCAmelCase ) # construct lower levels patches for key in submodules[i + 1 :]: setattr(_lowerCAmelCase , _lowerCAmelCase , _PatchedModuleObj(getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) , attrs=self.attrs ) ) _lowerCAmelCase = getattr(_lowerCAmelCase , _lowerCAmelCase ) # finally set the target attribute setattr(_lowerCAmelCase , _lowerCAmelCase , self.new ) # Patch attribute itself: # it's used for builtins like "open", # and also to patch "os.path.join" we may also need to patch "join" # itself if it was imported as "from os.path import join". if submodules: # if it's an attribute of a submodule like "os.path.join" try: _lowerCAmelCase = getattr(import_module(".".join(_lowerCAmelCase ) ) , _lowerCAmelCase ) except (AttributeError, ModuleNotFoundError): return # We iterate over all the globals in self.obj in case we find "os.path.join" for attr in self.obj.__dir__(): # We don't check for the name of the global, but rather if its value *is* "os.path.join". # This allows to patch renamed attributes like "from os.path import join as pjoin". 
if getattr(self.obj , _lowerCAmelCase ) is attr_value: _lowerCAmelCase = getattr(self.obj , _lowerCAmelCase ) setattr(self.obj , _lowerCAmelCase , self.new ) elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open" _lowerCAmelCase = globals()["__builtins__"][target_attr] setattr(self.obj , _lowerCAmelCase , self.new ) else: raise RuntimeError(f'''Tried to patch attribute {target_attr} instead of a submodule.''' ) def __exit__( self , *_lowerCAmelCase ) -> str: for attr in list(self.original ): setattr(self.obj , _lowerCAmelCase , self.original.pop(_lowerCAmelCase ) ) def _snake_case ( self ) -> Optional[int]: self.__enter__() self._active_patches.append(self ) def _snake_case ( self ) -> Any: try: self._active_patches.remove(self ) except ValueError: # If the patch hasn't been started this will fail return None return self.__exit__()
18
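Because the patcher walks a module's globals, it is applied to the module that did the importing rather than to os itself, which is how it catches aliases like "from os import path as ospath". A hedged usage sketch; upstream this class ships as datasets.utils.patching.patch_submodule:

import os
import types

from datasets.utils.patching import patch_submodule

def fake_join(*parts: str) -> str:
    return "::".join(parts)

# Stand-in for "a module that imported os"; normally this would be one of
# your own modules rather than one built on the fly.
mymod = types.ModuleType("mymod")
mymod.os = os

with patch_submodule(mymod, "os.path.join", fake_join):
    print(mymod.os.path.join("a", "b"))   # a::b, patched through mymod's view of os
print(os.path.join("a", "b"))             # a/b on POSIX, the real module is untouched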
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging logger = logging.get_logger(__name__) DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = { "facebook/deit-base-distilled-patch16-224": ( "https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json" ), # See all DeiT models at https://huggingface.co/models?filter=deit } class DeiTConfig( PretrainedConfig ): model_type = "deit" def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , encoder_stride=16 , **kwargs , ): super().__init__(**kwargs ) self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.qkv_bias = qkv_bias self.encoder_stride = encoder_stride class DeiTOnnxConfig( OnnxConfig ): torch_onnx_minimum_version = version.parse("1.11" ) @property def inputs( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def atol_for_validation( self ) -> float: return 1E-4
18
1
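A usage sketch for the two classes above, assuming they are exposed under their upstream transformers names (DeiTConfig and DeiTOnnxConfig):

from transformers import DeiTConfig
from transformers.models.deit.configuration_deit import DeiTOnnxConfig

config = DeiTConfig()                    # defaults: 768 hidden units, 12 layers, 224px images
onnx_config = DeiTOnnxConfig(config)
print(onnx_config.inputs)                # pixel_values with batch/num_channels/height/width axes
print(onnx_config.atol_for_validation)   # 1e-04, the validation tolerance returned above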
'''simple docstring''' import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase_ : def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=True , _lowerCAmelCase=99 , _lowerCAmelCase=32 , _lowerCAmelCase=5 , _lowerCAmelCase=4 , _lowerCAmelCase=37 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=512 , _lowerCAmelCase=16 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , ) -> str: _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = seq_length _lowerCAmelCase = is_training _lowerCAmelCase = use_input_mask _lowerCAmelCase = use_token_type_ids _lowerCAmelCase = use_labels _lowerCAmelCase = vocab_size _lowerCAmelCase = hidden_size _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = num_attention_heads _lowerCAmelCase = intermediate_size _lowerCAmelCase = hidden_act _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = max_position_embeddings _lowerCAmelCase = type_vocab_size _lowerCAmelCase = type_sequence_label_size _lowerCAmelCase = initializer_range _lowerCAmelCase = num_labels _lowerCAmelCase = num_choices _lowerCAmelCase = scope def _snake_case ( self ) -> Tuple: _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowerCAmelCase = None if self.use_input_mask: _lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _lowerCAmelCase = None if self.use_token_type_ids: _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = None if self.use_labels: _lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _lowerCAmelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _snake_case ( self ) -> Optional[int]: return BioGptConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: 
_lowerCAmelCase = BioGptModel(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase ) _lowerCAmelCase = model(_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> Tuple: _lowerCAmelCase = BioGptForCausalLM(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase ) -> Union[str, Any]: _lowerCAmelCase = BioGptModel(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() # create attention mask _lowerCAmelCase = torch.ones(input_ids.shape , dtype=torch.long , device=_lowerCAmelCase ) _lowerCAmelCase = self.seq_length // 2 _lowerCAmelCase = 0 # first forward pass _lowerCAmelCase , _lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase ).to_tuple() # create hypothetical next token and extent to next_input_ids _lowerCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size ) # change a random masked slice from input_ids _lowerCAmelCase = ids_tensor((1,) , _lowerCAmelCase ).item() + 1 _lowerCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 ) _lowerCAmelCase = random_other_next_tokens # append to next input_ids and attn_mask _lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) _lowerCAmelCase = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=_lowerCAmelCase )] , dim=1 , ) # get two different outputs _lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )["last_hidden_state"] _lowerCAmelCase = model(_lowerCAmelCase , past_key_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )["last_hidden_state"] # select random slice _lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() _lowerCAmelCase = output_from_no_past[:, -1, random_slice_idx].detach() _lowerCAmelCase = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase ) -> Dict: _lowerCAmelCase = BioGptModel(config=_lowerCAmelCase ).to(_lowerCAmelCase ).eval() _lowerCAmelCase = torch.ones(input_ids.shape , dtype=torch.long , device=_lowerCAmelCase ) # first forward pass _lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , use_cache=_lowerCAmelCase ) _lowerCAmelCase , _lowerCAmelCase = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids _lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) _lowerCAmelCase = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and _lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) _lowerCAmelCase = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) _lowerCAmelCase = model(_lowerCAmelCase , 
attention_mask=_lowerCAmelCase )["last_hidden_state"] _lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase )[ "last_hidden_state" ] # select random slice _lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() _lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach() _lowerCAmelCase = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase , _lowerCAmelCase=False ) -> Optional[int]: _lowerCAmelCase = BioGptForCausalLM(_lowerCAmelCase ) model.to(_lowerCAmelCase ) if gradient_checkpointing: model.gradient_checkpointing_enable() _lowerCAmelCase = model(_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) result.loss.backward() def _snake_case ( self , _lowerCAmelCase , *_lowerCAmelCase ) -> int: _lowerCAmelCase = BioGptModel(_lowerCAmelCase ) _lowerCAmelCase = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers ) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 ) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase ) -> List[str]: _lowerCAmelCase = self.num_labels _lowerCAmelCase = BioGptForTokenClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _snake_case ( self ) -> int: _lowerCAmelCase = self.prepare_config_and_inputs() ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) = config_and_inputs _lowerCAmelCase = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class lowerCAmelCase_ ( __magic_name__ ,__magic_name__ ,__magic_name__ ,unittest.TestCase ): __lowerCamelCase : Optional[int] = ( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification) if is_torch_available() else () ) __lowerCamelCase : int = (BioGptForCausalLM,) if is_torch_available() else () __lowerCamelCase : str = ( { "feature-extraction": BioGptModel, "text-classification": BioGptForSequenceClassification, "text-generation": BioGptForCausalLM, "token-classification": BioGptForTokenClassification, "zero-shot": BioGptForSequenceClassification, } if is_torch_available() else {} ) __lowerCamelCase : int = False def _snake_case ( self ) -> str: _lowerCAmelCase = BioGptModelTester(self ) _lowerCAmelCase = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=37 ) def _snake_case ( self ) -> Dict: self.config_tester.run_common_tests() def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_model(*_lowerCAmelCase ) def _snake_case ( self ) -> List[str]: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _lowerCAmelCase = type self.model_tester.create_and_check_model(*_lowerCAmelCase ) def _snake_case ( self ) -> List[Any]: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*_lowerCAmelCase ) def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*_lowerCAmelCase , gradient_checkpointing=_lowerCAmelCase ) def _snake_case ( self ) -> Optional[Any]: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*_lowerCAmelCase ) def _snake_case ( self ) -> int: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_weight_initialization(*_lowerCAmelCase ) def _snake_case ( self ) -> Dict: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*_lowerCAmelCase ) @slow def _snake_case ( self ) -> Tuple: _lowerCAmelCase = BioGptForCausalLM.from_pretrained("microsoft/biogpt" ) model.to(_lowerCAmelCase ) _lowerCAmelCase = BioGptTokenizer.from_pretrained("microsoft/biogpt" ) _lowerCAmelCase = "left" # Define PAD Token = EOS Token = 50256 _lowerCAmelCase = tokenizer.eos_token _lowerCAmelCase = model.config.eos_token_id # use different length sentences to test batching _lowerCAmelCase = [ "Hello, my dog is a little", "Today, I", ] _lowerCAmelCase = tokenizer(_lowerCAmelCase , return_tensors="pt" , padding=_lowerCAmelCase ) _lowerCAmelCase = inputs["input_ids"].to(_lowerCAmelCase ) _lowerCAmelCase = model.generate( input_ids=_lowerCAmelCase , attention_mask=inputs["attention_mask"].to(_lowerCAmelCase ) , ) _lowerCAmelCase = tokenizer(sentences[0] , return_tensors="pt" ).input_ids.to(_lowerCAmelCase ) _lowerCAmelCase = model.generate(input_ids=_lowerCAmelCase ) _lowerCAmelCase = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item() _lowerCAmelCase = tokenizer(sentences[1] , return_tensors="pt" ).input_ids.to(_lowerCAmelCase ) _lowerCAmelCase = model.generate(input_ids=_lowerCAmelCase , max_length=model.config.max_length - num_paddings ) _lowerCAmelCase = tokenizer.batch_decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase ) _lowerCAmelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_lowerCAmelCase ) _lowerCAmelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=_lowerCAmelCase ) _lowerCAmelCase = [ "Hello, my dog is a little bit bigger than a little bit.", "Today, I have a good idea of how to use the information", ] self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase , [non_padded_sentence, padded_sentence] ) @slow def _snake_case ( self ) -> Tuple: for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase = BioGptModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) def _snake_case ( self ) -> int: _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase = 3 _lowerCAmelCase = input_dict["input_ids"] _lowerCAmelCase = input_ids.ne(1 ).to(_lowerCAmelCase ) _lowerCAmelCase = 
ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) _lowerCAmelCase = BioGptForSequenceClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase = 3 _lowerCAmelCase = "multi_label_classification" _lowerCAmelCase = input_dict["input_ids"] _lowerCAmelCase = input_ids.ne(1 ).to(_lowerCAmelCase ) _lowerCAmelCase = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) _lowerCAmelCase = BioGptForSequenceClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @require_torch class lowerCAmelCase_ ( unittest.TestCase ): @slow def _snake_case ( self ) -> str: _lowerCAmelCase = BioGptForCausalLM.from_pretrained("microsoft/biogpt" ) _lowerCAmelCase = torch.tensor([[2, 4805, 9, 656, 21]] ) _lowerCAmelCase = model(_lowerCAmelCase )[0] _lowerCAmelCase = 42384 _lowerCAmelCase = torch.Size((1, 5, vocab_size) ) self.assertEqual(output.shape , _lowerCAmelCase ) _lowerCAmelCase = torch.tensor( [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCAmelCase , atol=1E-4 ) ) @slow def _snake_case ( self ) -> List[str]: _lowerCAmelCase = BioGptTokenizer.from_pretrained("microsoft/biogpt" ) _lowerCAmelCase = BioGptForCausalLM.from_pretrained("microsoft/biogpt" ) model.to(_lowerCAmelCase ) torch.manual_seed(0 ) _lowerCAmelCase = tokenizer("COVID-19 is" , return_tensors="pt" ).to(_lowerCAmelCase ) _lowerCAmelCase = model.generate( **_lowerCAmelCase , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=_lowerCAmelCase , ) _lowerCAmelCase = tokenizer.decode(output_ids[0] , skip_special_tokens=_lowerCAmelCase ) _lowerCAmelCase = ( "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the" " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and" " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK)," " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and" " more than 800,000 deaths." ) self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
18
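Several of the cache tests above reduce to one invariant: decoding a single token with past_key_values must reproduce the tail of a full forward pass. A compact restatement of that check, run here against a tiny randomly initialized checkpoint from the transformers test suite so it does not need the BioGPT weights:

import torch
from transformers import AutoModel

model = AutoModel.from_pretrained("hf-internal-testing/tiny-random-gpt2").eval()
input_ids = torch.randint(0, model.config.vocab_size, (1, 5))

with torch.no_grad():
    full = model(input_ids).last_hidden_state
    past = model(input_ids[:, :-1], use_cache=True).past_key_values
    step = model(input_ids[:, -1:], past_key_values=past).last_hidden_state

# The cached single-step output must agree with the last position of the full pass.
print(torch.allclose(full[:, -1], step[:, 0], atol=1e-3))   # True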
'''simple docstring''' from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = { "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"], "feature_extraction_mctct": ["MCTCTFeatureExtractor"], "processing_mctct": ["MCTCTProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_mctct"] = [ "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST", "MCTCTForCTC", "MCTCTModel", "MCTCTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
18
1
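The _LazyModule indirection above keeps importing the package cheap: nothing underneath is imported until an attribute is actually touched. The same idea in a dozen stdlib-only lines:

import importlib

class LazyModule:
    """Resolve attributes to real imports on first access."""

    def __init__(self, import_structure: dict) -> None:
        # Invert {module: [names]} into {name: module} for O(1) lookup.
        self._name_to_module = {
            name: module
            for module, names in import_structure.items()
            for name in names
        }

    def __getattr__(self, name: str):
        # Only called when normal lookup fails, i.e. for the lazy names.
        module = importlib.import_module(self._name_to_module[name])
        return getattr(module, name)

lazy = LazyModule({"json": ["dumps"], "math": ["sqrt"]})
print(lazy.dumps({"ok": True}), lazy.sqrt(2))   # imports resolve here, on first use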
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ConditionalDetrImageProcessor class lowerCAmelCase_ ( unittest.TestCase ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase=7 , _lowerCAmelCase=3 , _lowerCAmelCase=30 , _lowerCAmelCase=400 , _lowerCAmelCase=True , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=True , _lowerCAmelCase=1 / 255 , _lowerCAmelCase=True , ) -> int: # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p _lowerCAmelCase = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333} _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = num_channels _lowerCAmelCase = min_resolution _lowerCAmelCase = max_resolution _lowerCAmelCase = do_resize _lowerCAmelCase = size _lowerCAmelCase = do_normalize _lowerCAmelCase = image_mean _lowerCAmelCase = image_std _lowerCAmelCase = do_rescale _lowerCAmelCase = rescale_factor _lowerCAmelCase = do_pad def _snake_case ( self ) -> Dict: return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase=False ) -> Union[str, Any]: if not batched: _lowerCAmelCase = image_inputs[0] if isinstance(_lowerCAmelCase , Image.Image ): _lowerCAmelCase , _lowerCAmelCase = image.size else: _lowerCAmelCase , _lowerCAmelCase = image.shape[1], image.shape[2] if w < h: _lowerCAmelCase = int(self.size["shortest_edge"] * h / w ) _lowerCAmelCase = self.size["shortest_edge"] elif w > h: _lowerCAmelCase = self.size["shortest_edge"] _lowerCAmelCase = int(self.size["shortest_edge"] * w / h ) else: _lowerCAmelCase = self.size["shortest_edge"] _lowerCAmelCase = self.size["shortest_edge"] else: _lowerCAmelCase = [] for image in image_inputs: _lowerCAmelCase , _lowerCAmelCase = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) _lowerCAmelCase = max(_lowerCAmelCase , key=lambda _lowerCAmelCase : item[0] )[0] _lowerCAmelCase = max(_lowerCAmelCase , key=lambda _lowerCAmelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class lowerCAmelCase_ ( __magic_name__ ,unittest.TestCase ): __lowerCamelCase : Union[str, Any] = ConditionalDetrImageProcessor if is_vision_available() else None def _snake_case ( self ) -> List[str]: _lowerCAmelCase = ConditionalDetrImageProcessingTester(self ) @property def _snake_case ( self ) -> str: return self.image_processor_tester.prepare_image_processor_dict() def _snake_case ( self ) -> Optional[Any]: _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCAmelCase , "image_mean" ) ) self.assertTrue(hasattr(_lowerCAmelCase , "image_std" ) ) self.assertTrue(hasattr(_lowerCAmelCase , "do_normalize" ) ) self.assertTrue(hasattr(_lowerCAmelCase , "do_resize" ) ) self.assertTrue(hasattr(_lowerCAmelCase , "size" ) ) def _snake_case ( self ) -> 
Dict: _lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} ) self.assertEqual(image_processor.do_pad , _lowerCAmelCase ) _lowerCAmelCase = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_lowerCAmelCase ) self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} ) self.assertEqual(image_processor.do_pad , _lowerCAmelCase ) def _snake_case ( self ) -> Tuple: pass def _snake_case ( self ) -> List[str]: # Initialize image_processing _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase , Image.Image ) # Test not batched input _lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values _lowerCAmelCase , _lowerCAmelCase = self.image_processor_tester.get_expected_values(_lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _lowerCAmelCase , _lowerCAmelCase = self.image_processor_tester.get_expected_values(_lowerCAmelCase , batched=_lowerCAmelCase ) _lowerCAmelCase = image_processing(_lowerCAmelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _snake_case ( self ) -> Optional[Any]: # Initialize image_processing _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase , np.ndarray ) # Test not batched input _lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values _lowerCAmelCase , _lowerCAmelCase = self.image_processor_tester.get_expected_values(_lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _lowerCAmelCase = image_processing(_lowerCAmelCase , return_tensors="pt" ).pixel_values _lowerCAmelCase , _lowerCAmelCase = self.image_processor_tester.get_expected_values(_lowerCAmelCase , batched=_lowerCAmelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _snake_case ( self ) -> Optional[Any]: # Initialize image_processing _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase , torch.Tensor ) # Test not batched input _lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values _lowerCAmelCase , _lowerCAmelCase = self.image_processor_tester.get_expected_values(_lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _lowerCAmelCase = 
image_processing(_lowerCAmelCase , return_tensors="pt" ).pixel_values _lowerCAmelCase , _lowerCAmelCase = self.image_processor_tester.get_expected_values(_lowerCAmelCase , batched=_lowerCAmelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def _snake_case ( self ) -> str: # prepare image and target _lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f: _lowerCAmelCase = json.loads(f.read() ) _lowerCAmelCase = {"image_id": 39769, "annotations": target} # encode them _lowerCAmelCase = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" ) _lowerCAmelCase = image_processing(images=_lowerCAmelCase , annotations=_lowerCAmelCase , return_tensors="pt" ) # verify pixel values _lowerCAmelCase = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding["pixel_values"].shape , _lowerCAmelCase ) _lowerCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , _lowerCAmelCase , atol=1E-4 ) ) # verify area _lowerCAmelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , _lowerCAmelCase ) ) # verify boxes _lowerCAmelCase = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , _lowerCAmelCase ) _lowerCAmelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , _lowerCAmelCase , atol=1E-3 ) ) # verify image_id _lowerCAmelCase = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , _lowerCAmelCase ) ) # verify is_crowd _lowerCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , _lowerCAmelCase ) ) # verify class_labels _lowerCAmelCase = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , _lowerCAmelCase ) ) # verify orig_size _lowerCAmelCase = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , _lowerCAmelCase ) ) # verify size _lowerCAmelCase = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , _lowerCAmelCase ) ) @slow def _snake_case ( self ) -> List[Any]: # prepare image, target and masks_path _lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f: _lowerCAmelCase = json.loads(f.read() ) _lowerCAmelCase = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target} _lowerCAmelCase = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" ) # encode them _lowerCAmelCase = ConditionalDetrImageProcessor(format="coco_panoptic" ) _lowerCAmelCase = image_processing(images=_lowerCAmelCase , annotations=_lowerCAmelCase , masks_path=_lowerCAmelCase , return_tensors="pt" ) # verify pixel values _lowerCAmelCase = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding["pixel_values"].shape , _lowerCAmelCase ) _lowerCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , _lowerCAmelCase , atol=1E-4 ) ) # verify area _lowerCAmelCase = 
torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , _lowerCAmelCase ) ) # verify boxes _lowerCAmelCase = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , _lowerCAmelCase ) _lowerCAmelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , _lowerCAmelCase , atol=1E-3 ) ) # verify image_id _lowerCAmelCase = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , _lowerCAmelCase ) ) # verify is_crowd _lowerCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , _lowerCAmelCase ) ) # verify class_labels _lowerCAmelCase = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , _lowerCAmelCase ) ) # verify masks _lowerCAmelCase = 822873 self.assertEqual(encoding["labels"][0]["masks"].sum().item() , _lowerCAmelCase ) # verify orig_size _lowerCAmelCase = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , _lowerCAmelCase ) ) # verify size _lowerCAmelCase = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , _lowerCAmelCase ) )
18
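get_expected_values above encodes the shortest-edge resize rule: scale so the shorter side lands exactly on size["shortest_edge"] while preserving the aspect ratio (the longest_edge cap of 1333 is deliberately out of reach in these tests and is omitted here as well). Standalone:

def shortest_edge_size(height: int, width: int, shortest_edge: int = 18):
    # Scale the short side to `shortest_edge`; the long side follows the ratio.
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge

print(shortest_edge_size(400, 300))   # (24, 18): width is the short side
print(shortest_edge_size(300, 300))   # (18, 18): a square stays square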
'''simple docstring''' from typing import Dict, Iterable, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : int = ["pixel_values"] def __init__( self , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = PILImageResampling.BICUBIC , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = True , _lowerCAmelCase = 1 / 255 , _lowerCAmelCase = True , _lowerCAmelCase = IMAGENET_DEFAULT_MEAN , _lowerCAmelCase = IMAGENET_DEFAULT_STD , **_lowerCAmelCase , ) -> None: super().__init__(**_lowerCAmelCase ) _lowerCAmelCase = size if size is not None else {"shortest_edge": 224} _lowerCAmelCase = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase ) _lowerCAmelCase = crop_size if crop_size is not None else {"height": 224, "width": 224} _lowerCAmelCase = get_size_dict(_lowerCAmelCase , param_name="crop_size" ) _lowerCAmelCase = do_resize _lowerCAmelCase = size _lowerCAmelCase = resample _lowerCAmelCase = do_center_crop _lowerCAmelCase = crop_size _lowerCAmelCase = do_rescale _lowerCAmelCase = rescale_factor _lowerCAmelCase = do_normalize _lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN _lowerCAmelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = PILImageResampling.BICUBIC , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> np.ndarray: _lowerCAmelCase = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase ) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: _lowerCAmelCase = int((256 / 224) * size["shortest_edge"] ) _lowerCAmelCase = get_resize_output_image_size(_lowerCAmelCase , size=_lowerCAmelCase , default_to_square=_lowerCAmelCase ) _lowerCAmelCase = {"height": output_size[0], "width": output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( f'''Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}''' ) return resize( _lowerCAmelCase , size=(size_dict["height"], size_dict["width"]) , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> np.ndarray: _lowerCAmelCase = get_size_dict(_lowerCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''Size dict must have keys \'height\' and \'width\'. 
Got {size.keys()}''' ) return center_crop(_lowerCAmelCase , size=(size["height"], size["width"]) , data_format=_lowerCAmelCase , **_lowerCAmelCase ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> np.ndarray: return rescale(_lowerCAmelCase , scale=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> np.ndarray: return normalize(_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = ChannelDimension.FIRST , **_lowerCAmelCase , ) -> BatchFeature: _lowerCAmelCase = do_resize if do_resize is not None else self.do_resize _lowerCAmelCase = resample if resample is not None else self.resample _lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop _lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale _lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor _lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize _lowerCAmelCase = image_mean if image_mean is not None else self.image_mean _lowerCAmelCase = image_std if image_std is not None else self.image_std _lowerCAmelCase = size if size is not None else self.size _lowerCAmelCase = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase ) _lowerCAmelCase = crop_size if crop_size is not None else self.crop_size _lowerCAmelCase = get_size_dict(_lowerCAmelCase , param_name="crop_size" ) _lowerCAmelCase = make_list_of_images(_lowerCAmelCase ) if not valid_images(_lowerCAmelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. _lowerCAmelCase = [to_numpy_array(_lowerCAmelCase ) for image in images] if do_resize: _lowerCAmelCase = [self.resize(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) for image in images] if do_center_crop: _lowerCAmelCase = [self.center_crop(_lowerCAmelCase , _lowerCAmelCase ) for image in images] if do_rescale: _lowerCAmelCase = [self.rescale(_lowerCAmelCase , _lowerCAmelCase ) for image in images] if do_normalize: _lowerCAmelCase = [self.normalize(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) for image in images] _lowerCAmelCase = [to_channel_dimension_format(_lowerCAmelCase , _lowerCAmelCase ) for image in images] _lowerCAmelCase = {"pixel_values": images} return BatchFeature(data=_lowerCAmelCase , tensor_type=_lowerCAmelCase )
18
1
'''simple docstring'''
from typing import List, Optional, Union

import torch

from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
    replace_example_docstring,
)


_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)  # pylint: disable=invalid-name

_SCREAMING_SNAKE_CASE = "\n    Examples:\n    ```py\n    >>> import torch\n    >>> import numpy as np\n\n    >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n    >>> from transformers import pipeline\n    >>> from diffusers.utils import load_image\n\n\n    >>> def make_hint(image, depth_estimator):\n    ...     image = depth_estimator(image)[\"depth\"]\n    ...     image = np.array(image)\n    ...     image = image[:, :, None]\n    ...     image = np.concatenate([image, image, image], axis=2)\n    ...     detected_map = torch.from_numpy(image).float() / 255.0\n    ...     hint = detected_map.permute(2, 0, 1)\n    ...     return hint\n\n\n    >>> depth_estimator = pipeline(\"depth-estimation\")\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n    ...     \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n    ... )\n    >>> pipe_prior = pipe_prior.to(\"cuda\")\n\n    >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n    ...     \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16\n    ... )\n    >>> pipe = pipe.to(\"cuda\")\n\n\n    >>> img = load_image(\n    ...     \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n    ...     \"/kandinsky/cat.png\"\n    ... ).resize((768, 768))\n\n    >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")\n\n    >>> prompt = \"A robot, 4k photo\"\n    >>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"\n\n    >>> generator = torch.Generator(device=\"cuda\").manual_seed(43)\n\n    >>> image_emb, zero_image_emb = pipe_prior(\n    ...     prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n    ... ).to_tuple()\n\n    >>> images = pipe(\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     hint=hint,\n    ...     num_inference_steps=50,\n    ...     generator=generator,\n    ...     height=768,\n    ...     width=768,\n    ... ).images\n\n    >>> images[0].save(\"robot_cat.png\")\n    ```\n"


def __a(SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Tuple=8 ):
    '''simple docstring'''
    _lowerCAmelCase = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    _lowerCAmelCase = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


class lowerCAmelCase_ ( __magic_name__ ):
    def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> Optional[int]:
        super().__init__()
        self.register_modules(
            unet=_lowerCAmelCase , scheduler=_lowerCAmelCase , movq=_lowerCAmelCase , )
        _lowerCAmelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1)

    def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
        if latents is None:
            _lowerCAmelCase = randn_tensor(_lowerCAmelCase , generator=_lowerCAmelCase , device=_lowerCAmelCase , dtype=_lowerCAmelCase )
        else:
            if latents.shape != shape:
                raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
            _lowerCAmelCase = latents.to(_lowerCAmelCase )

        _lowerCAmelCase = latents * scheduler.init_noise_sigma
        return latents

    def _snake_case ( self , _lowerCAmelCase=0 ) -> Optional[Any]:
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`" )

        _lowerCAmelCase = torch.device(f'''cuda:{gpu_id}''' )

        _lowerCAmelCase = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(_lowerCAmelCase , _lowerCAmelCase )

    def _snake_case ( self , _lowerCAmelCase=0 ) -> Tuple:
        if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )

        _lowerCAmelCase = torch.device(f'''cuda:{gpu_id}''' )

        if self.device.type != "cpu":
            self.to("cpu" , silence_dtype_warnings=_lowerCAmelCase )
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        _lowerCAmelCase = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _lowerCAmelCase , _lowerCAmelCase = cpu_offload_with_hook(_lowerCAmelCase , _lowerCAmelCase , prev_module_hook=_lowerCAmelCase )

        # We'll offload the last model manually.
        _lowerCAmelCase = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _snake_case ( self ) -> int:
        if not hasattr(self.unet , "_hf_hook" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(_lowerCAmelCase , "_hf_hook" )
                and hasattr(module._hf_hook , "execution_device" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device

    @torch.no_grad()
    @replace_example_docstring(_lowerCAmelCase )
    def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 512 , _lowerCAmelCase = 512 , _lowerCAmelCase = 100 , _lowerCAmelCase = 4.0 , _lowerCAmelCase = 1 , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = "pil" , _lowerCAmelCase = True , ) -> List[str]:
        _lowerCAmelCase = self._execution_device

        _lowerCAmelCase = guidance_scale > 1.0

        if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
            _lowerCAmelCase = torch.cat(_lowerCAmelCase , dim=0 )
        if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
            _lowerCAmelCase = torch.cat(_lowerCAmelCase , dim=0 )
        if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
            _lowerCAmelCase = torch.cat(_lowerCAmelCase , dim=0 )

        _lowerCAmelCase = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            _lowerCAmelCase = image_embeds.repeat_interleave(_lowerCAmelCase , dim=0 )
            _lowerCAmelCase = negative_image_embeds.repeat_interleave(_lowerCAmelCase , dim=0 )
            _lowerCAmelCase = hint.repeat_interleave(_lowerCAmelCase , dim=0 )

            _lowerCAmelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_lowerCAmelCase )
            _lowerCAmelCase = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=_lowerCAmelCase )

        self.scheduler.set_timesteps(_lowerCAmelCase , device=_lowerCAmelCase )
        _lowerCAmelCase = self.scheduler.timesteps

        _lowerCAmelCase = self.movq.config.latent_channels

        _lowerCAmelCase , _lowerCAmelCase = downscale_height_and_width(_lowerCAmelCase , _lowerCAmelCase , self.movq_scale_factor )

        # create initial latent
        _lowerCAmelCase = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , self.scheduler , )

        for i, t in enumerate(self.progress_bar(_lowerCAmelCase ) ):
            # expand the latents if we are doing classifier free guidance
            _lowerCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents

            _lowerCAmelCase = {"image_embeds": image_embeds, "hint": hint}
            _lowerCAmelCase = self.unet(
                sample=_lowerCAmelCase , timestep=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , added_cond_kwargs=_lowerCAmelCase , return_dict=_lowerCAmelCase , )[0]

            if do_classifier_free_guidance:
                _lowerCAmelCase , _lowerCAmelCase = noise_pred.split(latents.shape[1] , dim=1 )
                _lowerCAmelCase , _lowerCAmelCase = noise_pred.chunk(2 )
                _lowerCAmelCase , _lowerCAmelCase = variance_pred.chunk(2 )
                _lowerCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                _lowerCAmelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 )

            if not (
                hasattr(self.scheduler.config , "variance_type" )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                _lowerCAmelCase , _lowerCAmelCase = noise_pred.split(latents.shape[1] , dim=1 )

            # compute the previous noisy sample x_t -> x_t-1
            _lowerCAmelCase = self.scheduler.step(
                _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase , )[0]

        # post-processing
        _lowerCAmelCase = self.movq.decode(_lowerCAmelCase , force_not_quantize=_lowerCAmelCase )["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )

        if output_type in ["np", "pil"]:
            _lowerCAmelCase = image * 0.5 + 0.5
            _lowerCAmelCase = image.clamp(0 , 1 )
            _lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()

        if output_type == "pil":
            _lowerCAmelCase = self.numpy_to_pil(_lowerCAmelCase )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=_lowerCAmelCase )
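# A minimal sketch (illustrative; not part of the sample above) of the
# classifier-free-guidance combination performed inside the denoising loop,
# assuming the batched UNet output stacks the unconditional and conditional
# branches along dim 0. `apply_cfg` is a hypothetical name.
import torch

def apply_cfg(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    noise_uncond, noise_text = noise_pred.chunk(2)  # split the doubled batch
    # guidance_scale > 1 pushes the estimate toward the conditional branch
    return noise_uncond + guidance_scale * (noise_text - noise_uncond)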
18
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)

_SCREAMING_SNAKE_CASE = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class lowerCAmelCase_ ( __magic_name__ ):
    __lowerCamelCase : Union[str, Any] = "donut-swin"

    __lowerCamelCase : int = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__( self , _lowerCAmelCase=224 , _lowerCAmelCase=4 , _lowerCAmelCase=3 , _lowerCAmelCase=96 , _lowerCAmelCase=[2, 2, 6, 2] , _lowerCAmelCase=[3, 6, 12, 24] , _lowerCAmelCase=7 , _lowerCAmelCase=4.0 , _lowerCAmelCase=True , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.1 , _lowerCAmelCase="gelu" , _lowerCAmelCase=False , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-5 , **_lowerCAmelCase , ) -> Optional[Any]:
        super().__init__(**_lowerCAmelCase )
        _lowerCAmelCase = image_size
        _lowerCAmelCase = patch_size
        _lowerCAmelCase = num_channels
        _lowerCAmelCase = embed_dim
        _lowerCAmelCase = depths
        _lowerCAmelCase = len(_lowerCAmelCase )
        _lowerCAmelCase = num_heads
        _lowerCAmelCase = window_size
        _lowerCAmelCase = mlp_ratio
        _lowerCAmelCase = qkv_bias
        _lowerCAmelCase = hidden_dropout_prob
        _lowerCAmelCase = attention_probs_dropout_prob
        _lowerCAmelCase = drop_path_rate
        _lowerCAmelCase = hidden_act
        _lowerCAmelCase = use_absolute_embeddings
        _lowerCAmelCase = layer_norm_eps
        _lowerCAmelCase = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        _lowerCAmelCase = int(embed_dim * 2 ** (len(_lowerCAmelCase ) - 1) )
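# A quick check (illustrative; not part of the sample above) of the hidden_size
# computed at the end of __init__: Swin doubles the channel width at each later
# stage, so with the defaults embed_dim=96 and depths=[2, 2, 6, 2] the last
# stage has 96 * 2**3 = 768 channels.
embed_dim, depths = 96, [2, 2, 6, 2]
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
assert hidden_size == 768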
18
1
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional

import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser


_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
torch.set_grad_enabled(False)
_SCREAMING_SNAKE_CASE = "cuda" if torch.cuda.is_available() else "cpu"


def __a(SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int=100 , SCREAMING_SNAKE_CASE_ : Optional[int]=" " ):
    '''simple docstring'''
    _lowerCAmelCase = text.split(SCREAMING_SNAKE_CASE_ )
    return [character.join(text[i : i + n] ).strip() for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )]


def __a(SCREAMING_SNAKE_CASE_ : dict ):
    '''simple docstring'''
    _lowerCAmelCase , _lowerCAmelCase = [], []
    for title, text in zip(documents["title"] , documents["text"] ):
        if text is not None:
            for passage in split_text(SCREAMING_SNAKE_CASE_ ):
                titles.append(title if title is not None else "" )
                texts.append(SCREAMING_SNAKE_CASE_ )
    return {"title": titles, "text": texts}


def __a(SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : DPRContextEncoder , SCREAMING_SNAKE_CASE_ : DPRContextEncoderTokenizerFast ):
    '''simple docstring'''
    _lowerCAmelCase = ctx_tokenizer(
        documents["title"] , documents["text"] , truncation=SCREAMING_SNAKE_CASE_ , padding="longest" , return_tensors="pt" )["input_ids"]
    _lowerCAmelCase = ctx_encoder(input_ids.to(device=SCREAMING_SNAKE_CASE_ ) , return_dict=SCREAMING_SNAKE_CASE_ ).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}


def __a(SCREAMING_SNAKE_CASE_ : "RagExampleArguments" , SCREAMING_SNAKE_CASE_ : "ProcessingArguments" , SCREAMING_SNAKE_CASE_ : "IndexHnswArguments" , ):
    '''simple docstring'''
    logger.info("Step 1 - Create the dataset" )
    ######################################
    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage

    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    _lowerCAmelCase = load_dataset(
        "csv" , data_files=[rag_example_args.csv_path] , split="train" , delimiter="\t" , column_names=["title", "text"] )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    _lowerCAmelCase = dataset.map(SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , num_proc=processing_args.num_proc )

    # And compute the embeddings
    _lowerCAmelCase = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=SCREAMING_SNAKE_CASE_ )
    _lowerCAmelCase = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
    _lowerCAmelCase = Features(
        {"text": Value("string" ), "title": Value("string" ), "embeddings": Sequence(Value("float32" ) )} )  # optional, save as float32 instead of float64 to save space
    _lowerCAmelCase = dataset.map(
        partial(SCREAMING_SNAKE_CASE_ , ctx_encoder=SCREAMING_SNAKE_CASE_ , ctx_tokenizer=SCREAMING_SNAKE_CASE_ ) ,
        batched=SCREAMING_SNAKE_CASE_ ,
        batch_size=processing_args.batch_size ,
        features=SCREAMING_SNAKE_CASE_ ,
    )

    # And finally save your dataset
    _lowerCAmelCase = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset" )
    dataset.save_to_disk(SCREAMING_SNAKE_CASE_ )
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset" )
    ######################################
    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    _lowerCAmelCase = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
    dataset.add_faiss_index("embeddings" , custom_index=SCREAMING_SNAKE_CASE_ )

    # And save the index
    _lowerCAmelCase = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset_hnsw_index.faiss" )
    dataset.get_index("embeddings" ).save(SCREAMING_SNAKE_CASE_ )
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index


@dataclass
class lowerCAmelCase_ :
    __lowerCamelCase : str = field(
        default=str(Path(__magic_name__ ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) ,
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} ,
    )
    __lowerCamelCase : Optional[str] = field(
        default=__magic_name__ ,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} ,
    )
    __lowerCamelCase : str = field(
        default="facebook/rag-sequence-nq" ,
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} ,
    )
    __lowerCamelCase : str = field(
        default="facebook/dpr-ctx_encoder-multiset-base" ,
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        } ,
    )
    __lowerCamelCase : Optional[str] = field(
        default=str(Path(__magic_name__ ).parent / "test_run" / "dummy-kb" ) ,
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} ,
    )


@dataclass
class lowerCAmelCase_ :
    __lowerCamelCase : Optional[int] = field(
        default=__magic_name__ ,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        } ,
    )
    __lowerCamelCase : int = field(
        default=16 ,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        } ,
    )


@dataclass
class lowerCAmelCase_ :
    __lowerCamelCase : int = field(
        default=768 ,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} ,
    )
    __lowerCamelCase : int = field(
        default=128 ,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        } ,
    )


if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    _SCREAMING_SNAKE_CASE = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        _SCREAMING_SNAKE_CASE = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
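# A minimal, self-contained sketch (illustrative; not part of the sample above)
# of the Faiss HNSW indexing and search done in "Step 2": the array sizes and
# the query below are assumptions for demonstration only.
import faiss
import numpy as np

d, m = 768, 128                               # embedding dim, HNSW links per node
index = faiss.IndexHNSWFlat(d, m, faiss.METRIC_INNER_PRODUCT)
passages = np.random.rand(1000, d).astype("float32")
index.add(passages)                           # HNSW needs no separate training step
scores, ids = index.search(passages[:1], 5)   # top-5 nearest passages for one query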
18
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)

_SCREAMING_SNAKE_CASE = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}


class lowerCAmelCase_ ( __magic_name__ ):
    __lowerCamelCase : Union[str, Any] = "swinv2"

    __lowerCamelCase : int = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__( self , _lowerCAmelCase=224 , _lowerCAmelCase=4 , _lowerCAmelCase=3 , _lowerCAmelCase=96 , _lowerCAmelCase=[2, 2, 6, 2] , _lowerCAmelCase=[3, 6, 12, 24] , _lowerCAmelCase=7 , _lowerCAmelCase=4.0 , _lowerCAmelCase=True , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.1 , _lowerCAmelCase="gelu" , _lowerCAmelCase=False , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-5 , _lowerCAmelCase=32 , **_lowerCAmelCase , ) -> Tuple:
        super().__init__(**_lowerCAmelCase )
        _lowerCAmelCase = image_size
        _lowerCAmelCase = patch_size
        _lowerCAmelCase = num_channels
        _lowerCAmelCase = embed_dim
        _lowerCAmelCase = depths
        _lowerCAmelCase = len(_lowerCAmelCase )
        _lowerCAmelCase = num_heads
        _lowerCAmelCase = window_size
        _lowerCAmelCase = mlp_ratio
        _lowerCAmelCase = qkv_bias
        _lowerCAmelCase = hidden_dropout_prob
        _lowerCAmelCase = attention_probs_dropout_prob
        _lowerCAmelCase = drop_path_rate
        _lowerCAmelCase = hidden_act
        _lowerCAmelCase = use_absolute_embeddings
        _lowerCAmelCase = layer_norm_eps
        _lowerCAmelCase = initializer_range
        _lowerCAmelCase = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        _lowerCAmelCase = int(embed_dim * 2 ** (len(_lowerCAmelCase ) - 1) )
        _lowerCAmelCase = (0, 0, 0, 0)
18
1
'''simple docstring'''
import datasets

from .evaluate import evaluate


_SCREAMING_SNAKE_CASE = "\\n@article{hendrycks2021cuad,\n    title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n    author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n    journal={arXiv preprint arXiv:2103.06268},\n    year={2021}\n}\n"

_SCREAMING_SNAKE_CASE = "\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n"

_SCREAMING_SNAKE_CASE = "\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n    predictions: List of question-answers dictionaries with the following key-values:\n        - 'id': id of the question-answer pair as given in the references (see below)\n        - 'prediction_text': list of possible texts for the answer, as a list of strings\n          depending on a threshold on the confidence probability of each prediction.\n    references: List of question-answers dictionaries with the following key-values:\n        - 'id': id of the question-answer pair (see above),\n        - 'answers': a Dict in the CUAD dataset format\n            {\n                'text': list of possible texts for the answer, as a list of strings\n                'answer_start': list of start positions for the answer, as a list of ints\n            }\n            Note that answer_start values are not taken into account to compute the metric.\nReturns:\n    'exact_match': Exact match (the normalized answer exactly match the gold answer)\n    'f1': The F-score of predicted tokens versus the gold answer\n    'aupr': Area Under the Precision-Recall curve\n    'prec_at_80_recall': Precision at 80% recall\n    'prec_at_90_recall': Precision at 90% recall\nExamples:\n    >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n    >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n    >>> cuad_metric = datasets.load_metric(\"cuad\")\n    >>> results = cuad_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
    def _snake_case ( self ) -> Optional[int]:
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    "predictions": {
                        "id": datasets.Value("string" ),
                        "prediction_text": datasets.features.Sequence(datasets.Value("string" ) ),
                    },
                    "references": {
                        "id": datasets.Value("string" ),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string" ),
                                "answer_start": datasets.Value("int32" ),
                            } ),
                    },
                } ) ,
            codebase_urls=["https://www.atticusprojectai.org/cuad"] ,
            reference_urls=["https://www.atticusprojectai.org/cuad"] ,
        )

    def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
        _lowerCAmelCase = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        _lowerCAmelCase = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        _lowerCAmelCase = evaluate(dataset=_lowerCAmelCase , predictions=_lowerCAmelCase )
        return score
18
'''simple docstring'''
import gc
import unittest

import torch
from parameterized import parameterized

from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin


enable_full_determinism()


class lowerCAmelCase_ ( __magic_name__ ,__magic_name__ ,unittest.TestCase ):
    __lowerCamelCase : Optional[Any] = AutoencoderKL
    __lowerCamelCase : List[Any] = "sample"
    __lowerCamelCase : Tuple = 1e-2

    @property
    def _snake_case ( self ) -> Union[str, Any]:
        _lowerCAmelCase = 4
        _lowerCAmelCase = 3
        _lowerCAmelCase = (32, 32)

        _lowerCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(_lowerCAmelCase )

        return {"sample": image}

    @property
    def _snake_case ( self ) -> Any:
        return (3, 32, 32)

    @property
    def _snake_case ( self ) -> List[Any]:
        return (3, 32, 32)

    def _snake_case ( self ) -> str:
        _lowerCAmelCase = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        _lowerCAmelCase = self.dummy_input
        return init_dict, inputs_dict

    def _snake_case ( self ) -> Optional[int]:
        pass

    def _snake_case ( self ) -> Any:
        pass

    @unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" )
    def _snake_case ( self ) -> str:
        # enable deterministic behavior for gradient checkpointing
        _lowerCAmelCase , _lowerCAmelCase = self.prepare_init_args_and_inputs_for_common()
        _lowerCAmelCase = self.model_class(**_lowerCAmelCase )
        model.to(_lowerCAmelCase )

        assert not model.is_gradient_checkpointing and model.training

        _lowerCAmelCase = model(**_lowerCAmelCase ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        _lowerCAmelCase = torch.randn_like(_lowerCAmelCase )
        _lowerCAmelCase = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        _lowerCAmelCase = self.model_class(**_lowerCAmelCase )
        # clone model
        model_a.load_state_dict(model.state_dict() )
        model_a.to(_lowerCAmelCase )
        model_a.enable_gradient_checkpointing()

        assert model_a.is_gradient_checkpointing and model_a.training

        _lowerCAmelCase = model_a(**_lowerCAmelCase ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_a.zero_grad()
        _lowerCAmelCase = (out_a - labels).mean()
        loss_a.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_a).abs() < 1E-5 )
        _lowerCAmelCase = dict(model.named_parameters() )
        _lowerCAmelCase = dict(model_a.named_parameters() )
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )

    def _snake_case ( self ) -> Optional[Any]:
        _lowerCAmelCase , _lowerCAmelCase = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=_lowerCAmelCase )
        self.assertIsNotNone(_lowerCAmelCase )
        self.assertEqual(len(loading_info["missing_keys"] ) , 0 )

        model.to(_lowerCAmelCase )
        _lowerCAmelCase = model(**self.dummy_input )

        assert image is not None, "Make sure output is not None"

    def _snake_case ( self ) -> Dict:
        _lowerCAmelCase = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" )
        _lowerCAmelCase = model.to(_lowerCAmelCase )
        model.eval()

        if torch_device == "mps":
            _lowerCAmelCase = torch.manual_seed(0 )
        else:
            _lowerCAmelCase = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 )

        _lowerCAmelCase = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        _lowerCAmelCase = image.to(_lowerCAmelCase )
        with torch.no_grad():
            _lowerCAmelCase = model(_lowerCAmelCase , sample_posterior=_lowerCAmelCase , generator=_lowerCAmelCase ).sample

        _lowerCAmelCase = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            _lowerCAmelCase = torch.tensor(
                [
                    -4.0078E-01,
                    -3.8323E-04,
                    -1.2681E-01,
                    -1.1462E-01,
                    2.0095E-01,
                    1.0893E-01,
                    -8.8247E-02,
                    -3.0361E-01,
                    -9.8644E-03,
                ] )
        elif torch_device == "cpu":
            _lowerCAmelCase = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
        else:
            _lowerCAmelCase = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )

        self.assertTrue(torch_all_close(_lowerCAmelCase , _lowerCAmelCase , rtol=1E-2 ) )


@slow
class lowerCAmelCase_ ( unittest.TestCase ):
    def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]:
        return f'''gaussian_noise_s={seed}_shape={'_'.join([str(_lowerCAmelCase ) for s in shape] )}.npy'''

    def _snake_case ( self ) -> List[str]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _snake_case ( self , _lowerCAmelCase=0 , _lowerCAmelCase=(4, 3, 512, 512) , _lowerCAmelCase=False ) -> Tuple:
        _lowerCAmelCase = torch.floataa if fpaa else torch.floataa
        _lowerCAmelCase = torch.from_numpy(load_hf_numpy(self.get_file_format(_lowerCAmelCase , _lowerCAmelCase ) ) ).to(_lowerCAmelCase ).to(_lowerCAmelCase )
        return image

    def _snake_case ( self , _lowerCAmelCase="CompVis/stable-diffusion-v1-4" , _lowerCAmelCase=False ) -> Tuple:
        _lowerCAmelCase = "fp16" if fpaa else None
        _lowerCAmelCase = torch.floataa if fpaa else torch.floataa
        _lowerCAmelCase = AutoencoderKL.from_pretrained(
            _lowerCAmelCase , subfolder="vae" , torch_dtype=_lowerCAmelCase , revision=_lowerCAmelCase , )
        model.to(_lowerCAmelCase ).eval()
        return model

    def _snake_case ( self , _lowerCAmelCase=0 ) -> str:
        if torch_device == "mps":
            return torch.manual_seed(_lowerCAmelCase )
        return torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ] )
    def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
        _lowerCAmelCase = self.get_sd_vae_model()
        _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase )
        _lowerCAmelCase = self.get_generator(_lowerCAmelCase )

        with torch.no_grad():
            _lowerCAmelCase = model(_lowerCAmelCase , generator=_lowerCAmelCase , sample_posterior=_lowerCAmelCase ).sample

        assert sample.shape == image.shape

        _lowerCAmelCase = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        _lowerCAmelCase = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )

        assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=3E-3 )

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ] )
    @require_torch_gpu
    def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
        _lowerCAmelCase = self.get_sd_vae_model(fpaa=_lowerCAmelCase )
        _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , fpaa=_lowerCAmelCase )
        _lowerCAmelCase = self.get_generator(_lowerCAmelCase )

        with torch.no_grad():
            _lowerCAmelCase = model(_lowerCAmelCase , generator=_lowerCAmelCase , sample_posterior=_lowerCAmelCase ).sample

        assert sample.shape == image.shape

        _lowerCAmelCase = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        _lowerCAmelCase = torch.tensor(_lowerCAmelCase )

        assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1E-2 )

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ] )
    def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Tuple:
        _lowerCAmelCase = self.get_sd_vae_model()
        _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase )

        with torch.no_grad():
            _lowerCAmelCase = model(_lowerCAmelCase ).sample

        assert sample.shape == image.shape

        _lowerCAmelCase = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        _lowerCAmelCase = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )

        assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=3E-3 )

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
            # fmt: on
        ] )
    @require_torch_gpu
    def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
        _lowerCAmelCase = self.get_sd_vae_model()
        _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , shape=(3, 4, 64, 64) )

        with torch.no_grad():
            _lowerCAmelCase = model.decode(_lowerCAmelCase ).sample

        assert list(sample.shape ) == [3, 3, 512, 512]

        _lowerCAmelCase = sample[-1, -2:, :2, -2:].flatten().cpu()
        _lowerCAmelCase = torch.tensor(_lowerCAmelCase )

        assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 )

    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
            # fmt: on
        ] )
    @require_torch_gpu
    def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> str:
        _lowerCAmelCase = self.get_sd_vae_model(fpaa=_lowerCAmelCase )
        _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , shape=(3, 4, 64, 64) , fpaa=_lowerCAmelCase )

        with torch.no_grad():
            _lowerCAmelCase = model.decode(_lowerCAmelCase ).sample

        assert list(sample.shape ) == [3, 3, 512, 512]

        _lowerCAmelCase = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        _lowerCAmelCase = torch.tensor(_lowerCAmelCase )

        assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=5E-3 )

    @parameterized.expand([(13,), (16,), (27,)] )
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
    def _snake_case ( self , _lowerCAmelCase ) -> List[str]:
        _lowerCAmelCase = self.get_sd_vae_model(fpaa=_lowerCAmelCase )
        _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , shape=(3, 4, 64, 64) , fpaa=_lowerCAmelCase )

        with torch.no_grad():
            _lowerCAmelCase = model.decode(_lowerCAmelCase ).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            _lowerCAmelCase = model.decode(_lowerCAmelCase ).sample

        assert list(sample.shape ) == [3, 3, 512, 512]

        assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1E-1 )

    @parameterized.expand([(13,), (16,), (37,)] )
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
    def _snake_case ( self , _lowerCAmelCase ) -> Any:
        _lowerCAmelCase = self.get_sd_vae_model()
        _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , shape=(3, 4, 64, 64) )

        with torch.no_grad():
            _lowerCAmelCase = model.decode(_lowerCAmelCase ).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            _lowerCAmelCase = model.decode(_lowerCAmelCase ).sample

        assert list(sample.shape ) == [3, 3, 512, 512]

        assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1E-2 )

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
            # fmt: on
        ] )
    def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> int:
        _lowerCAmelCase = self.get_sd_vae_model()
        _lowerCAmelCase = self.get_sd_image(_lowerCAmelCase )
        _lowerCAmelCase = self.get_generator(_lowerCAmelCase )

        with torch.no_grad():
            _lowerCAmelCase = model.encode(_lowerCAmelCase ).latent_dist
            _lowerCAmelCase = dist.sample(generator=_lowerCAmelCase )

        assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        _lowerCAmelCase = sample[0, -1, -3:, -3:].flatten().cpu()
        _lowerCAmelCase = torch.tensor(_lowerCAmelCase )

        _lowerCAmelCase = 3E-3 if torch_device != "mps" else 1E-2
        assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=_lowerCAmelCase )
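# A minimal sketch (illustrative; not part of the sample above) of the
# elementwise tolerance comparison these tests rely on: torch_all_close is a
# diffusers helper, but plain torch.allclose behaves the same way here.
import torch

a = torch.tensor([1.0000, 2.0001])
b = torch.tensor([1.0001, 2.0000])
assert torch.allclose(a, b, atol=1e-3)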
18
1