code
stringlengths
82
54.1k
code_codestyle
int64
0
699
style_context
stringlengths
111
35.6k
style_context_codestyle
int64
0
699
label
int64
0
1
'''simple docstring''' from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from transformers.modeling_outputs import BaseModelOutput from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING __a: int = logging.get_logger(__name__) @add_end_docstrings(lowerCamelCase__ ) class UpperCAmelCase ( lowerCamelCase__ ): '''simple docstring''' def __init__( self , **__lowerCAmelCase ) -> Optional[Any]: super().__init__(**lowercase__ ) if self.framework == "tf": raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" ) requires_backends(self , '''vision''' ) self.check_model_type(lowercase__ ) def __call__( self , __lowerCAmelCase , __lowerCAmelCase = None , **__lowerCAmelCase , ) -> str: if "text_queries" in kwargs: lowercase__ : Tuple = kwargs.pop('''text_queries''' ) if isinstance(lowercase__ , (str, Image.Image) ): lowercase__ : int = {'''image''': image, '''candidate_labels''': candidate_labels} else: lowercase__ : List[Any] = image lowercase__ : Optional[int] = super().__call__(lowercase__ , **lowercase__ ) return results def _lowerCAmelCase( self , **__lowerCAmelCase ) -> Optional[Any]: lowercase__ : str = {} if "threshold" in kwargs: lowercase__ : Optional[Any] = kwargs['''threshold'''] if "top_k" in kwargs: lowercase__ : List[Any] = kwargs['''top_k'''] return {}, {}, postprocess_params def _lowerCAmelCase( self , __lowerCAmelCase ) -> int: lowercase__ : Optional[int] = load_image(inputs['''image'''] ) lowercase__ : Optional[int] = inputs['''candidate_labels'''] if isinstance(lowercase__ , lowercase__ ): lowercase__ : int = candidate_labels.split(''',''' ) lowercase__ : List[Any] = torch.tensor([[image.height, image.width]] , dtype=torch.intaa ) for i, candidate_label in enumerate(lowercase__ ): lowercase__ : List[Any] = self.tokenizer(lowercase__ , return_tensors=self.framework ) lowercase__ : Optional[int] = self.image_processor(lowercase__ , return_tensors=self.framework ) yield { "is_last": i == len(lowercase__ ) - 1, "target_size": target_size, "candidate_label": candidate_label, **text_inputs, **image_features, } def _lowerCAmelCase( self , __lowerCAmelCase ) -> Dict: lowercase__ : Optional[Any] = model_inputs.pop('''target_size''' ) lowercase__ : Tuple = model_inputs.pop('''candidate_label''' ) lowercase__ : Any = model_inputs.pop('''is_last''' ) lowercase__ : str = self.model(**lowercase__ ) lowercase__ : List[str] = {'''target_size''': target_size, '''candidate_label''': candidate_label, '''is_last''': is_last, **outputs} return model_outputs def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase=0.1 , __lowerCAmelCase=None ) -> Union[str, Any]: lowercase__ : Union[str, Any] = [] for model_output in model_outputs: lowercase__ : Union[str, Any] = model_output['''candidate_label'''] lowercase__ : Union[str, Any] = BaseModelOutput(lowercase__ ) lowercase__ : Optional[Any] = self.image_processor.post_process_object_detection( outputs=lowercase__ , threshold=lowercase__ , target_sizes=model_output['''target_size'''] )[0] for index in outputs["scores"].nonzero(): lowercase__ : int = outputs['''scores'''][index].item() lowercase__ : str = self._get_bounding_box(outputs['''boxes'''][index][0] ) lowercase__ : Optional[Any] = {'''score''': score, '''label''': label, '''box''': box} results.append(lowercase__ ) lowercase__ : List[str] = sorted(lowercase__ , key=lambda __lowerCAmelCase : x["score"] , reverse=lowercase__ ) if top_k: lowercase__ : Union[str, Any] = results[:top_k] return results def _lowerCAmelCase( self , __lowerCAmelCase ) -> Union[str, Any]: if self.framework != "pt": raise ValueError('''The ZeroShotObjectDetectionPipeline is only available in PyTorch.''' ) lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = box.int().tolist() lowercase__ : Union[str, Any] = { '''xmin''': xmin, '''ymin''': ymin, '''xmax''': xmax, '''ymax''': ymax, } return bbox
152
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''', # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class lowercase_ (lowerCamelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = 'blenderbot-small' SCREAMING_SNAKE_CASE : int = ['past_key_values'] SCREAMING_SNAKE_CASE : List[str] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self : Optional[int] ,lowercase__ : List[str]=5_0_2_6_5 ,lowercase__ : Optional[Any]=5_1_2 ,lowercase__ : Optional[int]=8 ,lowercase__ : List[Any]=2_0_4_8 ,lowercase__ : List[str]=1_6 ,lowercase__ : str=8 ,lowercase__ : Any=2_0_4_8 ,lowercase__ : Tuple=1_6 ,lowercase__ : Tuple=0.0 ,lowercase__ : List[str]=0.0 ,lowercase__ : Any=True ,lowercase__ : str=True ,lowercase__ : int="gelu" ,lowercase__ : Tuple=5_1_2 ,lowercase__ : List[Any]=0.1 ,lowercase__ : Tuple=0.0 ,lowercase__ : str=0.0 ,lowercase__ : Any=0.0_2 ,lowercase__ : Union[str, Any]=1 ,lowercase__ : List[Any]=False ,lowercase__ : Optional[int]=0 ,lowercase__ : Optional[int]=1 ,lowercase__ : str=2 ,lowercase__ : int=2 ,**lowercase__ : List[str] ,): __lowercase = vocab_size __lowercase = max_position_embeddings __lowercase = d_model __lowercase = encoder_ffn_dim __lowercase = encoder_layers __lowercase = encoder_attention_heads __lowercase = decoder_ffn_dim __lowercase = decoder_layers __lowercase = decoder_attention_heads __lowercase = dropout __lowercase = attention_dropout __lowercase = activation_dropout __lowercase = activation_function __lowercase = init_std __lowercase = encoder_layerdrop __lowercase = decoder_layerdrop __lowercase = use_cache __lowercase = encoder_layers __lowercase = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=lowercase__ ,bos_token_id=lowercase__ ,eos_token_id=lowercase__ ,is_encoder_decoder=lowercase__ ,decoder_start_token_id=lowercase__ ,forced_eos_token_id=lowercase__ ,**lowercase__ ,) class lowercase_ (lowerCamelCase__ ): """simple docstring""" @property def SCREAMING_SNAKE_CASE ( self : Dict ): if self.task in ["default", "seq2seq-lm"]: __lowercase = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: __lowercase = {0: '''batch'''} __lowercase = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: __lowercase = {0: '''batch''', 1: '''decoder_sequence'''} __lowercase = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(lowercase__ ,direction='''inputs''' ) elif self.task == "causal-lm": # TODO: figure this case out. __lowercase = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: __lowercase , __lowercase = self.num_layers for i in range(lowercase__ ): __lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''} __lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''} else: __lowercase = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}), ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}), ] ) return common_inputs @property def SCREAMING_SNAKE_CASE ( self : List[Any] ): if self.task in ["default", "seq2seq-lm"]: __lowercase = super().outputs else: __lowercase = super(lowercase__ ,self ).outputs if self.use_past: __lowercase , __lowercase = self.num_layers for i in range(lowercase__ ): __lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''} __lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''} return common_outputs def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,): __lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) # Generate decoder inputs __lowercase = seq_length if not self.use_past else 1 __lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) __lowercase = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} __lowercase = dict(**lowercase__ ,**lowercase__ ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch __lowercase , __lowercase = common_inputs['''input_ids'''].shape __lowercase = common_inputs['''decoder_input_ids'''].shape[1] __lowercase , __lowercase = self.num_attention_heads __lowercase = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) __lowercase = decoder_seq_length + 3 __lowercase = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) __lowercase = torch.cat( [common_inputs['''decoder_attention_mask'''], torch.ones(lowercase__ ,lowercase__ )] ,dim=1 ) __lowercase = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered __lowercase , __lowercase = self.num_layers __lowercase = min(lowercase__ ,lowercase__ ) __lowercase = max(lowercase__ ,lowercase__ ) - min_num_layers __lowercase = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder''' for _ in range(lowercase__ ): common_inputs["past_key_values"].append( ( torch.zeros(lowercase__ ), torch.zeros(lowercase__ ), torch.zeros(lowercase__ ), torch.zeros(lowercase__ ), ) ) # TODO: test this. __lowercase = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape for _ in range(lowercase__ ,lowercase__ ): common_inputs["past_key_values"].append((torch.zeros(lowercase__ ), torch.zeros(lowercase__ )) ) return common_inputs def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,): __lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch __lowercase , __lowercase = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values __lowercase = seqlen + 2 __lowercase , __lowercase = self.num_layers __lowercase , __lowercase = self.num_attention_heads __lowercase = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) __lowercase = common_inputs['''attention_mask'''].dtype __lowercase = torch.cat( [common_inputs['''attention_mask'''], torch.ones(lowercase__ ,lowercase__ ,dtype=lowercase__ )] ,dim=1 ) __lowercase = [ (torch.zeros(lowercase__ ), torch.zeros(lowercase__ )) for _ in range(lowercase__ ) ] return common_inputs def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,): # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX __lowercase = compute_effective_axis_dimension( lowercase__ ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX __lowercase = tokenizer.num_special_tokens_to_add(lowercase__ ) __lowercase = compute_effective_axis_dimension( lowercase__ ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=lowercase__ ) # Generate dummy inputs according to compute batch and sequence __lowercase = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size __lowercase = dict(tokenizer(lowercase__ ,return_tensors=lowercase__ ) ) return common_inputs def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,): if self.task in ["default", "seq2seq-lm"]: __lowercase = self._generate_dummy_inputs_for_default_and_seqaseq_lm( lowercase__ ,batch_size=lowercase__ ,seq_length=lowercase__ ,is_pair=lowercase__ ,framework=lowercase__ ) elif self.task == "causal-lm": __lowercase = self._generate_dummy_inputs_for_causal_lm( lowercase__ ,batch_size=lowercase__ ,seq_length=lowercase__ ,is_pair=lowercase__ ,framework=lowercase__ ) else: __lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowercase__ ,batch_size=lowercase__ ,seq_length=lowercase__ ,is_pair=lowercase__ ,framework=lowercase__ ) return common_inputs def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : List[Any] ,lowercase__ : Tuple ,lowercase__ : List[Any] ,lowercase__ : Optional[Any] ): if self.task in ["default", "seq2seq-lm"]: __lowercase = super()._flatten_past_key_values_(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) else: __lowercase = super(lowercase__ ,self )._flatten_past_key_values_( lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
41
0
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" ,"False" ) ) is not True ,reason="Skipping test because should only be run when releasing minor transformers version" ,) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6}, }, { "framework": "pytorch", "script": "run_ddp.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6}, }, { "framework": "tensorflow", "script": "run_tf_dist.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7}, }, ] ) class UpperCAmelCase (unittest.TestCase ): """simple docstring""" def _snake_case ( self ): if self.framework == "pytorch": subprocess.run( F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=lowercase__ , ) assert hasattr(self , '''env''' ) def _snake_case ( self , _UpperCAmelCase ): lowercase__: str = F"""{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}""" # distributed data settings lowercase__: Tuple = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=lowercase__ , instance_count=lowercase__ , instance_type=self.instance_type , debugger_hook_config=lowercase__ , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=lowercase__ , py_version='''py36''' , ) def _snake_case ( self , _UpperCAmelCase ): TrainingJobAnalytics(lowercase__ ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(2,)] ) def _snake_case ( self , _UpperCAmelCase ): # create estimator lowercase__: Dict = self.create_estimator(lowercase__ ) # run training estimator.fit() # result dataframe lowercase__: Union[str, Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis lowercase__: Dict = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] ) lowercase__: Tuple = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping lowercase__: Optional[Any] = ( Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy ) assert all(t <= self.results['''eval_loss'''] for t in eval_loss ) # dump tests result into json file to share in PR with open(F"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile: json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , lowercase__ )
586
'''simple docstring''' from __future__ import annotations def _A ( A__ , A__ ): """simple docstring""" if b == 0: return (1, 0) ((__lowercase) , (__lowercase)) = extended_euclid(A__ , a % b ) __lowercase = a // b return (y, x - k * y) def _A ( A__ , A__ , A__ , A__ ): """simple docstring""" ((__lowercase) , (__lowercase)) = extended_euclid(A__ , A__ ) __lowercase = na * na __lowercase = ra * x * na + ra * y * na return (n % m + m) % m def _A ( A__ , A__ ): """simple docstring""" ((__lowercase) , (__lowercase)) = extended_euclid(A__ , A__ ) if b < 0: __lowercase = (b % n + n) % n return b def _A ( A__ , A__ , A__ , A__ ): """simple docstring""" __lowercase , __lowercase = invert_modulo(A__ , A__ ), invert_modulo(A__ , A__ ) __lowercase = na * na __lowercase = ra * x * na + ra * y * na return (n % m + m) % m if __name__ == "__main__": from doctest import testmod testmod(name='''chinese_remainder_theorem''', verbose=True) testmod(name='''chinese_remainder_theorem2''', verbose=True) testmod(name='''invert_modulo''', verbose=True) testmod(name='''extended_euclid''', verbose=True)
41
0
"""simple docstring""" import os import string import sys lowerCAmelCase_ = 1 << 8 lowerCAmelCase_ = { '''tab''': ord('''\t'''), '''newline''': ord('''\r'''), '''esc''': 27, '''up''': 65 + ARROW_KEY_FLAG, '''down''': 66 + ARROW_KEY_FLAG, '''right''': 67 + ARROW_KEY_FLAG, '''left''': 68 + ARROW_KEY_FLAG, '''mod_int''': 91, '''undefined''': sys.maxsize, '''interrupt''': 3, '''insert''': 50, '''delete''': 51, '''pg_up''': 53, '''pg_down''': 54, } lowerCAmelCase_ = KEYMAP['''up'''] lowerCAmelCase_ = KEYMAP['''left'''] if sys.platform == "win32": lowerCAmelCase_ = [] lowerCAmelCase_ = { b'''\xe0H''': KEYMAP['''up'''] - ARROW_KEY_FLAG, b'''\x00H''': KEYMAP['''up'''] - ARROW_KEY_FLAG, b'''\xe0P''': KEYMAP['''down'''] - ARROW_KEY_FLAG, b'''\x00P''': KEYMAP['''down'''] - ARROW_KEY_FLAG, b'''\xe0M''': KEYMAP['''right'''] - ARROW_KEY_FLAG, b'''\x00M''': KEYMAP['''right'''] - ARROW_KEY_FLAG, b'''\xe0K''': KEYMAP['''left'''] - ARROW_KEY_FLAG, b'''\x00K''': KEYMAP['''left'''] - ARROW_KEY_FLAG, } for i in range(10): lowerCAmelCase_ = ord(str(i)) def lowerCamelCase_()-> Any: if os.name == "nt": import msvcrt _SCREAMING_SNAKE_CASE : List[str] = """mbcs""" # Flush the keyboard buffer while msvcrt.kbhit(): msvcrt.getch() if len(A__ ) == 0: # Read the keystroke _SCREAMING_SNAKE_CASE : List[str] = msvcrt.getch() # If it is a prefix char, get second part if ch in (b"\x00", b"\xe0"): _SCREAMING_SNAKE_CASE : List[Any] = ch + msvcrt.getch() # Translate actual Win chars to bullet char types try: _SCREAMING_SNAKE_CASE : Any = chr(WIN_KEYMAP[cha] ) WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) ) WIN_CH_BUFFER.append(A__ ) if ord(A__ ) in ( KEYMAP["insert"] - 1 << 9, KEYMAP["delete"] - 1 << 9, KEYMAP["pg_up"] - 1 << 9, KEYMAP["pg_down"] - 1 << 9, ): WIN_CH_BUFFER.append(chr(126 ) ) _SCREAMING_SNAKE_CASE : Union[str, Any] = chr(KEYMAP["""esc"""] ) except KeyError: _SCREAMING_SNAKE_CASE : List[str] = cha[1] else: _SCREAMING_SNAKE_CASE : Any = ch.decode(A__ ) else: _SCREAMING_SNAKE_CASE : Union[str, Any] = WIN_CH_BUFFER.pop(0 ) elif os.name == "posix": import termios import tty _SCREAMING_SNAKE_CASE : Dict = sys.stdin.fileno() _SCREAMING_SNAKE_CASE : List[str] = termios.tcgetattr(A__ ) try: tty.setraw(A__ ) _SCREAMING_SNAKE_CASE : int = sys.stdin.read(1 ) finally: termios.tcsetattr(A__ , termios.TCSADRAIN , A__ ) return ch def lowerCamelCase_()-> Dict: _SCREAMING_SNAKE_CASE : int = get_raw_chars() if ord(A__ ) in [KEYMAP["interrupt"], KEYMAP["newline"]]: return char elif ord(A__ ) == KEYMAP["esc"]: _SCREAMING_SNAKE_CASE : Optional[int] = get_raw_chars() if ord(A__ ) == KEYMAP["mod_int"]: _SCREAMING_SNAKE_CASE : Union[str, Any] = get_raw_chars() if ord(A__ ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(A__ ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG: return chr(ord(A__ ) + ARROW_KEY_FLAG ) else: return KEYMAP["undefined"] else: return get_raw_chars() else: if char in string.printable: return char else: return KEYMAP["undefined"]
338
'''simple docstring''' from datasets.utils.patching import _PatchedModuleObj, patch_submodule from . import _test_patching def _A ( ): """simple docstring""" import os as original_os from os import path as original_path from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join __lowercase = '''__test_patch_submodule_mock__''' with patch_submodule(_test_patching , '''os.path.join''' , A__ ): # Every way to access os.path.join must be patched, and the rest must stay untouched # check os.path.join assert isinstance(_test_patching.os , _PatchedModuleObj ) assert isinstance(_test_patching.os.path , _PatchedModuleObj ) assert _test_patching.os.path.join is mock # check path.join assert isinstance(_test_patching.path , _PatchedModuleObj ) assert _test_patching.path.join is mock # check join assert _test_patching.join is mock # check that the other attributes are untouched assert _test_patching.os.rename is original_rename assert _test_patching.path.dirname is original_dirname assert _test_patching.os.path.dirname is original_dirname # Even renamed modules or objects must be patched # check renamed_os.path.join assert isinstance(_test_patching.renamed_os , _PatchedModuleObj ) assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj ) assert _test_patching.renamed_os.path.join is mock # check renamed_path.join assert isinstance(_test_patching.renamed_path , _PatchedModuleObj ) assert _test_patching.renamed_path.join is mock # check renamed_join assert _test_patching.renamed_join is mock # check that the other attributes are untouched assert _test_patching.renamed_os.rename is original_rename assert _test_patching.renamed_path.dirname is original_dirname assert _test_patching.renamed_os.path.dirname is original_dirname # check that everthing is back to normal when the patch is over assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join def _A ( ): """simple docstring""" assert _test_patching.open is open __lowercase = '''__test_patch_submodule_builtin_mock__''' # _test_patching has "open" in its globals assert _test_patching.open is open with patch_submodule(_test_patching , '''open''' , A__ ): assert _test_patching.open is mock # check that everthing is back to normal when the patch is over assert _test_patching.open is open def _A ( ): """simple docstring""" __lowercase = '''__test_patch_submodule_missing_mock__''' with patch_submodule(_test_patching , '''pandas.read_csv''' , A__ ): pass def _A ( ): """simple docstring""" __lowercase = '''__test_patch_submodule_missing_builtin_mock__''' # _test_patching doesn't have "len" in its globals assert getattr(_test_patching , '''len''' , A__ ) is None with patch_submodule(_test_patching , '''len''' , A__ ): assert _test_patching.len is mock assert _test_patching.len is len def _A ( ): """simple docstring""" __lowercase = '''__test_patch_submodule_start_and_stop_mock__''' __lowercase = patch_submodule(_test_patching , '''open''' , A__ ) assert _test_patching.open is open patch.start() assert _test_patching.open is mock patch.stop() assert _test_patching.open is open def _A ( ): """simple docstring""" from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join __lowercase = '''__test_patch_submodule_successive_join__''' __lowercase = '''__test_patch_submodule_successive_dirname__''' __lowercase = '''__test_patch_submodule_successive_rename__''' assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename with patch_submodule(_test_patching , '''os.path.join''' , A__ ): with patch_submodule(_test_patching , '''os.rename''' , A__ ): with patch_submodule(_test_patching , '''os.path.dirname''' , A__ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename # try another order with patch_submodule(_test_patching , '''os.rename''' , A__ ): with patch_submodule(_test_patching , '''os.path.join''' , A__ ): with patch_submodule(_test_patching , '''os.path.dirname''' , A__ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename def _A ( ): """simple docstring""" __lowercase = '''__test_patch_submodule_doesnt_exist_mock__''' with patch_submodule(_test_patching , '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''' , A__ ): pass with patch_submodule(_test_patching , '''os.__attribute_that_doesn_exist__''' , A__ ): pass
41
0
import pytest from datasets.parallel import ParallelBackendConfig, parallel_backend from datasets.utils.py_utils import map_nested from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows def a_ (__A ) -> Optional[Any]: # picklable for multiprocessing """simple docstring""" return i + 1 @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows def a_ () -> Union[str, Any]: """simple docstring""" with parallel_backend("spark" ): assert ParallelBackendConfig.backend_name == "spark" __a : int = [1, 2, 3] with pytest.raises(A__ ): with parallel_backend("unsupported backend" ): map_nested(A__ , A__ , num_proc=2 ) with pytest.raises(A__ ): with parallel_backend("unsupported backend" ): map_nested(A__ , A__ , num_proc=-1 ) @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows @pytest.mark.parametrize("num_proc" , [2, -1] ) def a_ (__A ) -> Dict: """simple docstring""" __a : Union[str, Any] = [1, 2] __a : Union[str, Any] = {"a": 1, "b": 2} __a : Tuple = {"a": [1, 2], "b": [3, 4]} __a : Dict = {"a": {"1": 1}, "b": 2} __a : Optional[Any] = {"a": 1, "b": 2, "c": 3, "d": 4} __a : Optional[int] = [2, 3] __a : str = {"a": 2, "b": 3} __a : Dict = {"a": [2, 3], "b": [4, 5]} __a : Optional[int] = {"a": {"1": 2}, "b": 3} __a : Tuple = {"a": 2, "b": 3, "c": 4, "d": 5} with parallel_backend("spark" ): assert map_nested(A__ , A__ , num_proc=A__ ) == expected_map_nested_sa assert map_nested(A__ , A__ , num_proc=A__ ) == expected_map_nested_sa assert map_nested(A__ , A__ , num_proc=A__ ) == expected_map_nested_sa assert map_nested(A__ , A__ , num_proc=A__ ) == expected_map_nested_sa assert map_nested(A__ , A__ , num_proc=A__ ) == expected_map_nested_sa
351
'''simple docstring''' import os import tempfile import unittest from transformers import NezhaConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, ) from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST class lowercase_ : """simple docstring""" def __init__( self : Dict ,lowercase__ : Dict ,lowercase__ : int=1_3 ,lowercase__ : List[str]=7 ,lowercase__ : int=True ,lowercase__ : int=True ,lowercase__ : Union[str, Any]=True ,lowercase__ : List[Any]=True ,lowercase__ : str=9_9 ,lowercase__ : Optional[Any]=3_2 ,lowercase__ : Union[str, Any]=5 ,lowercase__ : List[Any]=4 ,lowercase__ : str=3_7 ,lowercase__ : Tuple="gelu" ,lowercase__ : List[Any]=0.1 ,lowercase__ : Dict=0.1 ,lowercase__ : int=1_2_8 ,lowercase__ : Dict=3_2 ,lowercase__ : Dict=1_6 ,lowercase__ : Any=2 ,lowercase__ : int=0.0_2 ,lowercase__ : List[str]=3 ,lowercase__ : Dict=4 ,lowercase__ : Optional[int]=None ,): __lowercase = parent __lowercase = batch_size __lowercase = seq_length __lowercase = is_training __lowercase = use_input_mask __lowercase = use_token_type_ids __lowercase = use_labels __lowercase = vocab_size __lowercase = hidden_size __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = intermediate_size __lowercase = hidden_act __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = max_position_embeddings __lowercase = type_vocab_size __lowercase = type_sequence_label_size __lowercase = initializer_range __lowercase = num_labels __lowercase = num_choices __lowercase = scope def SCREAMING_SNAKE_CASE ( self : Optional[int] ): __lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) __lowercase = None if self.use_input_mask: __lowercase = random_attention_mask([self.batch_size, self.seq_length] ) __lowercase = None if self.use_token_type_ids: __lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) __lowercase = None __lowercase = None __lowercase = None if self.use_labels: __lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) __lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) __lowercase = ids_tensor([self.batch_size] ,self.num_choices ) __lowercase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): return NezhaConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=lowercase__ ,initializer_range=self.initializer_range ,) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) = self.prepare_config_and_inputs() __lowercase = True __lowercase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __lowercase = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Union[str, Any] ,lowercase__ : List[str] ,lowercase__ : List[str] ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : Tuple ,lowercase__ : str ): __lowercase = NezhaModel(config=lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ) __lowercase = model(lowercase__ ,token_type_ids=lowercase__ ) __lowercase = model(lowercase__ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Dict ,lowercase__ : str ,lowercase__ : Optional[Any] ,lowercase__ : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : Tuple ,lowercase__ : Optional[int] ,lowercase__ : List[Any] ,): __lowercase = True __lowercase = NezhaModel(lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = model( lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,encoder_hidden_states=lowercase__ ,encoder_attention_mask=lowercase__ ,) __lowercase = model( lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,encoder_hidden_states=lowercase__ ,) __lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Optional[Any] ): __lowercase = NezhaForMaskedLM(config=lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Any ,lowercase__ : int ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[int] ,lowercase__ : Any ): __lowercase = NezhaForNextSentencePrediction(config=lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = model( lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) ) def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : str ,lowercase__ : Dict ,lowercase__ : Tuple ,lowercase__ : Dict ,lowercase__ : Tuple ,lowercase__ : int ,lowercase__ : int ): __lowercase = NezhaForPreTraining(config=lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = model( lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ,next_sentence_label=lowercase__ ,) self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[Any] ,lowercase__ : Tuple ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Optional[int] ,lowercase__ : Union[str, Any] ): __lowercase = NezhaForQuestionAnswering(config=lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = model( lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,start_positions=lowercase__ ,end_positions=lowercase__ ,) self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Tuple ,lowercase__ : str ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Any ,lowercase__ : Optional[int] ,lowercase__ : int ): __lowercase = self.num_labels __lowercase = NezhaForSequenceClassification(lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : Union[str, Any] ,lowercase__ : List[str] ,lowercase__ : int ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Any ,lowercase__ : Optional[Any] ): __lowercase = self.num_labels __lowercase = NezhaForTokenClassification(config=lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : List[Any] ,lowercase__ : str ): __lowercase = self.num_choices __lowercase = NezhaForMultipleChoice(config=lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() __lowercase = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() __lowercase = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() __lowercase = model( lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): __lowercase = self.prepare_config_and_inputs() ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) = config_and_inputs __lowercase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class lowercase_ (lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = ( ( NezhaModel, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE : Tuple = ( { 'feature-extraction': NezhaModel, 'fill-mask': NezhaForMaskedLM, 'question-answering': NezhaForQuestionAnswering, 'text-classification': NezhaForSequenceClassification, 'token-classification': NezhaForTokenClassification, 'zero-shot': NezhaForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE : List[str] = True def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[str] ,lowercase__ : str ,lowercase__ : Any=False ): __lowercase = super()._prepare_for_class(lowercase__ ,lowercase__ ,return_labels=lowercase__ ) if return_labels: if model_class in get_values(lowercase__ ): __lowercase = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=lowercase__ ) __lowercase = torch.zeros( self.model_tester.batch_size ,dtype=torch.long ,device=lowercase__ ) return inputs_dict def SCREAMING_SNAKE_CASE ( self : Tuple ): __lowercase = NezhaModelTester(self ) __lowercase = ConfigTester(self ,config_class=lowercase__ ,hidden_size=3_7 ) def SCREAMING_SNAKE_CASE ( self : int ): self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : int ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Any ): __lowercase = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Any ): # This regression test was failing with PyTorch < 1.3 ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() __lowercase = None self.model_tester.create_and_check_model_as_decoder( lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,) def SCREAMING_SNAKE_CASE ( self : int ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*lowercase__ ) def SCREAMING_SNAKE_CASE ( self : int ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_next_sequence_prediction(*lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*lowercase__ ) def SCREAMING_SNAKE_CASE ( self : str ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowercase__ ) def SCREAMING_SNAKE_CASE ( self : str ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowercase__ ) def SCREAMING_SNAKE_CASE ( self : int ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowercase__ ) @slow def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase = NezhaModel.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) @slow @require_torch_gpu def SCREAMING_SNAKE_CASE ( self : Optional[int] ): __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # NezhaForMultipleChoice behaves incorrectly in JIT environments. if model_class == NezhaForMultipleChoice: return __lowercase = True __lowercase = model_class(config=lowercase__ ) __lowercase = self._prepare_for_class(lowercase__ ,lowercase__ ) __lowercase = torch.jit.trace( lowercase__ ,(inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(lowercase__ ,os.path.join(lowercase__ ,'''bert.pt''' ) ) __lowercase = torch.jit.load(os.path.join(lowercase__ ,'''bert.pt''' ) ,map_location=lowercase__ ) loaded(inputs_dict['''input_ids'''].to(lowercase__ ) ,inputs_dict['''attention_mask'''].to(lowercase__ ) ) @require_torch class lowercase_ (unittest.TestCase ): """simple docstring""" @slow def SCREAMING_SNAKE_CASE ( self : int ): __lowercase = NezhaModel.from_pretrained('''sijunhe/nezha-cn-base''' ) __lowercase = torch.tensor([[0, 1, 2, 3, 4, 5]] ) __lowercase = torch.tensor([[0, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __lowercase = model(lowercase__ ,attention_mask=lowercase__ )[0] __lowercase = torch.Size((1, 6, 7_6_8) ) self.assertEqual(output.shape ,lowercase__ ) __lowercase = torch.tensor([[[0.0_6_8_5, 0.2_4_4_1, 0.1_1_0_2], [0.0_6_0_0, 0.1_9_0_6, 0.1_3_4_9], [0.0_2_2_1, 0.0_8_1_9, 0.0_5_8_6]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,lowercase__ ,atol=1e-4 ) ) @slow def SCREAMING_SNAKE_CASE ( self : Dict ): __lowercase = NezhaForMaskedLM.from_pretrained('''sijunhe/nezha-cn-base''' ) __lowercase = torch.tensor([[0, 1, 2, 3, 4, 5]] ) __lowercase = torch.tensor([[1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __lowercase = model(lowercase__ ,attention_mask=lowercase__ )[0] __lowercase = torch.Size((1, 6, 2_1_1_2_8) ) self.assertEqual(output.shape ,lowercase__ ) __lowercase = torch.tensor( [[-2.7_9_3_9, -1.7_9_0_2, -2.2_1_8_9], [-2.8_5_8_5, -1.8_9_0_8, -2.3_7_2_3], [-2.6_4_9_9, -1.7_7_5_0, -2.2_5_5_8]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,lowercase__ ,atol=1e-4 ) )
41
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A_ : Union[str, Any] = logging.get_logger(__name__) A_ : Union[str, Any] = { 'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json', 'distilbert-base-uncased-distilled-squad': ( 'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json' ), 'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json', 'distilbert-base-cased-distilled-squad': ( 'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json' ), 'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json', 'distilbert-base-multilingual-cased': ( 'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json' ), 'distilbert-base-uncased-finetuned-sst-2-english': ( 'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json' ), } class lowerCAmelCase__ ( lowerCamelCase__ ): '''simple docstring''' _SCREAMING_SNAKE_CASE : List[Any] = 'distilbert' _SCREAMING_SNAKE_CASE : Any = { 'hidden_size': 'dim', 'num_attention_heads': 'n_heads', 'num_hidden_layers': 'n_layers', } def __init__( self : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[Any]=30_522 , _SCREAMING_SNAKE_CASE : Optional[int]=512 , _SCREAMING_SNAKE_CASE : Optional[int]=False , _SCREAMING_SNAKE_CASE : Optional[Any]=6 , _SCREAMING_SNAKE_CASE : Optional[int]=12 , _SCREAMING_SNAKE_CASE : List[Any]=768 , _SCREAMING_SNAKE_CASE : Any=4 * 768 , _SCREAMING_SNAKE_CASE : Tuple=0.1 , _SCREAMING_SNAKE_CASE : Tuple=0.1 , _SCREAMING_SNAKE_CASE : List[str]="gelu" , _SCREAMING_SNAKE_CASE : List[Any]=0.0_2 , _SCREAMING_SNAKE_CASE : Optional[int]=0.1 , _SCREAMING_SNAKE_CASE : Optional[Any]=0.2 , _SCREAMING_SNAKE_CASE : List[str]=0 , **_SCREAMING_SNAKE_CASE : Tuple , ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings SCREAMING_SNAKE_CASE : Optional[Any] = sinusoidal_pos_embds SCREAMING_SNAKE_CASE : List[Any] = n_layers SCREAMING_SNAKE_CASE : int = n_heads SCREAMING_SNAKE_CASE : Any = dim SCREAMING_SNAKE_CASE : int = hidden_dim SCREAMING_SNAKE_CASE : str = dropout SCREAMING_SNAKE_CASE : int = attention_dropout SCREAMING_SNAKE_CASE : str = activation SCREAMING_SNAKE_CASE : int = initializer_range SCREAMING_SNAKE_CASE : List[Any] = qa_dropout SCREAMING_SNAKE_CASE : Any = seq_classif_dropout super().__init__(**lowercase__ , pad_token_id=lowercase__ ) class lowerCAmelCase__ ( lowerCamelCase__ ): '''simple docstring''' @property def _lowerCAmelCase ( self : int ) -> Union[str, Any]: """simple docstring""" if self.task == "multiple-choice": SCREAMING_SNAKE_CASE : int = {0: 'batch', 1: 'choice', 2: 'sequence'} else: SCREAMING_SNAKE_CASE : Any = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
265
'''simple docstring''' from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar lowerCAmelCase__ = TypeVar('''KEY''') lowerCAmelCase__ = TypeVar('''VAL''') @dataclass(frozen=lowerCamelCase__ , slots=lowerCamelCase__ ) class lowercase_ (Generic[KEY, VAL] ): """simple docstring""" SCREAMING_SNAKE_CASE : KEY SCREAMING_SNAKE_CASE : VAL class lowercase_ (_Item ): """simple docstring""" def __init__( self : Optional[int] ): super().__init__(lowercase__ ,lowercase__ ) def __bool__( self : List[str] ): return False lowerCAmelCase__ = _DeletedItem() class lowercase_ (MutableMapping[KEY, VAL] ): """simple docstring""" def __init__( self : Dict ,lowercase__ : int = 8 ,lowercase__ : float = 0.7_5 ): __lowercase = initial_block_size __lowercase = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 __lowercase = capacity_factor __lowercase = 0 def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : KEY ): return hash(lowercase__ ) % len(self._buckets ) def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : int ): return (ind + 1) % len(self._buckets ) def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : int ,lowercase__ : KEY ,lowercase__ : VAL ): __lowercase = self._buckets[ind] if not stored: __lowercase = _Item(lowercase__ ,lowercase__ ) self._len += 1 return True elif stored.key == key: __lowercase = _Item(lowercase__ ,lowercase__ ) return True else: return False def SCREAMING_SNAKE_CASE ( self : Dict ): __lowercase = len(self._buckets ) * self._capacity_factor return len(self ) >= int(lowercase__ ) def SCREAMING_SNAKE_CASE ( self : int ): if len(self._buckets ) <= self._initial_block_size: return False __lowercase = len(self._buckets ) * self._capacity_factor / 2 return len(self ) < limit def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : int ): __lowercase = self._buckets __lowercase = [None] * new_size __lowercase = 0 for item in old_buckets: if item: self._add_item(item.key ,item.val ) def SCREAMING_SNAKE_CASE ( self : str ): self._resize(len(self._buckets ) * 2 ) def SCREAMING_SNAKE_CASE ( self : Tuple ): self._resize(len(self._buckets ) // 2 ) def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : KEY ): __lowercase = self._get_bucket_index(lowercase__ ) for _ in range(len(self._buckets ) ): yield ind __lowercase = self._get_next_ind(lowercase__ ) def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : KEY ,lowercase__ : VAL ): for ind in self._iterate_buckets(lowercase__ ): if self._try_set(lowercase__ ,lowercase__ ,lowercase__ ): break def __setitem__( self : str ,lowercase__ : KEY ,lowercase__ : VAL ): if self._is_full(): self._size_up() self._add_item(lowercase__ ,lowercase__ ) def __delitem__( self : Tuple ,lowercase__ : KEY ): for ind in self._iterate_buckets(lowercase__ ): __lowercase = self._buckets[ind] if item is None: raise KeyError(lowercase__ ) if item is _deleted: continue if item.key == key: __lowercase = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self : Tuple ,lowercase__ : KEY ): for ind in self._iterate_buckets(lowercase__ ): __lowercase = self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(lowercase__ ) def __len__( self : Optional[int] ): return self._len def __iter__( self : str ): yield from (item.key for item in self._buckets if item) def __repr__( self : Optional[Any] ): __lowercase = ''' ,'''.join( F"{item.key}: {item.val}" for item in self._buckets if item ) return F"HashMap({val_string})"
41
0
import doctest from collections import deque import numpy as np class __A : def __init__( self :Optional[Any] ): '''simple docstring''' __magic_name__ : str =[2, 1, 2, -1] __magic_name__ : Tuple =[1, 2, 3, 4] def A__ ( self :Optional[Any] ): '''simple docstring''' __magic_name__ : List[Any] =len(self.first_signal ) __magic_name__ : Dict =len(self.second_signal ) __magic_name__ : int =max(lowercase__ , lowercase__ ) # create a zero matrix of max_length x max_length __magic_name__ : Dict =[[0] * max_length for i in range(lowercase__ )] # fills the smaller signal with zeros to make both signals of same length if length_first_signal < length_second_signal: self.first_signal += [0] * (max_length - length_first_signal) elif length_first_signal > length_second_signal: self.second_signal += [0] * (max_length - length_second_signal) for i in range(lowercase__ ): __magic_name__ : Union[str, Any] =deque(self.second_signal ) rotated_signal.rotate(lowercase__ ) for j, item in enumerate(lowercase__ ): matrix[i][j] += item # multiply the matrix with the first signal __magic_name__ : Tuple =np.matmul(np.transpose(lowercase__ ) , np.transpose(self.first_signal ) ) # rounding-off to two decimal places return [round(lowercase__ , 2 ) for i in final_signal] if __name__ == "__main__": doctest.testmod()
21
'''simple docstring''' from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from transformers.modeling_outputs import BaseModelOutput from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING lowerCAmelCase__ = logging.get_logger(__name__) @add_end_docstrings(lowerCamelCase__ ) class lowercase_ (lowerCamelCase__ ): """simple docstring""" def __init__( self : List[str] ,**lowercase__ : Tuple ): super().__init__(**lowercase__ ) if self.framework == "tf": raise ValueError(F"The {self.__class__} is only available in PyTorch." ) requires_backends(self ,'''vision''' ) self.check_model_type(lowercase__ ) def __call__( self : List[str] ,lowercase__ : Union[str, "Image.Image", List[Dict[str, Any]]] ,lowercase__ : Union[str, List[str]] = None ,**lowercase__ : str ,): if "text_queries" in kwargs: __lowercase = kwargs.pop('''text_queries''' ) if isinstance(lowercase__ ,(str, Image.Image) ): __lowercase = {'''image''': image, '''candidate_labels''': candidate_labels} else: __lowercase = image __lowercase = super().__call__(lowercase__ ,**lowercase__ ) return results def SCREAMING_SNAKE_CASE ( self : int ,**lowercase__ : List[Any] ): __lowercase = {} if "threshold" in kwargs: __lowercase = kwargs['''threshold'''] if "top_k" in kwargs: __lowercase = kwargs['''top_k'''] return {}, {}, postprocess_params def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : Optional[Any] ): __lowercase = load_image(inputs['''image'''] ) __lowercase = inputs['''candidate_labels'''] if isinstance(lowercase__ ,lowercase__ ): __lowercase = candidate_labels.split(''',''' ) __lowercase = torch.tensor([[image.height, image.width]] ,dtype=torch.intaa ) for i, candidate_label in enumerate(lowercase__ ): __lowercase = self.tokenizer(lowercase__ ,return_tensors=self.framework ) __lowercase = self.image_processor(lowercase__ ,return_tensors=self.framework ) yield { "is_last": i == len(lowercase__ ) - 1, "target_size": target_size, "candidate_label": candidate_label, **text_inputs, **image_features, } def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : List[str] ): __lowercase = model_inputs.pop('''target_size''' ) __lowercase = model_inputs.pop('''candidate_label''' ) __lowercase = model_inputs.pop('''is_last''' ) __lowercase = self.model(**lowercase__ ) __lowercase = {'''target_size''': target_size, '''candidate_label''': candidate_label, '''is_last''': is_last, **outputs} return model_outputs def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : int ,lowercase__ : List[Any]=0.1 ,lowercase__ : List[str]=None ): __lowercase = [] for model_output in model_outputs: __lowercase = model_output['''candidate_label'''] __lowercase = BaseModelOutput(lowercase__ ) __lowercase = self.image_processor.post_process_object_detection( outputs=lowercase__ ,threshold=lowercase__ ,target_sizes=model_output['''target_size'''] )[0] for index in outputs["scores"].nonzero(): __lowercase = outputs['''scores'''][index].item() __lowercase = self._get_bounding_box(outputs['''boxes'''][index][0] ) __lowercase = {'''score''': score, '''label''': label, '''box''': box} results.append(lowercase__ ) __lowercase = sorted(lowercase__ ,key=lambda lowercase__ : x["score"] ,reverse=lowercase__ ) if top_k: __lowercase = results[:top_k] return results def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : "torch.Tensor" ): if self.framework != "pt": raise ValueError('''The ZeroShotObjectDetectionPipeline is only available in PyTorch.''' ) __lowercase , __lowercase , __lowercase , __lowercase = box.int().tolist() __lowercase = { '''xmin''': xmin, '''ymin''': ymin, '''xmax''': xmax, '''ymax''': ymax, } return bbox
41
0
"""simple docstring""" from typing import TYPE_CHECKING from ..utils import _LazyModule UpperCAmelCase ={ "config": [ "EXTERNAL_DATA_FORMAT_SIZE_LIMIT", "OnnxConfig", "OnnxConfigWithPast", "OnnxSeq2SeqConfigWithPast", "PatchingSpec", ], "convert": ["export", "validate_model_outputs"], "features": ["FeaturesManager"], "utils": ["ParameterFormat", "compute_serialized_parameters_size"], } if TYPE_CHECKING: from .config import ( EXTERNAL_DATA_FORMAT_SIZE_LIMIT, OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast, PatchingSpec, ) from .convert import export, validate_model_outputs from .features import FeaturesManager from .utils import ParameterFormat, compute_serialized_parameters_size else: import sys UpperCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
617
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer from .base import PipelineTool class lowercase_ (lowerCamelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = 'facebook/bart-large-mnli' SCREAMING_SNAKE_CASE : Optional[Any] = ( 'This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which ' 'should be the text to classify, and `labels`, which should be the list of labels to use for classification. ' 'It returns the most likely label in the list of provided `labels` for the input text.' ) SCREAMING_SNAKE_CASE : Any = 'text_classifier' SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForSequenceClassification SCREAMING_SNAKE_CASE : Tuple = ['text', ['text']] SCREAMING_SNAKE_CASE : List[str] = ['text'] def SCREAMING_SNAKE_CASE ( self : List[Any] ): super().setup() __lowercase = self.model.config __lowercase = -1 for idx, label in config.idalabel.items(): if label.lower().startswith('''entail''' ): __lowercase = int(lowercase__ ) if self.entailment_id == -1: raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''' ) def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Dict ,lowercase__ : List[Any] ): __lowercase = labels return self.pre_processor( [text] * len(lowercase__ ) ,[F"This example is {label}" for label in labels] ,return_tensors='''pt''' ,padding='''max_length''' ,) def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int ): __lowercase = outputs.logits __lowercase = torch.argmax(logits[:, 2] ).item() return self._labels[label_id]
41
0
"""simple docstring""" from __future__ import annotations import time from math import sqrt # 1 for manhattan, 0 for euclidean lowerCAmelCase_ : Dict = 0 lowerCAmelCase_ : str = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] lowerCAmelCase_ : Optional[int] = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right lowerCAmelCase_ : str = tuple[int, int] class UpperCamelCase_ : def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> List[Any]: """simple docstring""" UpperCAmelCase = pos_x UpperCAmelCase = pos_y UpperCAmelCase = (pos_y, pos_x) UpperCAmelCase = goal_x UpperCAmelCase = goal_y UpperCAmelCase = g_cost UpperCAmelCase = parent UpperCAmelCase = self.calculate_heuristic() UpperCAmelCase = self.g_cost + self.h_cost def UpperCamelCase_ ( self ) -> str: """simple docstring""" UpperCAmelCase = self.pos_x - self.goal_x UpperCAmelCase = self.pos_y - self.goal_y if HEURISTIC == 1: return abs(lowercase__ ) + abs(lowercase__ ) else: return sqrt(dy**2 + dx**2 ) def __lt__( self , snake_case__ ) -> Dict: """simple docstring""" return self.f_cost < other.f_cost class UpperCamelCase_ : def __init__( self , snake_case__ , snake_case__ ) -> Optional[int]: """simple docstring""" UpperCAmelCase = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowercase__ ) UpperCAmelCase = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , lowercase__ ) UpperCAmelCase = [self.start] UpperCAmelCase = [] UpperCAmelCase = False def UpperCamelCase_ ( self ) -> Optional[int]: """simple docstring""" while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() UpperCAmelCase = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: return self.retrace_path(lowercase__ ) self.closed_nodes.append(lowercase__ ) UpperCAmelCase = self.get_successors(lowercase__ ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(lowercase__ ) else: # retrieve the best current path UpperCAmelCase = self.open_nodes.pop(self.open_nodes.index(lowercase__ ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(lowercase__ ) else: self.open_nodes.append(lowercase__ ) return [self.start.pos] def UpperCamelCase_ ( self , snake_case__ ) -> List[Any]: """simple docstring""" UpperCAmelCase = [] for action in delta: UpperCAmelCase = parent.pos_x + action[1] UpperCAmelCase = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowercase__ ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( lowercase__ , lowercase__ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowercase__ , ) ) return successors def UpperCamelCase_ ( self , snake_case__ ) -> str: """simple docstring""" UpperCAmelCase = node UpperCAmelCase = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) UpperCAmelCase = current_node.parent path.reverse() return path class UpperCamelCase_ : def __init__( self , snake_case__ , snake_case__ ) -> List[Any]: """simple docstring""" UpperCAmelCase = AStar(lowercase__ , lowercase__ ) UpperCAmelCase = AStar(lowercase__ , lowercase__ ) UpperCAmelCase = False def UpperCamelCase_ ( self ) -> int: """simple docstring""" while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes: self.fwd_astar.open_nodes.sort() self.bwd_astar.open_nodes.sort() UpperCAmelCase = self.fwd_astar.open_nodes.pop(0 ) UpperCAmelCase = self.bwd_astar.open_nodes.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: return self.retrace_bidirectional_path( lowercase__ , lowercase__ ) self.fwd_astar.closed_nodes.append(lowercase__ ) self.bwd_astar.closed_nodes.append(lowercase__ ) UpperCAmelCase = current_bwd_node UpperCAmelCase = current_fwd_node UpperCAmelCase = { self.fwd_astar: self.fwd_astar.get_successors(lowercase__ ), self.bwd_astar: self.bwd_astar.get_successors(lowercase__ ), } for astar in [self.fwd_astar, self.bwd_astar]: for child_node in successors[astar]: if child_node in astar.closed_nodes: continue if child_node not in astar.open_nodes: astar.open_nodes.append(lowercase__ ) else: # retrieve the best current path UpperCAmelCase = astar.open_nodes.pop( astar.open_nodes.index(lowercase__ ) ) if child_node.g_cost < better_node.g_cost: astar.open_nodes.append(lowercase__ ) else: astar.open_nodes.append(lowercase__ ) return [self.fwd_astar.start.pos] def UpperCamelCase_ ( self , snake_case__ , snake_case__ ) -> Dict: """simple docstring""" UpperCAmelCase = self.fwd_astar.retrace_path(lowercase__ ) UpperCAmelCase = self.bwd_astar.retrace_path(lowercase__ ) bwd_path.pop() bwd_path.reverse() UpperCAmelCase = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] lowerCAmelCase_ : Tuple = (0, 0) lowerCAmelCase_ : int = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) lowerCAmelCase_ : Optional[Any] = time.time() lowerCAmelCase_ : str = AStar(init, goal) lowerCAmelCase_ : Dict = a_star.search() lowerCAmelCase_ : int = time.time() - start_time print(F'AStar execution time = {end_time:f} seconds') lowerCAmelCase_ : Union[str, Any] = time.time() lowerCAmelCase_ : Optional[Any] = BidirectionalAStar(init, goal) lowerCAmelCase_ : List[str] = time.time() - bd_start_time print(F'BidirectionalAStar execution time = {bd_end_time:f} seconds')
673
'''simple docstring''' from collections.abc import Callable class lowercase_ : """simple docstring""" def __init__( self : Optional[int] ,lowercase__ : Callable | None = None ): # Stores actual heap items. __lowercase = [] # Stores indexes of each item for supporting updates and deletion. __lowercase = {} # Stores current size of heap. __lowercase = 0 # Stores function used to evaluate the score of an item on which basis ordering # will be done. __lowercase = key or (lambda lowercase__ : x) def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : int ): return int((i - 1) / 2 ) if i > 0 else None def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : int ): __lowercase = int(2 * i + 1 ) return left if 0 < left < self.size else None def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : int ): __lowercase = int(2 * i + 2 ) return right if 0 < right < self.size else None def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : int ,lowercase__ : int ): __lowercase , __lowercase = ( self.pos_map[self.arr[j][0]], self.pos_map[self.arr[i][0]], ) # Then swap the items in the list. __lowercase , __lowercase = self.arr[j], self.arr[i] def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : int ,lowercase__ : int ): return self.arr[i][1] < self.arr[j][1] def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int ): __lowercase = self._left(lowercase__ ) __lowercase = self._right(lowercase__ ) __lowercase = i if left is not None and not self._cmp(lowercase__ ,lowercase__ ): __lowercase = left if right is not None and not self._cmp(lowercase__ ,lowercase__ ): __lowercase = right return valid_parent def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int ): __lowercase = self._parent(lowercase__ ) while parent is not None and not self._cmp(lowercase__ ,lowercase__ ): self._swap(lowercase__ ,lowercase__ ) __lowercase , __lowercase = parent, self._parent(lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : int ): __lowercase = self._get_valid_parent(lowercase__ ) while valid_parent != index: self._swap(lowercase__ ,lowercase__ ) __lowercase , __lowercase = valid_parent, self._get_valid_parent(lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : int ,lowercase__ : int ): if item not in self.pos_map: return __lowercase = self.pos_map[item] __lowercase = [item, self.key(lowercase__ )] # Make sure heap is right in both up and down direction. # Ideally only one of them will make any change. self._heapify_up(lowercase__ ) self._heapify_down(lowercase__ ) def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int ): if item not in self.pos_map: return __lowercase = self.pos_map[item] del self.pos_map[item] __lowercase = self.arr[self.size - 1] __lowercase = index self.size -= 1 # Make sure heap is right in both up and down direction. Ideally only one # of them will make any change- so no performance loss in calling both. if self.size > index: self._heapify_up(lowercase__ ) self._heapify_down(lowercase__ ) def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : int ,lowercase__ : int ): __lowercase = len(self.arr ) if arr_len == self.size: self.arr.append([item, self.key(lowercase__ )] ) else: __lowercase = [item, self.key(lowercase__ )] __lowercase = self.size self.size += 1 self._heapify_up(self.size - 1 ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): return self.arr[0] if self.size else None def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): __lowercase = self.get_top() if top_item_tuple: self.delete_item(top_item_tuple[0] ) return top_item_tuple def _A ( ): """simple docstring""" if __name__ == "__main__": import doctest doctest.testmod()
41
0
from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, ) @flax.struct.dataclass class _UpperCamelCase ( lowerCamelCase__ ): '''simple docstring''' a_ : jnp.ndarray a_ : jnp.ndarray class _UpperCamelCase ( nn.Module ): '''simple docstring''' a_ : int a_ : Tuple[int] = (16, 32, 96, 256) a_ : jnp.dtype = jnp.floataa def _snake_case ( self : Dict ): '''simple docstring''' __lowerCamelCase : Dict = nn.Conv( self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) __lowerCamelCase : List[str] = [] for i in range(len(self.block_out_channels ) - 1 ): __lowerCamelCase : List[Any] = self.block_out_channels[i] __lowerCamelCase : Tuple = self.block_out_channels[i + 1] __lowerCamelCase : str = nn.Conv( lowercase__ , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) blocks.append(lowercase__ ) __lowerCamelCase : str = nn.Conv( lowercase__ , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) blocks.append(lowercase__ ) __lowerCamelCase : Dict = blocks __lowerCamelCase : Dict = nn.Conv( self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) def __call__( self : List[str] , _lowerCamelCase : Optional[int] ): '''simple docstring''' __lowerCamelCase : int = self.conv_in(lowercase__ ) __lowerCamelCase : Union[str, Any] = nn.silu(lowercase__ ) for block in self.blocks: __lowerCamelCase : List[str] = block(lowercase__ ) __lowerCamelCase : Optional[int] = nn.silu(lowercase__ ) __lowerCamelCase : Dict = self.conv_out(lowercase__ ) return embedding @flax_register_to_config class _UpperCamelCase ( nn.Module,lowerCamelCase__,lowerCamelCase__ ): '''simple docstring''' a_ : int = 32 a_ : int = 4 a_ : Tuple[str] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) a_ : Union[bool, Tuple[bool]] = False a_ : Tuple[int] = (320, 640, 1280, 1280) a_ : int = 2 a_ : Union[int, Tuple[int]] = 8 a_ : Optional[Union[int, Tuple[int]]] = None a_ : int = 1280 a_ : float = 0.0 a_ : bool = False a_ : jnp.dtype = jnp.floataa a_ : bool = True a_ : int = 0 a_ : str = "rgb" a_ : Tuple[int] = (16, 32, 96, 256) def _snake_case ( self : Optional[int] , _lowerCamelCase : jax.random.KeyArray ): '''simple docstring''' __lowerCamelCase : int = (1, self.in_channels, self.sample_size, self.sample_size) __lowerCamelCase : Tuple = jnp.zeros(lowercase__ , dtype=jnp.floataa ) __lowerCamelCase : Tuple = jnp.ones((1,) , dtype=jnp.intaa ) __lowerCamelCase : Optional[Any] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa ) __lowerCamelCase : Tuple = (1, 3, self.sample_size * 8, self.sample_size * 8) __lowerCamelCase : Union[str, Any] = jnp.zeros(lowercase__ , dtype=jnp.floataa ) __lowerCamelCase , __lowerCamelCase : Union[str, Any] = jax.random.split(lowercase__ ) __lowerCamelCase : Dict = {"""params""": params_rng, """dropout""": dropout_rng} return self.init(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )["params"] def _snake_case ( self : Any ): '''simple docstring''' __lowerCamelCase : List[Any] = self.block_out_channels __lowerCamelCase : Any = block_out_channels[0] * 4 # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. __lowerCamelCase : Tuple = self.num_attention_heads or self.attention_head_dim # input __lowerCamelCase : str = nn.Conv( block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) # time __lowerCamelCase : str = FlaxTimesteps( block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift ) __lowerCamelCase : Optional[Any] = FlaxTimestepEmbedding(lowercase__ , dtype=self.dtype ) __lowerCamelCase : str = FlaxControlNetConditioningEmbedding( conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , ) __lowerCamelCase : Union[str, Any] = self.only_cross_attention if isinstance(lowercase__ , lowercase__ ): __lowerCamelCase : int = (only_cross_attention,) * len(self.down_block_types ) if isinstance(lowercase__ , lowercase__ ): __lowerCamelCase : Optional[int] = (num_attention_heads,) * len(self.down_block_types ) # down __lowerCamelCase : str = [] __lowerCamelCase : Optional[int] = [] __lowerCamelCase : List[Any] = block_out_channels[0] __lowerCamelCase : Any = nn.Conv( lowercase__ , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(lowercase__ ) for i, down_block_type in enumerate(self.down_block_types ): __lowerCamelCase : int = output_channel __lowerCamelCase : Union[str, Any] = block_out_channels[i] __lowerCamelCase : Dict = i == len(lowercase__ ) - 1 if down_block_type == "CrossAttnDownBlock2D": __lowerCamelCase : List[str] = FlaxCrossAttnDownBlockaD( in_channels=lowercase__ , out_channels=lowercase__ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , ) else: __lowerCamelCase : str = FlaxDownBlockaD( in_channels=lowercase__ , out_channels=lowercase__ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , ) down_blocks.append(lowercase__ ) for _ in range(self.layers_per_block ): __lowerCamelCase : Any = nn.Conv( lowercase__ , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(lowercase__ ) if not is_final_block: __lowerCamelCase : Dict = nn.Conv( lowercase__ , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(lowercase__ ) __lowerCamelCase : Dict = down_blocks __lowerCamelCase : Union[str, Any] = controlnet_down_blocks # mid __lowerCamelCase : int = block_out_channels[-1] __lowerCamelCase : Optional[Any] = FlaxUNetMidBlockaDCrossAttn( in_channels=lowercase__ , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , ) __lowerCamelCase : Optional[Any] = nn.Conv( lowercase__ , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) def __call__( self : Optional[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : Any , _lowerCamelCase : List[Any] , _lowerCamelCase : str , _lowerCamelCase : float = 1.0 , _lowerCamelCase : bool = True , _lowerCamelCase : bool = False , ): '''simple docstring''' __lowerCamelCase : Union[str, Any] = self.controlnet_conditioning_channel_order if channel_order == "bgr": __lowerCamelCase : Dict = jnp.flip(lowercase__ , axis=1 ) # 1. time if not isinstance(lowercase__ , jnp.ndarray ): __lowerCamelCase : int = jnp.array([timesteps] , dtype=jnp.intaa ) elif isinstance(lowercase__ , jnp.ndarray ) and len(timesteps.shape ) == 0: __lowerCamelCase : int = timesteps.astype(dtype=jnp.floataa ) __lowerCamelCase : Dict = jnp.expand_dims(lowercase__ , 0 ) __lowerCamelCase : Any = self.time_proj(lowercase__ ) __lowerCamelCase : Dict = self.time_embedding(lowercase__ ) # 2. pre-process __lowerCamelCase : int = jnp.transpose(lowercase__ , (0, 2, 3, 1) ) __lowerCamelCase : Optional[Any] = self.conv_in(lowercase__ ) __lowerCamelCase : Any = jnp.transpose(lowercase__ , (0, 2, 3, 1) ) __lowerCamelCase : List[str] = self.controlnet_cond_embedding(lowercase__ ) sample += controlnet_cond # 3. down __lowerCamelCase : Optional[int] = (sample,) for down_block in self.down_blocks: if isinstance(lowercase__ , lowercase__ ): __lowerCamelCase , __lowerCamelCase : Union[str, Any] = down_block(lowercase__ , lowercase__ , lowercase__ , deterministic=not train ) else: __lowerCamelCase , __lowerCamelCase : Dict = down_block(lowercase__ , lowercase__ , deterministic=not train ) down_block_res_samples += res_samples # 4. mid __lowerCamelCase : str = self.mid_block(lowercase__ , lowercase__ , lowercase__ , deterministic=not train ) # 5. contronet blocks __lowerCamelCase : Dict = () for down_block_res_sample, controlnet_block in zip(lowercase__ , self.controlnet_down_blocks ): __lowerCamelCase : Dict = controlnet_block(lowercase__ ) controlnet_down_block_res_samples += (down_block_res_sample,) __lowerCamelCase : int = controlnet_down_block_res_samples __lowerCamelCase : int = self.controlnet_mid_block(lowercase__ ) # 6. scaling __lowerCamelCase : int = [sample * conditioning_scale for sample in down_block_res_samples] mid_block_res_sample *= conditioning_scale if not return_dict: return (down_block_res_samples, mid_block_res_sample) return FlaxControlNetOutput( down_block_res_samples=lowercase__ , mid_block_res_sample=lowercase__ )
519
'''simple docstring''' import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class lowercase_ (lowerCamelCase__ ): """simple docstring""" def __init__( self : List[str] ): __lowercase = [] def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : str ,**lowercase__ : Any ): self.events.append('''on_init_end''' ) def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : List[str] ,lowercase__ : Optional[Any] ,lowercase__ : int ,**lowercase__ : Optional[int] ): self.events.append('''on_train_begin''' ) def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Tuple ,lowercase__ : int ,lowercase__ : int ,**lowercase__ : List[str] ): self.events.append('''on_train_end''' ) def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Any ,lowercase__ : Union[str, Any] ,lowercase__ : Any ,**lowercase__ : Optional[Any] ): self.events.append('''on_epoch_begin''' ) def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : int ,lowercase__ : Any ,**lowercase__ : Optional[int] ): self.events.append('''on_epoch_end''' ) def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : List[str] ,lowercase__ : str ,lowercase__ : List[str] ,**lowercase__ : List[str] ): self.events.append('''on_step_begin''' ) def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Union[str, Any] ,lowercase__ : int ,lowercase__ : Optional[int] ,**lowercase__ : Dict ): self.events.append('''on_step_end''' ) def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Any ,lowercase__ : Tuple ,lowercase__ : Union[str, Any] ,**lowercase__ : Any ): self.events.append('''on_evaluate''' ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : str ,lowercase__ : Union[str, Any] ,lowercase__ : int ,**lowercase__ : Optional[Any] ): self.events.append('''on_predict''' ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[Any] ,**lowercase__ : int ): self.events.append('''on_save''' ) def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : List[str] ,**lowercase__ : List[str] ): self.events.append('''on_log''' ) def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : str ,lowercase__ : int ,lowercase__ : Dict ,**lowercase__ : str ): self.events.append('''on_prediction_step''' ) @require_torch class lowercase_ (unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self : List[str] ): __lowercase = tempfile.mkdtemp() def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): shutil.rmtree(self.output_dir ) def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Optional[Any]=0 ,lowercase__ : Any=0 ,lowercase__ : Tuple=6_4 ,lowercase__ : Optional[int]=6_4 ,lowercase__ : Optional[Any]=None ,lowercase__ : str=False ,**lowercase__ : Any ): # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure # its set to False since the tests later on depend on its value. __lowercase = RegressionDataset(length=lowercase__ ) __lowercase = RegressionDataset(length=lowercase__ ) __lowercase = RegressionModelConfig(a=lowercase__ ,b=lowercase__ ) __lowercase = RegressionPreTrainedModel(lowercase__ ) __lowercase = TrainingArguments(self.output_dir ,disable_tqdm=lowercase__ ,report_to=[] ,**lowercase__ ) return Trainer( lowercase__ ,lowercase__ ,train_dataset=lowercase__ ,eval_dataset=lowercase__ ,callbacks=lowercase__ ,) def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Optional[int] ,lowercase__ : Any ): self.assertEqual(len(lowercase__ ) ,len(lowercase__ ) ) # Order doesn't matter __lowercase = sorted(lowercase__ ,key=lambda lowercase__ : cb.__name__ if isinstance(lowercase__ ,lowercase__ ) else cb.__class__.__name__ ) __lowercase = sorted(lowercase__ ,key=lambda lowercase__ : cb.__name__ if isinstance(lowercase__ ,lowercase__ ) else cb.__class__.__name__ ) for cba, cba in zip(lowercase__ ,lowercase__ ): if isinstance(lowercase__ ,lowercase__ ) and isinstance(lowercase__ ,lowercase__ ): self.assertEqual(lowercase__ ,lowercase__ ) elif isinstance(lowercase__ ,lowercase__ ) and not isinstance(lowercase__ ,lowercase__ ): self.assertEqual(lowercase__ ,cba.__class__ ) elif not isinstance(lowercase__ ,lowercase__ ) and isinstance(lowercase__ ,lowercase__ ): self.assertEqual(cba.__class__ ,lowercase__ ) else: self.assertEqual(lowercase__ ,lowercase__ ) def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Union[str, Any] ): __lowercase = ['''on_init_end''', '''on_train_begin'''] __lowercase = 0 __lowercase = len(trainer.get_eval_dataloader() ) __lowercase = ['''on_prediction_step'''] * len(trainer.get_eval_dataloader() ) + ['''on_log''', '''on_evaluate'''] for _ in range(trainer.state.num_train_epochs ): expected_events.append('''on_epoch_begin''' ) for _ in range(lowercase__ ): step += 1 expected_events += ["on_step_begin", "on_step_end"] if step % trainer.args.logging_steps == 0: expected_events.append('''on_log''' ) if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0: expected_events.append('''on_save''' ) expected_events.append('''on_epoch_end''' ) if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def SCREAMING_SNAKE_CASE ( self : str ): __lowercase = self.get_trainer() __lowercase = DEFAULT_CALLBACKS.copy() + [ProgressCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) # Callbacks passed at init are added to the default callbacks __lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ) expected_callbacks.append(lowercase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback __lowercase = self.get_trainer(disable_tqdm=lowercase__ ) __lowercase = DEFAULT_CALLBACKS.copy() + [PrinterCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): __lowercase = DEFAULT_CALLBACKS.copy() + [ProgressCallback] __lowercase = self.get_trainer() # We can add, pop, or remove by class name trainer.remove_callback(lowercase__ ) expected_callbacks.remove(lowercase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) __lowercase = self.get_trainer() __lowercase = trainer.pop_callback(lowercase__ ) self.assertEqual(cb.__class__ ,lowercase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) trainer.add_callback(lowercase__ ) expected_callbacks.insert(0 ,lowercase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) # We can also add, pop, or remove by instance __lowercase = self.get_trainer() __lowercase = trainer.callback_handler.callbacks[0] trainer.remove_callback(lowercase__ ) expected_callbacks.remove(lowercase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) __lowercase = self.get_trainer() __lowercase = trainer.callback_handler.callbacks[0] __lowercase = trainer.pop_callback(lowercase__ ) self.assertEqual(lowercase__ ,lowercase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) trainer.add_callback(lowercase__ ) expected_callbacks.insert(0 ,lowercase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Dict ): import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested warnings.simplefilter(action='''ignore''' ,category=lowercase__ ) __lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ) trainer.train() __lowercase = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) ) # Independent log/save/eval __lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,logging_steps=5 ) trainer.train() __lowercase = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) ) __lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,save_steps=5 ) trainer.train() __lowercase = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) ) __lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,eval_steps=5 ,evaluation_strategy='''steps''' ) trainer.train() __lowercase = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) ) __lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,evaluation_strategy='''epoch''' ) trainer.train() __lowercase = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) ) # A bit of everything __lowercase = self.get_trainer( callbacks=[MyTestTrainerCallback] ,logging_steps=3 ,save_steps=1_0 ,eval_steps=5 ,evaluation_strategy='''steps''' ,) trainer.train() __lowercase = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) ) # warning should be emitted for duplicated callbacks with patch('''transformers.trainer_callback.logger.warning''' ) as warn_mock: __lowercase = self.get_trainer( callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] ,) assert str(lowercase__ ) in warn_mock.call_args[0][0]
41
0
"""simple docstring""" import argparse import torch from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel from transformers.utils import logging logging.set_verbosity_info() def a_ ( __a , __a , __a , __a ): A__ = FunnelConfig.from_json_file(A__ ) print(f'''Building PyTorch model from configuration: {config}''' ) A__ = FunnelBaseModel(A__ ) if base_model else FunnelModel(A__ ) # Load weights from tf checkpoint load_tf_weights_in_funnel(A__ , A__ , A__ ) # Save pytorch-model print(f'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , A__ ) if __name__ == "__main__": __snake_case : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.', ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--base_model', action='store_true', help='Whether you want just the base model (no decoder) or not.' ) __snake_case : str = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model )
571
'''simple docstring''' from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, ) @flax.struct.dataclass class lowercase_ (lowerCamelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE : jnp.ndarray SCREAMING_SNAKE_CASE : jnp.ndarray class lowercase_ (nn.Module ): """simple docstring""" SCREAMING_SNAKE_CASE : int SCREAMING_SNAKE_CASE : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6) SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa def SCREAMING_SNAKE_CASE ( self : Dict ): __lowercase = nn.Conv( self.block_out_channels[0] ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) __lowercase = [] for i in range(len(self.block_out_channels ) - 1 ): __lowercase = self.block_out_channels[i] __lowercase = self.block_out_channels[i + 1] __lowercase = nn.Conv( lowercase__ ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) blocks.append(lowercase__ ) __lowercase = nn.Conv( lowercase__ ,kernel_size=(3, 3) ,strides=(2, 2) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) blocks.append(lowercase__ ) __lowercase = blocks __lowercase = nn.Conv( self.conditioning_embedding_channels ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,) def __call__( self : List[str] ,lowercase__ : Optional[int] ): __lowercase = self.conv_in(lowercase__ ) __lowercase = nn.silu(lowercase__ ) for block in self.blocks: __lowercase = block(lowercase__ ) __lowercase = nn.silu(lowercase__ ) __lowercase = self.conv_out(lowercase__ ) return embedding @flax_register_to_config class lowercase_ (nn.Module , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE : int = 3_2 SCREAMING_SNAKE_CASE : int = 4 SCREAMING_SNAKE_CASE : Tuple[str] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) SCREAMING_SNAKE_CASE : Union[bool, Tuple[bool]] = False SCREAMING_SNAKE_CASE : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0) SCREAMING_SNAKE_CASE : int = 2 SCREAMING_SNAKE_CASE : Union[int, Tuple[int]] = 8 SCREAMING_SNAKE_CASE : Optional[Union[int, Tuple[int]]] = None SCREAMING_SNAKE_CASE : int = 1_2_8_0 SCREAMING_SNAKE_CASE : float = 0.0 SCREAMING_SNAKE_CASE : bool = False SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa SCREAMING_SNAKE_CASE : bool = True SCREAMING_SNAKE_CASE : int = 0 SCREAMING_SNAKE_CASE : str = "rgb" SCREAMING_SNAKE_CASE : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6) def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : jax.random.KeyArray ): # init input tensors __lowercase = (1, self.in_channels, self.sample_size, self.sample_size) __lowercase = jnp.zeros(lowercase__ ,dtype=jnp.floataa ) __lowercase = jnp.ones((1,) ,dtype=jnp.intaa ) __lowercase = jnp.zeros((1, 1, self.cross_attention_dim) ,dtype=jnp.floataa ) __lowercase = (1, 3, self.sample_size * 8, self.sample_size * 8) __lowercase = jnp.zeros(lowercase__ ,dtype=jnp.floataa ) __lowercase , __lowercase = jax.random.split(lowercase__ ) __lowercase = {'''params''': params_rng, '''dropout''': dropout_rng} return self.init(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )["params"] def SCREAMING_SNAKE_CASE ( self : Any ): __lowercase = self.block_out_channels __lowercase = block_out_channels[0] * 4 # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. __lowercase = self.num_attention_heads or self.attention_head_dim # input __lowercase = nn.Conv( block_out_channels[0] ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) # time __lowercase = FlaxTimesteps( block_out_channels[0] ,flip_sin_to_cos=self.flip_sin_to_cos ,freq_shift=self.config.freq_shift ) __lowercase = FlaxTimestepEmbedding(lowercase__ ,dtype=self.dtype ) __lowercase = FlaxControlNetConditioningEmbedding( conditioning_embedding_channels=block_out_channels[0] ,block_out_channels=self.conditioning_embedding_out_channels ,) __lowercase = self.only_cross_attention if isinstance(lowercase__ ,lowercase__ ): __lowercase = (only_cross_attention,) * len(self.down_block_types ) if isinstance(lowercase__ ,lowercase__ ): __lowercase = (num_attention_heads,) * len(self.down_block_types ) # down __lowercase = [] __lowercase = [] __lowercase = block_out_channels[0] __lowercase = nn.Conv( lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,) controlnet_down_blocks.append(lowercase__ ) for i, down_block_type in enumerate(self.down_block_types ): __lowercase = output_channel __lowercase = block_out_channels[i] __lowercase = i == len(lowercase__ ) - 1 if down_block_type == "CrossAttnDownBlock2D": __lowercase = FlaxCrossAttnDownBlockaD( in_channels=lowercase__ ,out_channels=lowercase__ ,dropout=self.dropout ,num_layers=self.layers_per_block ,num_attention_heads=num_attention_heads[i] ,add_downsample=not is_final_block ,use_linear_projection=self.use_linear_projection ,only_cross_attention=only_cross_attention[i] ,dtype=self.dtype ,) else: __lowercase = FlaxDownBlockaD( in_channels=lowercase__ ,out_channels=lowercase__ ,dropout=self.dropout ,num_layers=self.layers_per_block ,add_downsample=not is_final_block ,dtype=self.dtype ,) down_blocks.append(lowercase__ ) for _ in range(self.layers_per_block ): __lowercase = nn.Conv( lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,) controlnet_down_blocks.append(lowercase__ ) if not is_final_block: __lowercase = nn.Conv( lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,) controlnet_down_blocks.append(lowercase__ ) __lowercase = down_blocks __lowercase = controlnet_down_blocks # mid __lowercase = block_out_channels[-1] __lowercase = FlaxUNetMidBlockaDCrossAttn( in_channels=lowercase__ ,dropout=self.dropout ,num_attention_heads=num_attention_heads[-1] ,use_linear_projection=self.use_linear_projection ,dtype=self.dtype ,) __lowercase = nn.Conv( lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,) def __call__( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Any ,lowercase__ : List[Any] ,lowercase__ : str ,lowercase__ : float = 1.0 ,lowercase__ : bool = True ,lowercase__ : bool = False ,): __lowercase = self.controlnet_conditioning_channel_order if channel_order == "bgr": __lowercase = jnp.flip(lowercase__ ,axis=1 ) # 1. time if not isinstance(lowercase__ ,jnp.ndarray ): __lowercase = jnp.array([timesteps] ,dtype=jnp.intaa ) elif isinstance(lowercase__ ,jnp.ndarray ) and len(timesteps.shape ) == 0: __lowercase = timesteps.astype(dtype=jnp.floataa ) __lowercase = jnp.expand_dims(lowercase__ ,0 ) __lowercase = self.time_proj(lowercase__ ) __lowercase = self.time_embedding(lowercase__ ) # 2. pre-process __lowercase = jnp.transpose(lowercase__ ,(0, 2, 3, 1) ) __lowercase = self.conv_in(lowercase__ ) __lowercase = jnp.transpose(lowercase__ ,(0, 2, 3, 1) ) __lowercase = self.controlnet_cond_embedding(lowercase__ ) sample += controlnet_cond # 3. down __lowercase = (sample,) for down_block in self.down_blocks: if isinstance(lowercase__ ,lowercase__ ): __lowercase , __lowercase = down_block(lowercase__ ,lowercase__ ,lowercase__ ,deterministic=not train ) else: __lowercase , __lowercase = down_block(lowercase__ ,lowercase__ ,deterministic=not train ) down_block_res_samples += res_samples # 4. mid __lowercase = self.mid_block(lowercase__ ,lowercase__ ,lowercase__ ,deterministic=not train ) # 5. contronet blocks __lowercase = () for down_block_res_sample, controlnet_block in zip(lowercase__ ,self.controlnet_down_blocks ): __lowercase = controlnet_block(lowercase__ ) controlnet_down_block_res_samples += (down_block_res_sample,) __lowercase = controlnet_down_block_res_samples __lowercase = self.controlnet_mid_block(lowercase__ ) # 6. scaling __lowercase = [sample * conditioning_scale for sample in down_block_res_samples] mid_block_res_sample *= conditioning_scale if not return_dict: return (down_block_res_samples, mid_block_res_sample) return FlaxControlNetOutput( down_block_res_samples=lowercase__ ,mid_block_res_sample=lowercase__ )
41
0
import os import tempfile import unittest from transformers import NezhaConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, ) from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST class __magic_name__ : '''simple docstring''' def __init__( self: Dict , _lowerCamelCase: Dict , _lowerCamelCase: int=13 , _lowerCamelCase: List[str]=7 , _lowerCamelCase: int=True , _lowerCamelCase: int=True , _lowerCamelCase: Union[str, Any]=True , _lowerCamelCase: List[Any]=True , _lowerCamelCase: str=99 , _lowerCamelCase: Optional[Any]=32 , _lowerCamelCase: Union[str, Any]=5 , _lowerCamelCase: List[Any]=4 , _lowerCamelCase: str=37 , _lowerCamelCase: Tuple="gelu" , _lowerCamelCase: List[Any]=0.1 , _lowerCamelCase: Dict=0.1 , _lowerCamelCase: int=1_28 , _lowerCamelCase: Dict=32 , _lowerCamelCase: Dict=16 , _lowerCamelCase: Any=2 , _lowerCamelCase: int=0.02 , _lowerCamelCase: List[str]=3 , _lowerCamelCase: Dict=4 , _lowerCamelCase: Optional[int]=None , ): SCREAMING_SNAKE_CASE_ = parent SCREAMING_SNAKE_CASE_ = batch_size SCREAMING_SNAKE_CASE_ = seq_length SCREAMING_SNAKE_CASE_ = is_training SCREAMING_SNAKE_CASE_ = use_input_mask SCREAMING_SNAKE_CASE_ = use_token_type_ids SCREAMING_SNAKE_CASE_ = use_labels SCREAMING_SNAKE_CASE_ = vocab_size SCREAMING_SNAKE_CASE_ = hidden_size SCREAMING_SNAKE_CASE_ = num_hidden_layers SCREAMING_SNAKE_CASE_ = num_attention_heads SCREAMING_SNAKE_CASE_ = intermediate_size SCREAMING_SNAKE_CASE_ = hidden_act SCREAMING_SNAKE_CASE_ = hidden_dropout_prob SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ = max_position_embeddings SCREAMING_SNAKE_CASE_ = type_vocab_size SCREAMING_SNAKE_CASE_ = type_sequence_label_size SCREAMING_SNAKE_CASE_ = initializer_range SCREAMING_SNAKE_CASE_ = num_labels SCREAMING_SNAKE_CASE_ = num_choices SCREAMING_SNAKE_CASE_ = scope def _A ( self: Optional[int] ): SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE_ = None if self.use_input_mask: SCREAMING_SNAKE_CASE_ = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE_ = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = None if self.use_labels: SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.num_choices ) SCREAMING_SNAKE_CASE_ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _A ( self: Union[str, Any] ): return NezhaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase__ , initializer_range=self.initializer_range , ) def _A ( self: Union[str, Any] ): ( ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ) = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def _A ( self: Optional[int] , _lowerCamelCase: Union[str, Any] , _lowerCamelCase: List[str] , _lowerCamelCase: List[str] , _lowerCamelCase: List[str] , _lowerCamelCase: Tuple , _lowerCamelCase: Tuple , _lowerCamelCase: str ): SCREAMING_SNAKE_CASE_ = NezhaModel(config=lowercase__ ) model.to(lowercase__ ) model.eval() SCREAMING_SNAKE_CASE_ = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ ) SCREAMING_SNAKE_CASE_ = model(lowercase__ , token_type_ids=lowercase__ ) SCREAMING_SNAKE_CASE_ = model(lowercase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def _A ( self: Optional[int] , _lowerCamelCase: Dict , _lowerCamelCase: str , _lowerCamelCase: Optional[Any] , _lowerCamelCase: Optional[Any] , _lowerCamelCase: List[str] , _lowerCamelCase: Tuple , _lowerCamelCase: Tuple , _lowerCamelCase: Optional[int] , _lowerCamelCase: List[Any] , ): SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = NezhaModel(lowercase__ ) model.to(lowercase__ ) model.eval() SCREAMING_SNAKE_CASE_ = model( lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , encoder_hidden_states=lowercase__ , encoder_attention_mask=lowercase__ , ) SCREAMING_SNAKE_CASE_ = model( lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , encoder_hidden_states=lowercase__ , ) SCREAMING_SNAKE_CASE_ = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def _A ( self: Any , _lowerCamelCase: List[str] , _lowerCamelCase: Dict , _lowerCamelCase: Tuple , _lowerCamelCase: Optional[Any] , _lowerCamelCase: List[Any] , _lowerCamelCase: List[Any] , _lowerCamelCase: Optional[Any] ): SCREAMING_SNAKE_CASE_ = NezhaForMaskedLM(config=lowercase__ ) model.to(lowercase__ ) model.eval() SCREAMING_SNAKE_CASE_ = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _A ( self: Optional[Any] , _lowerCamelCase: List[str] , _lowerCamelCase: Dict , _lowerCamelCase: Any , _lowerCamelCase: int , _lowerCamelCase: Union[str, Any] , _lowerCamelCase: Optional[int] , _lowerCamelCase: Any ): SCREAMING_SNAKE_CASE_ = NezhaForNextSentencePrediction(config=lowercase__ ) model.to(lowercase__ ) model.eval() SCREAMING_SNAKE_CASE_ = model( lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def _A ( self: Dict , _lowerCamelCase: str , _lowerCamelCase: Dict , _lowerCamelCase: Tuple , _lowerCamelCase: Dict , _lowerCamelCase: Tuple , _lowerCamelCase: int , _lowerCamelCase: int ): SCREAMING_SNAKE_CASE_ = NezhaForPreTraining(config=lowercase__ ) model.to(lowercase__ ) model.eval() SCREAMING_SNAKE_CASE_ = model( lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ , next_sentence_label=lowercase__ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def _A ( self: Optional[int] , _lowerCamelCase: Union[str, Any] , _lowerCamelCase: Optional[Any] , _lowerCamelCase: Tuple , _lowerCamelCase: List[str] , _lowerCamelCase: Dict , _lowerCamelCase: Optional[int] , _lowerCamelCase: Union[str, Any] ): SCREAMING_SNAKE_CASE_ = NezhaForQuestionAnswering(config=lowercase__ ) model.to(lowercase__ ) model.eval() SCREAMING_SNAKE_CASE_ = model( lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , start_positions=lowercase__ , end_positions=lowercase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _A ( self: Dict , _lowerCamelCase: Tuple , _lowerCamelCase: str , _lowerCamelCase: List[str] , _lowerCamelCase: Dict , _lowerCamelCase: Any , _lowerCamelCase: Optional[int] , _lowerCamelCase: int ): SCREAMING_SNAKE_CASE_ = self.num_labels SCREAMING_SNAKE_CASE_ = NezhaForSequenceClassification(lowercase__ ) model.to(lowercase__ ) model.eval() SCREAMING_SNAKE_CASE_ = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _A ( self: Optional[Any] , _lowerCamelCase: Union[str, Any] , _lowerCamelCase: List[str] , _lowerCamelCase: int , _lowerCamelCase: List[Any] , _lowerCamelCase: List[Any] , _lowerCamelCase: Any , _lowerCamelCase: Optional[Any] ): SCREAMING_SNAKE_CASE_ = self.num_labels SCREAMING_SNAKE_CASE_ = NezhaForTokenClassification(config=lowercase__ ) model.to(lowercase__ ) model.eval() SCREAMING_SNAKE_CASE_ = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _A ( self: Optional[Any] , _lowerCamelCase: List[Any] , _lowerCamelCase: List[Any] , _lowerCamelCase: Optional[Any] , _lowerCamelCase: List[str] , _lowerCamelCase: Dict , _lowerCamelCase: List[Any] , _lowerCamelCase: str ): SCREAMING_SNAKE_CASE_ = self.num_choices SCREAMING_SNAKE_CASE_ = NezhaForMultipleChoice(config=lowercase__ ) model.to(lowercase__ ) model.eval() SCREAMING_SNAKE_CASE_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE_ = model( lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _A ( self: Union[str, Any] ): SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ) = config_and_inputs SCREAMING_SNAKE_CASE_ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = ( ( NezhaModel, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE__ : Tuple = ( { 'feature-extraction': NezhaModel, 'fill-mask': NezhaForMaskedLM, 'question-answering': NezhaForQuestionAnswering, 'text-classification': NezhaForSequenceClassification, 'token-classification': NezhaForTokenClassification, 'zero-shot': NezhaForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE__ : List[str] = True def _A ( self: Dict , _lowerCamelCase: List[str] , _lowerCamelCase: str , _lowerCamelCase: Any=False ): SCREAMING_SNAKE_CASE_ = super()._prepare_for_class(lowercase__ , lowercase__ , return_labels=lowercase__ ) if return_labels: if model_class in get_values(lowercase__ ): SCREAMING_SNAKE_CASE_ = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowercase__ ) SCREAMING_SNAKE_CASE_ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowercase__ ) return inputs_dict def _A ( self: Tuple ): SCREAMING_SNAKE_CASE_ = NezhaModelTester(self ) SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=lowercase__ , hidden_size=37 ) def _A ( self: int ): self.config_tester.run_common_tests() def _A ( self: int ): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase__ ) def _A ( self: Any ): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*lowercase__ ) def _A ( self: Any ): # This regression test was failing with PyTorch < 1.3 ( ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() SCREAMING_SNAKE_CASE_ = None self.model_tester.create_and_check_model_as_decoder( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ) def _A ( self: int ): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowercase__ ) def _A ( self: Union[str, Any] ): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*lowercase__ ) def _A ( self: int ): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_next_sequence_prediction(*lowercase__ ) def _A ( self: Optional[Any] ): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*lowercase__ ) def _A ( self: str ): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowercase__ ) def _A ( self: str ): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowercase__ ) def _A ( self: int ): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowercase__ ) @slow def _A ( self: Union[str, Any] ): for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE_ = NezhaModel.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) @slow @require_torch_gpu def _A ( self: Optional[int] ): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # NezhaForMultipleChoice behaves incorrectly in JIT environments. if model_class == NezhaForMultipleChoice: return SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = model_class(config=lowercase__ ) SCREAMING_SNAKE_CASE_ = self._prepare_for_class(lowercase__ , lowercase__ ) SCREAMING_SNAKE_CASE_ = torch.jit.trace( lowercase__ , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(lowercase__ , os.path.join(lowercase__ , '''bert.pt''' ) ) SCREAMING_SNAKE_CASE_ = torch.jit.load(os.path.join(lowercase__ , '''bert.pt''' ) , map_location=lowercase__ ) loaded(inputs_dict['''input_ids'''].to(lowercase__ ) , inputs_dict['''attention_mask'''].to(lowercase__ ) ) @require_torch class __magic_name__ ( unittest.TestCase): '''simple docstring''' @slow def _A ( self: int ): SCREAMING_SNAKE_CASE_ = NezhaModel.from_pretrained('''sijunhe/nezha-cn-base''' ) SCREAMING_SNAKE_CASE_ = torch.tensor([[0, 1, 2, 3, 4, 5]] ) SCREAMING_SNAKE_CASE_ = torch.tensor([[0, 1, 1, 1, 1, 1]] ) with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model(lowercase__ , attention_mask=lowercase__ )[0] SCREAMING_SNAKE_CASE_ = torch.Size((1, 6, 7_68) ) self.assertEqual(output.shape , lowercase__ ) SCREAMING_SNAKE_CASE_ = torch.tensor([[[0.06_85, 0.24_41, 0.11_02], [0.06_00, 0.19_06, 0.13_49], [0.02_21, 0.08_19, 0.05_86]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowercase__ , atol=1E-4 ) ) @slow def _A ( self: Dict ): SCREAMING_SNAKE_CASE_ = NezhaForMaskedLM.from_pretrained('''sijunhe/nezha-cn-base''' ) SCREAMING_SNAKE_CASE_ = torch.tensor([[0, 1, 2, 3, 4, 5]] ) SCREAMING_SNAKE_CASE_ = torch.tensor([[1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model(lowercase__ , attention_mask=lowercase__ )[0] SCREAMING_SNAKE_CASE_ = torch.Size((1, 6, 2_11_28) ) self.assertEqual(output.shape , lowercase__ ) SCREAMING_SNAKE_CASE_ = torch.tensor( [[-2.79_39, -1.79_02, -2.21_89], [-2.85_85, -1.89_08, -2.37_23], [-2.64_99, -1.77_50, -2.25_58]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowercase__ , atol=1E-4 ) )
234
'''simple docstring''' import io import math from typing import Dict, Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, get_image_size, infer_channel_dimension_format, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_vision_available, logging from ...utils.import_utils import requires_backends if is_vision_available(): import textwrap from PIL import Image, ImageDraw, ImageFont if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: lowerCAmelCase__ = False lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = '''ybelkada/fonts''' def _A ( ): """simple docstring""" if is_torch_available() and not is_torch_greater_or_equal_than_1_11: raise ImportError( F"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use " '''Pix2StructImageProcessor. Please upgrade torch.''' ) def _A ( A__ , A__ , A__ ): """simple docstring""" requires_backends(A__ , ['''torch'''] ) _check_torch_version() __lowercase = image_tensor.unsqueeze(0 ) __lowercase = torch.nn.functional.unfold(A__ , (patch_height, patch_width) , stride=(patch_height, patch_width) ) __lowercase = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , A__ , A__ , -1 ) __lowercase = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape( image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , ) return patches.unsqueeze(0 ) def _A ( A__ , A__ = 36 , A__ = "black" , A__ = "white" , A__ = 5 , A__ = 5 , A__ = 5 , A__ = 5 , A__ = None , A__ = None , ): """simple docstring""" requires_backends(A__ , '''vision''' ) # Add new lines so that each line is no more than 80 characters. __lowercase = textwrap.TextWrapper(width=80 ) __lowercase = wrapper.wrap(text=A__ ) __lowercase = '''\n'''.join(A__ ) if font_bytes is not None and font_path is None: __lowercase = io.BytesIO(A__ ) elif font_path is not None: __lowercase = font_path else: __lowercase = hf_hub_download(A__ , '''Arial.TTF''' ) __lowercase = ImageFont.truetype(A__ , encoding='''UTF-8''' , size=A__ ) # Use a temporary canvas to determine the width and height in pixels when # rendering the text. __lowercase = ImageDraw.Draw(Image.new('''RGB''' , (1, 1) , A__ ) ) __lowercase , __lowercase , __lowercase , __lowercase = temp_draw.textbbox((0, 0) , A__ , A__ ) # Create the actual image with a bit of padding around the text. __lowercase = text_width + left_padding + right_padding __lowercase = text_height + top_padding + bottom_padding __lowercase = Image.new('''RGB''' , (image_width, image_height) , A__ ) __lowercase = ImageDraw.Draw(A__ ) draw.text(xy=(left_padding, top_padding) , text=A__ , fill=A__ , font=A__ ) return image def _A ( A__ , A__ , **A__ ): """simple docstring""" requires_backends(A__ , '''vision''' ) # Convert to PIL image if necessary __lowercase = to_pil_image(A__ ) __lowercase = render_text(A__ , **A__ ) __lowercase = max(header_image.width , image.width ) __lowercase = int(image.height * (new_width / image.width) ) __lowercase = int(header_image.height * (new_width / header_image.width) ) __lowercase = Image.new('''RGB''' , (new_width, new_height + new_header_height) , '''white''' ) new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) ) new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) ) # Convert back to the original framework if necessary __lowercase = to_numpy_array(A__ ) if infer_channel_dimension_format(A__ ) == ChannelDimension.LAST: __lowercase = to_channel_dimension_format(A__ , ChannelDimension.LAST ) return new_image class lowercase_ (lowerCamelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = ['flattened_patches'] def __init__( self : Any ,lowercase__ : bool = True ,lowercase__ : bool = True ,lowercase__ : Dict[str, int] = None ,lowercase__ : int = 2_0_4_8 ,lowercase__ : bool = False ,**lowercase__ : List[str] ,): super().__init__(**lowercase__ ) __lowercase = patch_size if patch_size is not None else {'''height''': 1_6, '''width''': 1_6} __lowercase = do_normalize __lowercase = do_convert_rgb __lowercase = max_patches __lowercase = is_vqa def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : np.ndarray ,lowercase__ : int ,lowercase__ : dict ,**lowercase__ : Tuple ): requires_backends(self.extract_flattened_patches ,'''torch''' ) _check_torch_version() # convert to torch __lowercase = to_channel_dimension_format(lowercase__ ,ChannelDimension.FIRST ) __lowercase = torch.from_numpy(lowercase__ ) __lowercase , __lowercase = patch_size['''height'''], patch_size['''width'''] __lowercase , __lowercase = get_image_size(lowercase__ ) # maximize scale s.t. __lowercase = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) ) __lowercase = max(min(math.floor(scale * image_height / patch_height ) ,lowercase__ ) ,1 ) __lowercase = max(min(math.floor(scale * image_width / patch_width ) ,lowercase__ ) ,1 ) __lowercase = max(num_feasible_rows * patch_height ,1 ) __lowercase = max(num_feasible_cols * patch_width ,1 ) __lowercase = torch.nn.functional.interpolate( image.unsqueeze(0 ) ,size=(resized_height, resized_width) ,mode='''bilinear''' ,align_corners=lowercase__ ,antialias=lowercase__ ,).squeeze(0 ) # [1, rows, columns, patch_height * patch_width * image_channels] __lowercase = torch_extract_patches(lowercase__ ,lowercase__ ,lowercase__ ) __lowercase = patches.shape __lowercase = patches_shape[1] __lowercase = patches_shape[2] __lowercase = patches_shape[3] # [rows * columns, patch_height * patch_width * image_channels] __lowercase = patches.reshape([rows * columns, depth] ) # [rows * columns, 1] __lowercase = torch.arange(lowercase__ ).reshape([rows, 1] ).repeat(1 ,lowercase__ ).reshape([rows * columns, 1] ) __lowercase = torch.arange(lowercase__ ).reshape([1, columns] ).repeat(lowercase__ ,1 ).reshape([rows * columns, 1] ) # Offset by 1 so the ids do not contain zeros, which represent padding. row_ids += 1 col_ids += 1 # Prepare additional patch features. # [rows * columns, 1] __lowercase = row_ids.to(torch.floataa ) __lowercase = col_ids.to(torch.floataa ) # [rows * columns, 2 + patch_height * patch_width * image_channels] __lowercase = torch.cat([row_ids, col_ids, patches] ,-1 ) # [max_patches, 2 + patch_height * patch_width * image_channels] __lowercase = torch.nn.functional.pad(lowercase__ ,[0, 0, 0, max_patches - (rows * columns)] ).float() __lowercase = to_numpy_array(lowercase__ ) return result def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : np.ndarray ,lowercase__ : Optional[Union[str, ChannelDimension]] = None ,**lowercase__ : List[Any] ): if image.dtype == np.uinta: __lowercase = image.astype(np.floataa ) # take mean across the whole `image` __lowercase = np.mean(lowercase__ ) __lowercase = np.std(lowercase__ ) __lowercase = max(lowercase__ ,1.0 / math.sqrt(np.prod(image.shape ) ) ) return normalize(lowercase__ ,mean=lowercase__ ,std=lowercase__ ,**lowercase__ ) def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : ImageInput ,lowercase__ : Optional[str] = None ,lowercase__ : bool = None ,lowercase__ : Optional[bool] = None ,lowercase__ : Optional[int] = None ,lowercase__ : Optional[Dict[str, int]] = None ,lowercase__ : Optional[Union[str, TensorType]] = None ,lowercase__ : ChannelDimension = ChannelDimension.FIRST ,**lowercase__ : List[Any] ,): __lowercase = do_normalize if do_normalize is not None else self.do_normalize __lowercase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __lowercase = patch_size if patch_size is not None else self.patch_size __lowercase = max_patches if max_patches is not None else self.max_patches __lowercase = self.is_vqa if kwargs.get('''data_format''' ,lowercase__ ) is not None: raise ValueError('''data_format is not an accepted input as the outputs are ''' ) __lowercase = make_list_of_images(lowercase__ ) if not valid_images(lowercase__ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: __lowercase = [convert_to_rgb(lowercase__ ) for image in images] # All transformations expect numpy arrays. __lowercase = [to_numpy_array(lowercase__ ) for image in images] if is_vqa: if header_text is None: raise ValueError('''A header text must be provided for VQA models.''' ) __lowercase = kwargs.pop('''font_bytes''' ,lowercase__ ) __lowercase = kwargs.pop('''font_path''' ,lowercase__ ) if isinstance(lowercase__ ,lowercase__ ): __lowercase = [header_text] * len(lowercase__ ) __lowercase = [ render_header(lowercase__ ,header_text[i] ,font_bytes=lowercase__ ,font_path=lowercase__ ) for i, image in enumerate(lowercase__ ) ] if do_normalize: __lowercase = [self.normalize(image=lowercase__ ) for image in images] # convert to torch tensor and permute __lowercase = [ self.extract_flattened_patches(image=lowercase__ ,max_patches=lowercase__ ,patch_size=lowercase__ ) for image in images ] # create attention mask in numpy __lowercase = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images] __lowercase = BatchFeature( data={'''flattened_patches''': images, '''attention_mask''': attention_masks} ,tensor_type=lowercase__ ) return encoded_outputs
41
0
'''simple docstring''' import os import sys import transformers __a: List[str] = """3""" print("""Python version:""", sys.version) print("""transformers version:""", transformers.__version__) try: import torch print("""Torch version:""", torch.__version__) print("""Cuda available:""", torch.cuda.is_available()) print("""Cuda version:""", torch.version.cuda) print("""CuDNN version:""", torch.backends.cudnn.version()) print("""Number of GPUs available:""", torch.cuda.device_count()) print("""NCCL version:""", torch.cuda.nccl.version()) except ImportError: print("""Torch version:""", None) try: import deepspeed print("""DeepSpeed version:""", deepspeed.__version__) except ImportError: print("""DeepSpeed version:""", None) try: import tensorflow as tf print("""TensorFlow version:""", tf.__version__) print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU"""))) print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU"""))) except ImportError: print("""TensorFlow version:""", None)
152
'''simple docstring''' import doctest from collections import deque import numpy as np class lowercase_ : """simple docstring""" def __init__( self : Optional[Any] ): __lowercase = [2, 1, 2, -1] __lowercase = [1, 2, 3, 4] def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): __lowercase = len(self.first_signal ) __lowercase = len(self.second_signal ) __lowercase = max(lowercase__ ,lowercase__ ) # create a zero matrix of max_length x max_length __lowercase = [[0] * max_length for i in range(lowercase__ )] # fills the smaller signal with zeros to make both signals of same length if length_first_signal < length_second_signal: self.first_signal += [0] * (max_length - length_first_signal) elif length_first_signal > length_second_signal: self.second_signal += [0] * (max_length - length_second_signal) for i in range(lowercase__ ): __lowercase = deque(self.second_signal ) rotated_signal.rotate(lowercase__ ) for j, item in enumerate(lowercase__ ): matrix[i][j] += item # multiply the matrix with the first signal __lowercase = np.matmul(np.transpose(lowercase__ ) ,np.transpose(self.first_signal ) ) # rounding-off to two decimal places return [round(lowercase__ ,2 ) for i in final_signal] if __name__ == "__main__": doctest.testmod()
41
0
"""simple docstring""" import inspect import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py __A = "src/transformers" # This is to make sure the transformers module imported is the one in the repo. __A = direct_transformers_import(PATH_TO_TRANSFORMERS) __A = transformers.models.auto.configuration_auto.CONFIG_MAPPING # Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`. # For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)` __A = re.compile(R"\[(.+?)\]\((https://huggingface\.co/.+?)\)") __A = { "DecisionTransformerConfig", "EncoderDecoderConfig", "MusicgenConfig", "RagConfig", "SpeechEncoderDecoderConfig", "TimmBackboneConfig", "VisionEncoderDecoderConfig", "VisionTextDualEncoderConfig", "LlamaConfig", } def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> List[Any]: lowercase__: List[str] = None # source code of `config_class` lowercase__: Dict = inspect.getsource(A__ ) lowercase__: Optional[Any] = _re_checkpoint.findall(A__ ) # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link. # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')` for ckpt_name, ckpt_link in checkpoints: # allow the link to end with `/` if ckpt_link.endswith('''/''' ): lowercase__: List[Any] = ckpt_link[:-1] # verify the checkpoint name corresponds to the checkpoint link lowercase__: Any = F"""https://huggingface.co/{ckpt_name}""" if ckpt_link == ckpt_link_from_name: lowercase__: Union[str, Any] = ckpt_name break return checkpoint def SCREAMING_SNAKE_CASE__ ( ) -> List[str]: lowercase__: Optional[int] = [] for config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in config_class.__module__: continue lowercase__: str = get_checkpoint_from_config_class(A__ ) lowercase__: str = config_class.__name__ if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK: configs_without_checkpoint.append(A__ ) if len(A__ ) > 0: lowercase__: Dict = '''\n'''.join(sorted(A__ ) ) raise ValueError(F"""The following configurations don't contain any valid checkpoint:\n{message}""" ) if __name__ == "__main__": check_config_docstrings_have_checkpoints()
586
'''simple docstring''' from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import torch from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available @dataclass class lowercase_ (lowerCamelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Union[List[np.ndarray], torch.FloatTensor] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_text_to_video_synth import TextToVideoSDPipeline from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401 from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
41
0
"""simple docstring""" import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow lowerCAmelCase_ = logging.getLogger() @unittest.skip("Temporarily disable the doc tests." ) @require_torch @require_tf @slow class _snake_case ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : List[Any] , _A : Path , _A : Union[str, None] = None , _A : Union[List[str], None] = None , _A : Union[str, List[str], None] = None , _A : bool = True , ): """simple docstring""" _SCREAMING_SNAKE_CASE : List[str] = [file for file in os.listdir(lowercase__) if os.path.isfile(os.path.join(lowercase__ , lowercase__))] if identifier is not None: _SCREAMING_SNAKE_CASE : str = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(lowercase__ , lowercase__): for n_ in n_identifier: _SCREAMING_SNAKE_CASE : Union[str, Any] = [file for file in files if n_ not in file] else: _SCREAMING_SNAKE_CASE : int = [file for file in files if n_identifier not in file] _SCREAMING_SNAKE_CASE : Union[str, Any] = ignore_files or [] ignore_files.append("""__init__.py""") _SCREAMING_SNAKE_CASE : Any = [file for file in files if file not in ignore_files] for file in files: # Open all files print("""Testing""" , lowercase__) if only_modules: _SCREAMING_SNAKE_CASE : Dict = file.split(""".""")[0] try: _SCREAMING_SNAKE_CASE : Any = getattr(lowercase__ , lowercase__) _SCREAMING_SNAKE_CASE : Tuple = doctest.DocTestSuite(lowercase__) _SCREAMING_SNAKE_CASE : List[str] = unittest.TextTestRunner().run(lowercase__) self.assertIs(len(result.failures) , 0) except AttributeError: logger.info(f"""{module_identifier} is not a module.""") else: _SCREAMING_SNAKE_CASE : Dict = doctest.testfile(str("""..""" / directory / file) , optionflags=doctest.ELLIPSIS) self.assertIs(result.failed , 0) def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = Path("""src/transformers""") _SCREAMING_SNAKE_CASE : Optional[Any] = """modeling""" _SCREAMING_SNAKE_CASE : List[str] = [ """modeling_ctrl.py""", """modeling_tf_ctrl.py""", ] self.analyze_directory(lowercase__ , identifier=lowercase__ , ignore_files=lowercase__) def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = Path("""src/transformers""") _SCREAMING_SNAKE_CASE : List[str] = """tokenization""" self.analyze_directory(lowercase__ , identifier=lowercase__) def _lowerCAmelCase ( self : int): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = Path("""src/transformers""") _SCREAMING_SNAKE_CASE : Any = """configuration""" self.analyze_directory(lowercase__ , identifier=lowercase__) def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : str = Path("""src/transformers""") _SCREAMING_SNAKE_CASE : Optional[int] = ["""configuration""", """modeling""", """tokenization"""] self.analyze_directory(lowercase__ , n_identifier=lowercase__) def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : str = Path("""docs/source""") _SCREAMING_SNAKE_CASE : int = ["""favicon.ico"""] self.analyze_directory(lowercase__ , ignore_files=lowercase__ , only_modules=lowercase__)
338
'''simple docstring''' import argparse import datetime import json import time import warnings from logging import getLogger from pathlib import Path from typing import Dict, List import torch from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params lowerCAmelCase__ = getLogger(__name__) lowerCAmelCase__ = '''cuda''' if torch.cuda.is_available() else '''cpu''' def _A ( A__ , A__ , A__ , A__ = 8 , A__ = DEFAULT_DEVICE , A__=False , A__="summarization" , A__=None , **A__ , ): """simple docstring""" __lowercase = Path(A__ ).open('''w''' , encoding='''utf-8''' ) __lowercase = str(A__ ) __lowercase = AutoModelForSeqaSeqLM.from_pretrained(A__ ).to(A__ ) if fpaa: __lowercase = model.half() __lowercase = AutoTokenizer.from_pretrained(A__ ) logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type. __lowercase = time.time() # update config with task specific params use_task_specific_params(A__ , A__ ) if prefix is None: __lowercase = prefix or getattr(model.config , '''prefix''' , '''''' ) or '''''' for examples_chunk in tqdm(list(chunks(A__ , A__ ) ) ): __lowercase = [prefix + text for text in examples_chunk] __lowercase = tokenizer(A__ , return_tensors='''pt''' , truncation=A__ , padding='''longest''' ).to(A__ ) __lowercase = model.generate( input_ids=batch.input_ids , attention_mask=batch.attention_mask , **A__ , ) __lowercase = tokenizer.batch_decode(A__ , skip_special_tokens=A__ , clean_up_tokenization_spaces=A__ ) for hypothesis in dec: fout.write(hypothesis + '''\n''' ) fout.flush() fout.close() __lowercase = int(time.time() - start_time ) # seconds __lowercase = len(A__ ) return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )} def _A ( ): """simple docstring""" return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' ) def _A ( A__=True ): """simple docstring""" __lowercase = argparse.ArgumentParser() parser.add_argument('''model_name''' , type=A__ , help='''like facebook/bart-large-cnn,t5-base, etc.''' ) parser.add_argument('''input_path''' , type=A__ , help='''like cnn_dm/test.source''' ) parser.add_argument('''save_path''' , type=A__ , help='''where to save summaries''' ) parser.add_argument('''--reference_path''' , type=A__ , required=A__ , help='''like cnn_dm/test.target''' ) parser.add_argument('''--score_path''' , type=A__ , required=A__ , default='''metrics.json''' , help='''where to save metrics''' ) parser.add_argument('''--device''' , type=A__ , required=A__ , default=A__ , help='''cuda, cuda:1, cpu etc.''' ) parser.add_argument( '''--prefix''' , type=A__ , required=A__ , default=A__ , help='''will be added to the begininng of src examples''' ) parser.add_argument('''--task''' , type=A__ , default='''summarization''' , help='''used for task_specific_params + metrics''' ) parser.add_argument('''--bs''' , type=A__ , default=8 , required=A__ , help='''batch size''' ) parser.add_argument( '''--n_obs''' , type=A__ , default=-1 , required=A__ , help='''How many observations. Defaults to all.''' ) parser.add_argument('''--fp16''' , action='''store_true''' ) parser.add_argument('''--dump-args''' , action='''store_true''' , help='''print the custom hparams with the results''' ) parser.add_argument( '''--info''' , nargs='''?''' , type=A__ , const=datetime_now() , help=( '''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.''' ''' lang=en-ru. If no value is passed, the current datetime string will be used.''' ) , ) # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate __lowercase , __lowercase = parser.parse_known_args() __lowercase = parse_numeric_n_bool_cl_kwargs(A__ ) if parsed_args and verbose: print(F"parsed the following generate kwargs: {parsed_args}" ) __lowercase = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()] if args.n_obs > 0: __lowercase = examples[: args.n_obs] Path(args.save_path ).parent.mkdir(exist_ok=A__ ) if args.reference_path is None and Path(args.score_path ).exists(): warnings.warn(F"score_path {args.score_path} will be overwritten unless you type ctrl-c." ) if args.device == "cpu" and args.fpaa: # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half' raise ValueError('''Can\'t mix --fp16 and --device cpu''' ) __lowercase = generate_summaries_or_translations( A__ , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **A__ , ) if args.reference_path is None: return {} # Compute scores __lowercase = calculate_bleu if '''translation''' in args.task else calculate_rouge __lowercase = [x.rstrip() for x in open(args.save_path ).readlines()] __lowercase = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(A__ )] __lowercase = score_fn(A__ , A__ ) scores.update(A__ ) if args.dump_args: scores.update(A__ ) if args.info: __lowercase = args.info if verbose: print(A__ ) if args.score_path is not None: json.dump(A__ , open(args.score_path , '''w''' ) ) return scores if __name__ == "__main__": # Usage for MT: # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@ run_generate(verbose=True)
41
0
import os import sys UpperCAmelCase__ = os.path.join(os.path.dirname(__file__), '''src''') sys.path.append(SRC_DIR) from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer, add_start_docstrings, ) UpperCAmelCase__ = [ '''torch''', '''numpy''', '''tokenizers''', '''filelock''', '''requests''', '''tqdm''', '''regex''', '''sentencepiece''', '''sacremoses''', '''importlib_metadata''', '''huggingface_hub''', ] @add_start_docstrings(AutoConfig.__doc__ ) def a_ (*__A , **__A ) -> Dict: """simple docstring""" return AutoConfig.from_pretrained(*A__ , **A__ ) @add_start_docstrings(AutoTokenizer.__doc__ ) def a_ (*__A , **__A ) -> str: """simple docstring""" return AutoTokenizer.from_pretrained(*A__ , **A__ ) @add_start_docstrings(AutoModel.__doc__ ) def a_ (*__A , **__A ) -> int: """simple docstring""" return AutoModel.from_pretrained(*A__ , **A__ ) @add_start_docstrings(AutoModelForCausalLM.__doc__ ) def a_ (*__A , **__A ) -> List[str]: """simple docstring""" return AutoModelForCausalLM.from_pretrained(*A__ , **A__ ) @add_start_docstrings(AutoModelForMaskedLM.__doc__ ) def a_ (*__A , **__A ) -> int: """simple docstring""" return AutoModelForMaskedLM.from_pretrained(*A__ , **A__ ) @add_start_docstrings(AutoModelForSequenceClassification.__doc__ ) def a_ (*__A , **__A ) -> List[str]: """simple docstring""" return AutoModelForSequenceClassification.from_pretrained(*A__ , **A__ ) @add_start_docstrings(AutoModelForQuestionAnswering.__doc__ ) def a_ (*__A , **__A ) -> str: """simple docstring""" return AutoModelForQuestionAnswering.from_pretrained(*A__ , **A__ )
351
'''simple docstring''' from __future__ import annotations def _A ( A__ , A__ ): """simple docstring""" print(F"Vertex\tShortest Distance from vertex {src}" ) for i, d in enumerate(A__ ): print(F"{i}\t\t{d}" ) def _A ( A__ , A__ , A__ ): """simple docstring""" for j in range(A__ ): __lowercase , __lowercase , __lowercase = (graph[j][k] for k in ['''src''', '''dst''', '''weight''']) if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]: return True return False def _A ( A__ , A__ , A__ , A__ ): """simple docstring""" __lowercase = [float('''inf''' )] * vertex_count __lowercase = 0.0 for _ in range(vertex_count - 1 ): for j in range(A__ ): __lowercase , __lowercase , __lowercase = (graph[j][k] for k in ['''src''', '''dst''', '''weight''']) if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]: __lowercase = distance[u] + w __lowercase = check_negative_cycle(A__ , A__ , A__ ) if negative_cycle_exists: raise Exception('''Negative cycle found''' ) return distance if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase__ = int(input('''Enter number of vertices: ''').strip()) lowerCAmelCase__ = int(input('''Enter number of edges: ''').strip()) lowerCAmelCase__ = [{} for _ in range(E)] for i in range(E): print('''Edge ''', i + 1) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = ( int(x) for x in input('''Enter source, destination, weight: ''').strip().split(''' ''') ) lowerCAmelCase__ = {'''src''': src, '''dst''': dest, '''weight''': weight} lowerCAmelCase__ = int(input('''\nEnter shortest path source:''').strip()) lowerCAmelCase__ = bellman_ford(graph, V, E, source) print_distance(shortest_distance, 0)
41
0
"""simple docstring""" import os # Precomputes a list of the 100 first triangular numbers A_ : Tuple = [int(0.5 * n * (n + 1)) for n in range(1, 101)] def __snake_case ( ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE : int = os.path.dirname(os.path.realpath(A__ ) ) SCREAMING_SNAKE_CASE : int = os.path.join(A__ , 'words.txt' ) SCREAMING_SNAKE_CASE : str = '' with open(A__ ) as f: SCREAMING_SNAKE_CASE : Dict = f.readline() SCREAMING_SNAKE_CASE : List[Any] = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )] SCREAMING_SNAKE_CASE : Optional[int] = [ word for word in [sum(ord(A__ ) - 64 for x in word ) for word in words] if word in TRIANGULAR_NUMBERS ] return len(A__ ) if __name__ == "__main__": print(solution())
265
'''simple docstring''' import warnings from ...utils import logging from .image_processing_yolos import YolosImageProcessor lowerCAmelCase__ = logging.get_logger(__name__) class lowercase_ (lowerCamelCase__ ): """simple docstring""" def __init__( self : List[Any] ,*lowercase__ : Optional[Any] ,**lowercase__ : int ): warnings.warn( '''The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use YolosImageProcessor instead.''' ,lowercase__ ,) super().__init__(*lowercase__ ,**lowercase__ )
41
0
import argparse import os import re UpperCAmelCase_ : List[Any] = "src/diffusers" # Pattern that looks at the indentation in a line. UpperCAmelCase_ : Dict = re.compile(R"^(\s*)\S") # Pattern that matches `"key":" and puts `key` in group 0. UpperCAmelCase_ : List[str] = re.compile(R"^\s*\"([^\"]+)\":") # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. UpperCAmelCase_ : Any = re.compile(R"^\s*_import_structure\[\"([^\"]+)\"\]") # Pattern that matches `"key",` and puts `key` in group 0. UpperCAmelCase_ : str = re.compile(R"^\s*\"([^\"]+)\",\s*$") # Pattern that matches any `[stuff]` and puts `stuff` in group 0. UpperCAmelCase_ : str = re.compile(R"\[([^\]]+)\]") def lowerCAmelCase_ ( lowerCamelCase ): __magic_name__ : str =_re_indent.search(A__ ) return "" if search is None else search.groups()[0] def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase="" , lowerCamelCase=None , lowerCamelCase=None ): __magic_name__ : Optional[Any] =0 __magic_name__ : Tuple =code.split("""\n""" ) if start_prompt is not None: while not lines[index].startswith(A__ ): index += 1 __magic_name__ : Optional[int] =["""\n""".join(lines[:index] )] else: __magic_name__ : List[str] =[] # We split into blocks until we get to the `end_prompt` (or the end of the block). __magic_name__ : Any =[lines[index]] index += 1 while index < len(A__ ) and (end_prompt is None or not lines[index].startswith(A__ )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(A__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ): current_block.append(lines[index] ) blocks.append("""\n""".join(A__ ) ) if index < len(A__ ) - 1: __magic_name__ : int =[lines[index + 1]] index += 1 else: __magic_name__ : Optional[int] =[] else: blocks.append("""\n""".join(A__ ) ) __magic_name__ : str =[lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(A__ ) > 0: blocks.append("""\n""".join(A__ ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(A__ ): blocks.append("""\n""".join(lines[index:] ) ) return blocks def lowerCAmelCase_ ( lowerCamelCase ): def _inner(lowerCamelCase ): return key(A__ ).lower().replace("""_""" , """""" ) return _inner def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=None ): def noop(lowerCamelCase ): return x if key is None: __magic_name__ : str =noop # Constants are all uppercase, they go first. __magic_name__ : Optional[int] =[obj for obj in objects if key(A__ ).isupper()] # Classes are not all uppercase but start with a capital, they go second. __magic_name__ : List[Any] =[obj for obj in objects if key(A__ )[0].isupper() and not key(A__ ).isupper()] # Functions begin with a lowercase, they go last. __magic_name__ : Any =[obj for obj in objects if not key(A__ )[0].isupper()] __magic_name__ : Any =ignore_underscore(A__ ) return sorted(A__ , key=A__ ) + sorted(A__ , key=A__ ) + sorted(A__ , key=A__ ) def lowerCAmelCase_ ( lowerCamelCase ): def _replace(lowerCamelCase ): __magic_name__ : Tuple =match.groups()[0] if "," not in imports: return F"[{imports}]" __magic_name__ : List[str] =[part.strip().replace("""\"""" , """""" ) for part in imports.split(""",""" )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: __magic_name__ : Dict =keys[:-1] return "[" + ", ".join([F"\"{k}\"" for k in sort_objects(A__ )] ) + "]" __magic_name__ : Optional[Any] =import_statement.split("""\n""" ) if len(A__ ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. __magic_name__ : List[str] =2 if lines[1].strip() == """[""" else 1 __magic_name__ : str =[(i, _re_strip_line.search(A__ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] __magic_name__ : Any =sort_objects(A__ , key=lambda lowerCamelCase : x[1] ) __magic_name__ : Union[str, Any] =[lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(A__ ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: __magic_name__ : Union[str, Any] =_re_bracket_content.sub(_replace , lines[1] ) else: __magic_name__ : str =[part.strip().replace("""\"""" , """""" ) for part in lines[1].split(""",""" )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: __magic_name__ : str =keys[:-1] __magic_name__ : Optional[int] =get_indent(lines[1] ) + """, """.join([F"\"{k}\"" for k in sort_objects(A__ )] ) return "\n".join(A__ ) else: # Finally we have to deal with imports fitting on one line __magic_name__ : List[Any] =_re_bracket_content.sub(_replace , A__ ) return import_statement def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=True ): with open(A__ , """r""" ) as f: __magic_name__ : List[str] =f.read() if "_import_structure" not in code: return # Blocks of indent level 0 __magic_name__ : Optional[Any] =split_code_in_indented_blocks( A__ , start_prompt="""_import_structure = {""" , end_prompt="""if TYPE_CHECKING:""" ) # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(A__ ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. __magic_name__ : Optional[int] =main_blocks[block_idx] __magic_name__ : List[Any] =block.split("""\n""" ) # Get to the start of the imports. __magic_name__ : Optional[Any] =0 while line_idx < len(A__ ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: __magic_name__ : List[str] =len(A__ ) else: line_idx += 1 if line_idx >= len(A__ ): continue # Ignore beginning and last line: they don't contain anything. __magic_name__ : Optional[Any] ="""\n""".join(block_lines[line_idx:-1] ) __magic_name__ : int =get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. __magic_name__ : List[Any] =split_code_in_indented_blocks(A__ , indent_level=A__ ) # We have two categories of import key: list or _import_structure[key].append/extend __magic_name__ : str =_re_direct_key if """_import_structure""" in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. __magic_name__ : Optional[Any] =[(pattern.search(A__ ).groups()[0] if pattern.search(A__ ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. __magic_name__ : Dict =[(i, key) for i, key in enumerate(A__ ) if key is not None] __magic_name__ : Optional[Any] =[x[0] for x in sorted(A__ , key=lambda lowerCamelCase : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. __magic_name__ : int =0 __magic_name__ : Union[str, Any] =[] for i in range(len(A__ ) ): if keys[i] is None: reordered_blocks.append(internal_blocks[i] ) else: __magic_name__ : Optional[Any] =sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reordered_blocks.append(A__ ) count += 1 # And we put our main block back together with its first and last line. __magic_name__ : Tuple ="""\n""".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] ) if code != "\n".join(A__ ): if check_only: return True else: print(F"Overwriting {file}." ) with open(A__ , """w""" ) as f: f.write("""\n""".join(A__ ) ) def lowerCAmelCase_ ( lowerCamelCase=True ): __magic_name__ : List[Any] =[] for root, _, files in os.walk(A__ ): if "__init__.py" in files: __magic_name__ : int =sort_imports(os.path.join(A__ , """__init__.py""" ) , check_only=A__ ) if result: __magic_name__ : List[Any] =[os.path.join(A__ , """__init__.py""" )] if len(A__ ) > 0: raise ValueError(F"Would overwrite {len(A__ )} files, run `make style`." ) if __name__ == "__main__": UpperCAmelCase_ : int = argparse.ArgumentParser() parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.") UpperCAmelCase_ : List[Any] = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
21
'''simple docstring''' import os import time import pytest from datasets.utils.filelock import FileLock, Timeout def _A ( A__ ): """simple docstring""" __lowercase = FileLock(str(tmpdir / '''foo.lock''' ) ) __lowercase = FileLock(str(tmpdir / '''foo.lock''' ) ) __lowercase = 0.0_1 with locka.acquire(): with pytest.raises(A__ ): __lowercase = time.time() locka.acquire(A__ ) assert time.time() - _start > timeout def _A ( A__ ): """simple docstring""" __lowercase = '''a''' * 1000 + '''.lock''' __lowercase = FileLock(str(tmpdir / filename ) ) assert locka._lock_file.endswith('''.lock''' ) assert not locka._lock_file.endswith(A__ ) assert len(os.path.basename(locka._lock_file ) ) <= 255 __lowercase = FileLock(tmpdir / filename ) with locka.acquire(): with pytest.raises(A__ ): locka.acquire(0 )
41
0
"""simple docstring""" class lowerCamelCase__ : # Public class to implement a graph '''simple docstring''' def __init__( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> Tuple: A = row A = col A = graph def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> int: return ( 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and self.graph[i][j] ) def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> int: # Checking all 8 elements surrounding nth element A = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order A = [-1, 0, 1, -1, 1, -1, 0, 1] A = True # Make those cells visited for k in range(8 ): if self.is_safe(i + row_nbr[k] ,j + col_nbr[k] ,lowercase__ ): self.diffs(i + row_nbr[k] ,j + col_nbr[k] ,lowercase__ ) def UpperCamelCase__ ( self ) -> int: # And finally, count all islands. A = [[False for j in range(self.COL )] for i in range(self.ROW )] A = 0 for i in range(self.ROW ): for j in range(self.COL ): if visited[i][j] is False and self.graph[i][j] == 1: self.diffs(lowercase__ ,lowercase__ ,lowercase__ ) count += 1 return count
617
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) lowerCAmelCase__ = { '''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GPTBigCodeForSequenceClassification''', '''GPTBigCodeForTokenClassification''', '''GPTBigCodeForCausalLM''', '''GPTBigCodeModel''', '''GPTBigCodePreTrainedModel''', ] if TYPE_CHECKING: from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_bigcode import ( GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTBigCodeForCausalLM, GPTBigCodeForSequenceClassification, GPTBigCodeForTokenClassification, GPTBigCodeModel, GPTBigCodePreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
41
0
"""simple docstring""" import unittest from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase_ : List[str] = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece @require_tokenizers class UpperCamelCase_ ( lowerCamelCase__ , unittest.TestCase ): _A : str = XLNetTokenizer _A : Optional[int] = XLNetTokenizerFast _A : int = True _A : List[str] = True def UpperCamelCase_ ( self ) -> Optional[int]: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase = XLNetTokenizer(lowercase__ , keep_accents=lowercase__ ) tokenizer.sanitize_special_tokens() tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase_ ( self ) -> str: """simple docstring""" UpperCAmelCase = """<s>""" UpperCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase__ ) , lowercase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase__ ) , lowercase__ ) def UpperCamelCase_ ( self ) -> List[str]: """simple docstring""" UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<unk>""" ) self.assertEqual(vocab_keys[1] , """<s>""" ) self.assertEqual(vocab_keys[-1] , """<eod>""" ) self.assertEqual(len(lowercase__ ) , 10_06 ) def UpperCamelCase_ ( self ) -> Any: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 10_00 ) def UpperCamelCase_ ( self ) -> List[str]: """simple docstring""" UpperCAmelCase = XLNetTokenizer(lowercase__ , keep_accents=lowercase__ ) UpperCAmelCase = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(lowercase__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__ ) , [2_85, 46, 10, 1_70, 3_82] ) UpperCAmelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( lowercase__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) UpperCAmelCase = tokenizer.convert_tokens_to_ids(lowercase__ ) self.assertListEqual(lowercase__ , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] ) UpperCAmelCase = tokenizer.convert_ids_to_tokens(lowercase__ ) self.assertListEqual( lowercase__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) def UpperCamelCase_ ( self ) -> List[str]: """simple docstring""" UpperCAmelCase = XLNetTokenizer(lowercase__ , do_lower_case=lowercase__ ) UpperCAmelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( lowercase__ , [ SPIECE_UNDERLINE + """""", """i""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """se""", """.""", ] , ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""▁he""", """ll""", """o"""] ) def UpperCamelCase_ ( self ) -> int: """simple docstring""" UpperCAmelCase = XLNetTokenizer(lowercase__ , do_lower_case=lowercase__ ) UpperCAmelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( lowercase__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """se""", """.""", ] , ) @slow def UpperCamelCase_ ( self ) -> List[str]: """simple docstring""" UpperCAmelCase = XLNetTokenizer.from_pretrained("""xlnet-base-cased""" ) UpperCAmelCase = tokenizer.encode("""sequence builders""" , add_special_tokens=lowercase__ ) UpperCAmelCase = tokenizer.encode("""multi-sequence build""" , add_special_tokens=lowercase__ ) UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(lowercase__ ) UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(lowercase__ , lowercase__ ) assert encoded_sentence == text + [4, 3] assert encoded_pair == text + [4] + text_a + [4, 3] @slow def UpperCamelCase_ ( self ) -> Any: """simple docstring""" UpperCAmelCase = {"""input_ids""": [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowercase__ , model_name="""xlnet-base-cased""" , revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" , )
673
'''simple docstring''' import argparse import os import re lowerCAmelCase__ = '''src/diffusers''' # Pattern that looks at the indentation in a line. lowerCAmelCase__ = re.compile(R'''^(\s*)\S''') # Pattern that matches `"key":" and puts `key` in group 0. lowerCAmelCase__ = re.compile(R'''^\s*"([^"]+)":''') # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. lowerCAmelCase__ = re.compile(R'''^\s*_import_structure\["([^"]+)"\]''') # Pattern that matches `"key",` and puts `key` in group 0. lowerCAmelCase__ = re.compile(R'''^\s*"([^"]+)",\s*$''') # Pattern that matches any `[stuff]` and puts `stuff` in group 0. lowerCAmelCase__ = re.compile(R'''\[([^\]]+)\]''') def _A ( A__ ): """simple docstring""" __lowercase = _re_indent.search(A__ ) return "" if search is None else search.groups()[0] def _A ( A__ , A__="" , A__=None , A__=None ): """simple docstring""" __lowercase = 0 __lowercase = code.split('''\n''' ) if start_prompt is not None: while not lines[index].startswith(A__ ): index += 1 __lowercase = ['''\n'''.join(lines[:index] )] else: __lowercase = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). __lowercase = [lines[index]] index += 1 while index < len(A__ ) and (end_prompt is None or not lines[index].startswith(A__ )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(A__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ): current_block.append(lines[index] ) blocks.append('''\n'''.join(A__ ) ) if index < len(A__ ) - 1: __lowercase = [lines[index + 1]] index += 1 else: __lowercase = [] else: blocks.append('''\n'''.join(A__ ) ) __lowercase = [lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(A__ ) > 0: blocks.append('''\n'''.join(A__ ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(A__ ): blocks.append('''\n'''.join(lines[index:] ) ) return blocks def _A ( A__ ): """simple docstring""" def _inner(A__ ): return key(A__ ).lower().replace('''_''' , '''''' ) return _inner def _A ( A__ , A__=None ): """simple docstring""" def noop(A__ ): return x if key is None: __lowercase = noop # Constants are all uppercase, they go first. __lowercase = [obj for obj in objects if key(A__ ).isupper()] # Classes are not all uppercase but start with a capital, they go second. __lowercase = [obj for obj in objects if key(A__ )[0].isupper() and not key(A__ ).isupper()] # Functions begin with a lowercase, they go last. __lowercase = [obj for obj in objects if not key(A__ )[0].isupper()] __lowercase = ignore_underscore(A__ ) return sorted(A__ , key=A__ ) + sorted(A__ , key=A__ ) + sorted(A__ , key=A__ ) def _A ( A__ ): """simple docstring""" def _replace(A__ ): __lowercase = match.groups()[0] if "," not in imports: return F"[{imports}]" __lowercase = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: __lowercase = keys[:-1] return "[" + ", ".join([F"\"{k}\"" for k in sort_objects(A__ )] ) + "]" __lowercase = import_statement.split('''\n''' ) if len(A__ ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. __lowercase = 2 if lines[1].strip() == '''[''' else 1 __lowercase = [(i, _re_strip_line.search(A__ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] __lowercase = sort_objects(A__ , key=lambda A__ : x[1] ) __lowercase = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(A__ ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: __lowercase = _re_bracket_content.sub(_replace , lines[1] ) else: __lowercase = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: __lowercase = keys[:-1] __lowercase = get_indent(lines[1] ) + ''', '''.join([F"\"{k}\"" for k in sort_objects(A__ )] ) return "\n".join(A__ ) else: # Finally we have to deal with imports fitting on one line __lowercase = _re_bracket_content.sub(_replace , A__ ) return import_statement def _A ( A__ , A__=True ): """simple docstring""" with open(A__ , '''r''' ) as f: __lowercase = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 __lowercase = split_code_in_indented_blocks( A__ , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' ) # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(A__ ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. __lowercase = main_blocks[block_idx] __lowercase = block.split('''\n''' ) # Get to the start of the imports. __lowercase = 0 while line_idx < len(A__ ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: __lowercase = len(A__ ) else: line_idx += 1 if line_idx >= len(A__ ): continue # Ignore beginning and last line: they don't contain anything. __lowercase = '''\n'''.join(block_lines[line_idx:-1] ) __lowercase = get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. __lowercase = split_code_in_indented_blocks(A__ , indent_level=A__ ) # We have two categories of import key: list or _import_structure[key].append/extend __lowercase = _re_direct_key if '''_import_structure''' in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. __lowercase = [(pattern.search(A__ ).groups()[0] if pattern.search(A__ ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. __lowercase = [(i, key) for i, key in enumerate(A__ ) if key is not None] __lowercase = [x[0] for x in sorted(A__ , key=lambda A__ : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. __lowercase = 0 __lowercase = [] for i in range(len(A__ ) ): if keys[i] is None: reordered_blocks.append(internal_blocks[i] ) else: __lowercase = sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reordered_blocks.append(A__ ) count += 1 # And we put our main block back together with its first and last line. __lowercase = '''\n'''.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] ) if code != "\n".join(A__ ): if check_only: return True else: print(F"Overwriting {file}." ) with open(A__ , '''w''' ) as f: f.write('''\n'''.join(A__ ) ) def _A ( A__=True ): """simple docstring""" __lowercase = [] for root, _, files in os.walk(A__ ): if "__init__.py" in files: __lowercase = sort_imports(os.path.join(A__ , '''__init__.py''' ) , check_only=A__ ) if result: __lowercase = [os.path.join(A__ , '''__init__.py''' )] if len(A__ ) > 0: raise ValueError(F"Would overwrite {len(A__ )} files, run `make style`." ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''') lowerCAmelCase__ = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
41
0
import numpy as np import qiskit def _UpperCAmelCase ( UpperCAmelCase : int = 8 , UpperCAmelCase : List[Any] = None ): """simple docstring""" __lowerCamelCase : List[Any] = np.random.default_rng(seed=A__ ) # Roughly 25% of the qubits will contribute to the key. # So we take more than we need. __lowerCamelCase : Optional[int] = 6 * key_len # Measurement basis for Alice's qubits. __lowerCamelCase : Tuple = rng.integers(2 , size=A__ ) # The set of states Alice will prepare. __lowerCamelCase : str = rng.integers(2 , size=A__ ) # Measurement basis for Bob's qubits. __lowerCamelCase : List[str] = rng.integers(2 , size=A__ ) # Quantum Circuit to simulate BB84 __lowerCamelCase : List[str] = qiskit.QuantumCircuit(A__ , name="""BB84""" ) # Alice prepares her qubits according to rules above. for index, _ in enumerate(A__ ): if alice_state[index] == 1: bbaa_circ.x(A__ ) if alice_basis[index] == 1: bbaa_circ.h(A__ ) bbaa_circ.barrier() # Bob measures the received qubits according to rules above. for index, _ in enumerate(A__ ): if bob_basis[index] == 1: bbaa_circ.h(A__ ) bbaa_circ.barrier() bbaa_circ.measure_all() # Simulate the quantum circuit. __lowerCamelCase : Dict = qiskit.Aer.get_backend("""aer_simulator""" ) # We only need to run one shot because the key is unique. # Multiple shots will produce the same key. __lowerCamelCase : Union[str, Any] = qiskit.execute(A__ , A__ , shots=1 , seed_simulator=A__ ) # Returns the result of measurement. __lowerCamelCase : Optional[Any] = job.result().get_counts(A__ ).most_frequent() # Extracting the generated key from the simulation results. # Only keep measurement results where Alice and Bob chose the same basis. __lowerCamelCase : str = """""".join( [ result_bit for alice_basis_bit, bob_basis_bit, result_bit in zip( A__ , A__ , A__ ) if alice_basis_bit == bob_basis_bit ] ) # Get final key. Pad with 0 if too short, otherwise truncate. __lowerCamelCase : Optional[int] = gen_key[:key_len] if len(A__ ) >= key_len else gen_key.ljust(A__ , """0""" ) return key if __name__ == "__main__": print(F'''The generated key is : {bbaa(8, seed=0)}''') from doctest import testmod testmod()
519
'''simple docstring''' import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class lowercase_ (lowerCamelCase__ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = TextToVideoSDPipeline SCREAMING_SNAKE_CASE : List[str] = TEXT_TO_IMAGE_PARAMS SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. SCREAMING_SNAKE_CASE : Optional[int] = frozenset( [ 'num_inference_steps', 'generator', 'latents', 'return_dict', 'callback', 'callback_steps', ] ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): torch.manual_seed(0 ) __lowercase = UNetaDConditionModel( block_out_channels=(3_2, 6_4, 6_4, 6_4) ,layers_per_block=2 ,sample_size=3_2 ,in_channels=4 ,out_channels=4 ,down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') ,up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') ,cross_attention_dim=3_2 ,attention_head_dim=4 ,) __lowercase = DDIMScheduler( beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule='''scaled_linear''' ,clip_sample=lowercase__ ,set_alpha_to_one=lowercase__ ,) torch.manual_seed(0 ) __lowercase = AutoencoderKL( block_out_channels=[3_2, 6_4] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=4 ,sample_size=1_2_8 ,) torch.manual_seed(0 ) __lowercase = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=3_2 ,intermediate_size=3_7 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,hidden_act='''gelu''' ,projection_dim=5_1_2 ,) __lowercase = CLIPTextModel(lowercase__ ) __lowercase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) __lowercase = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, } return components def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : int ,lowercase__ : List[str]=0 ): if str(lowercase__ ).startswith('''mps''' ): __lowercase = torch.manual_seed(lowercase__ ) else: __lowercase = torch.Generator(device=lowercase__ ).manual_seed(lowercase__ ) __lowercase = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''pt''', } return inputs def SCREAMING_SNAKE_CASE ( self : Optional[int] ): __lowercase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __lowercase = self.get_dummy_components() __lowercase = TextToVideoSDPipeline(**lowercase__ ) __lowercase = sd_pipe.to(lowercase__ ) sd_pipe.set_progress_bar_config(disable=lowercase__ ) __lowercase = self.get_dummy_inputs(lowercase__ ) __lowercase = '''np''' __lowercase = sd_pipe(**lowercase__ ).frames __lowercase = frames[0][-3:, -3:, -1] assert frames[0].shape == (6_4, 6_4, 3) __lowercase = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowercase__ ,expected_max_diff=3e-3 ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,) def SCREAMING_SNAKE_CASE ( self : Any ): self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowercase__ ,expected_max_diff=1e-2 ) @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' ) def SCREAMING_SNAKE_CASE ( self : List[str] ): pass @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' ) def SCREAMING_SNAKE_CASE ( self : Tuple ): pass @unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' ) def SCREAMING_SNAKE_CASE ( self : Tuple ): pass def SCREAMING_SNAKE_CASE ( self : List[str] ): return super().test_progress_bar() @slow @skip_mps class lowercase_ (unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self : int ): __lowercase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy''' ) __lowercase = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' ) __lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) __lowercase = pipe.to('''cuda''' ) __lowercase = '''Spiderman is surfing''' __lowercase = torch.Generator(device='''cpu''' ).manual_seed(0 ) __lowercase = pipe(lowercase__ ,generator=lowercase__ ,num_inference_steps=2_5 ,output_type='''pt''' ).frames __lowercase = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2 def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): __lowercase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy''' ) __lowercase = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' ) __lowercase = pipe.to('''cuda''' ) __lowercase = '''Spiderman is surfing''' __lowercase = torch.Generator(device='''cpu''' ).manual_seed(0 ) __lowercase = pipe(lowercase__ ,generator=lowercase__ ,num_inference_steps=2 ,output_type='''pt''' ).frames __lowercase = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2
41
0
"""simple docstring""" def a_ ( __a ): A__ = '''''' for ch in key: if ch == " " or ch not in key_no_dups and ch.isalpha(): key_no_dups += ch return key_no_dups def a_ ( __a ): A__ = [chr(i + 65 ) for i in range(26 )] # Remove duplicate characters from key A__ = remove_duplicates(key.upper() ) A__ = len(A__ ) # First fill cipher with key characters A__ = {alphabet[i]: char for i, char in enumerate(A__ )} # Then map remaining characters in alphabet to # the alphabet from the beginning for i in range(len(A__ ) , 26 ): A__ = alphabet[i - offset] # Ensure we are not mapping letters to letters previously mapped while char in key: offset -= 1 A__ = alphabet[i - offset] A__ = char return cipher_alphabet def a_ ( __a , __a ): return "".join(cipher_map.get(A__ , A__ ) for ch in message.upper() ) def a_ ( __a , __a ): A__ = {v: k for k, v in cipher_map.items()} return "".join(rev_cipher_map.get(A__ , A__ ) for ch in message.upper() ) def a_ ( ): A__ = input('''Enter message to encode or decode: ''' ).strip() A__ = input('''Enter keyword: ''' ).strip() A__ = input('''Encipher or decipher? E/D:''' ).strip()[0].lower() try: A__ = {'''e''': encipher, '''d''': decipher}[option] except KeyError: raise KeyError('''invalid input option''' ) A__ = create_cipher_map(A__ ) print(func(A__ , A__ ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
571
'''simple docstring''' import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def _A ( A__ ): """simple docstring""" __lowercase = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''_float_tensor''', '''decoder.output_projection.weight''', ] for k in ignore_keys: state_dict.pop(A__ , A__ ) def _A ( A__ ): """simple docstring""" __lowercase , __lowercase = emb.weight.shape __lowercase = nn.Linear(A__ , A__ , bias=A__ ) __lowercase = emb.weight.data return lin_layer def _A ( A__ , A__="facebook/mbart-large-en-ro" , A__=False , A__=False ): """simple docstring""" __lowercase = torch.load(A__ , map_location='''cpu''' )['''model'''] remove_ignore_keys_(A__ ) __lowercase = state_dict['''encoder.embed_tokens.weight'''].shape[0] __lowercase = MBartConfig.from_pretrained(A__ , vocab_size=A__ ) if mbart_aa and finetuned: __lowercase = '''relu''' __lowercase = state_dict['''decoder.embed_tokens.weight'''] __lowercase = MBartForConditionalGeneration(A__ ) model.model.load_state_dict(A__ ) if finetuned: __lowercase = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default='''facebook/mbart-large-cc25''', type=str, help='''Which huggingface architecture to use: mbart-large''', ) parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''') parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''') lowerCAmelCase__ = parser.parse_args() lowerCAmelCase__ = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
41
0
from importlib import import_module from .logging import get_logger __SCREAMING_SNAKE_CASE =get_logger(__name__) class __magic_name__ : '''simple docstring''' def __init__( self: Union[str, Any] , _lowerCamelCase: Dict , _lowerCamelCase: Union[str, Any]=None ): SCREAMING_SNAKE_CASE_ = attrs or [] if module is not None: for key in module.__dict__: if key in attrs or not key.startswith('''__''' ): setattr(self , lowercase__ , getattr(lowercase__ , lowercase__ ) ) SCREAMING_SNAKE_CASE_ = module._original_module if isinstance(lowercase__ , _PatchedModuleObj ) else module class __magic_name__ : '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = [] def __init__( self: Tuple , _lowerCamelCase: Dict , _lowerCamelCase: str , _lowerCamelCase: List[str] , _lowerCamelCase: Any=None ): SCREAMING_SNAKE_CASE_ = obj SCREAMING_SNAKE_CASE_ = target SCREAMING_SNAKE_CASE_ = new SCREAMING_SNAKE_CASE_ = target.split('''.''' )[0] SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = attrs or [] def __enter__( self: Union[str, Any] ): *SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.target.split('''.''' ) # Patch modules: # it's used to patch attributes of submodules like "os.path.join"; # in this case we need to patch "os" and "os.path" for i in range(len(lowercase__ ) ): try: SCREAMING_SNAKE_CASE_ = import_module('''.'''.join(submodules[: i + 1] ) ) except ModuleNotFoundError: continue # We iterate over all the globals in self.obj in case we find "os" or "os.path" for attr in self.obj.__dir__(): SCREAMING_SNAKE_CASE_ = getattr(self.obj , lowercase__ ) # We don't check for the name of the global, but rather if its value *is* "os" or "os.path". # This allows to patch renamed modules like "from os import path as ospath". if obj_attr is submodule or ( (isinstance(lowercase__ , _PatchedModuleObj ) and obj_attr._original_module is submodule) ): SCREAMING_SNAKE_CASE_ = obj_attr # patch at top level setattr(self.obj , lowercase__ , _PatchedModuleObj(lowercase__ , attrs=self.attrs ) ) SCREAMING_SNAKE_CASE_ = getattr(self.obj , lowercase__ ) # construct lower levels patches for key in submodules[i + 1 :]: setattr(lowercase__ , lowercase__ , _PatchedModuleObj(getattr(lowercase__ , lowercase__ , lowercase__ ) , attrs=self.attrs ) ) SCREAMING_SNAKE_CASE_ = getattr(lowercase__ , lowercase__ ) # finally set the target attribute setattr(lowercase__ , lowercase__ , self.new ) # Patch attribute itself: # it's used for builtins like "open", # and also to patch "os.path.join" we may also need to patch "join" # itself if it was imported as "from os.path import join". if submodules: # if it's an attribute of a submodule like "os.path.join" try: SCREAMING_SNAKE_CASE_ = getattr(import_module('''.'''.join(lowercase__ ) ) , lowercase__ ) except (AttributeError, ModuleNotFoundError): return # We iterate over all the globals in self.obj in case we find "os.path.join" for attr in self.obj.__dir__(): # We don't check for the name of the global, but rather if its value *is* "os.path.join". # This allows to patch renamed attributes like "from os.path import join as pjoin". if getattr(self.obj , lowercase__ ) is attr_value: SCREAMING_SNAKE_CASE_ = getattr(self.obj , lowercase__ ) setattr(self.obj , lowercase__ , self.new ) elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open" SCREAMING_SNAKE_CASE_ = globals()['''__builtins__'''][target_attr] setattr(self.obj , lowercase__ , self.new ) else: raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule." ) def __exit__( self: Tuple , *_lowerCamelCase: Any ): for attr in list(self.original ): setattr(self.obj , lowercase__ , self.original.pop(lowercase__ ) ) def _A ( self: int ): self.__enter__() self._active_patches.append(self ) def _A ( self: Dict ): try: self._active_patches.remove(self ) except ValueError: # If the patch hasn't been started this will fail return None return self.__exit__()
234
'''simple docstring''' import os from math import logaa def _A ( A__ = "base_exp.txt" ): """simple docstring""" __lowercase = 0 __lowercase = 0 for i, line in enumerate(open(os.path.join(os.path.dirname(A__ ) , A__ ) ) ): __lowercase , __lowercase = list(map(A__ , line.split(''',''' ) ) ) if x * logaa(A__ ) > largest: __lowercase = x * logaa(A__ ) __lowercase = i + 1 return result if __name__ == "__main__": print(solution())
41
0
'''simple docstring''' from datetime import datetime import matplotlib.pyplot as plt import torch def __UpperCamelCase ( UpperCAmelCase ): for param in module.parameters(): lowercase__ : str = False def __UpperCamelCase ( ): lowercase__ : str = '''cuda''' if torch.cuda.is_available() else '''cpu''' if torch.backends.mps.is_available() and torch.backends.mps.is_built(): lowercase__ : Dict = '''mps''' if device == "mps": print( '''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch''' ''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues''' ''' with generations.''' ) return device def __UpperCamelCase ( UpperCAmelCase ): lowercase__ : Tuple = plt.imshow(A__ ) fig.axes.get_xaxis().set_visible(A__ ) fig.axes.get_yaxis().set_visible(A__ ) plt.show() def __UpperCamelCase ( ): lowercase__ : Union[str, Any] = datetime.now() lowercase__ : List[Any] = current_time.strftime('''%H:%M:%S''' ) return timestamp
152
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''', # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class lowercase_ (lowerCamelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = 'blenderbot-small' SCREAMING_SNAKE_CASE : int = ['past_key_values'] SCREAMING_SNAKE_CASE : List[str] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self : Optional[int] ,lowercase__ : List[str]=5_0_2_6_5 ,lowercase__ : Optional[Any]=5_1_2 ,lowercase__ : Optional[int]=8 ,lowercase__ : List[Any]=2_0_4_8 ,lowercase__ : List[str]=1_6 ,lowercase__ : str=8 ,lowercase__ : Any=2_0_4_8 ,lowercase__ : Tuple=1_6 ,lowercase__ : Tuple=0.0 ,lowercase__ : List[str]=0.0 ,lowercase__ : Any=True ,lowercase__ : str=True ,lowercase__ : int="gelu" ,lowercase__ : Tuple=5_1_2 ,lowercase__ : List[Any]=0.1 ,lowercase__ : Tuple=0.0 ,lowercase__ : str=0.0 ,lowercase__ : Any=0.0_2 ,lowercase__ : Union[str, Any]=1 ,lowercase__ : List[Any]=False ,lowercase__ : Optional[int]=0 ,lowercase__ : Optional[int]=1 ,lowercase__ : str=2 ,lowercase__ : int=2 ,**lowercase__ : List[str] ,): __lowercase = vocab_size __lowercase = max_position_embeddings __lowercase = d_model __lowercase = encoder_ffn_dim __lowercase = encoder_layers __lowercase = encoder_attention_heads __lowercase = decoder_ffn_dim __lowercase = decoder_layers __lowercase = decoder_attention_heads __lowercase = dropout __lowercase = attention_dropout __lowercase = activation_dropout __lowercase = activation_function __lowercase = init_std __lowercase = encoder_layerdrop __lowercase = decoder_layerdrop __lowercase = use_cache __lowercase = encoder_layers __lowercase = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=lowercase__ ,bos_token_id=lowercase__ ,eos_token_id=lowercase__ ,is_encoder_decoder=lowercase__ ,decoder_start_token_id=lowercase__ ,forced_eos_token_id=lowercase__ ,**lowercase__ ,) class lowercase_ (lowerCamelCase__ ): """simple docstring""" @property def SCREAMING_SNAKE_CASE ( self : Dict ): if self.task in ["default", "seq2seq-lm"]: __lowercase = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: __lowercase = {0: '''batch'''} __lowercase = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: __lowercase = {0: '''batch''', 1: '''decoder_sequence'''} __lowercase = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(lowercase__ ,direction='''inputs''' ) elif self.task == "causal-lm": # TODO: figure this case out. __lowercase = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: __lowercase , __lowercase = self.num_layers for i in range(lowercase__ ): __lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''} __lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''} else: __lowercase = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}), ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}), ] ) return common_inputs @property def SCREAMING_SNAKE_CASE ( self : List[Any] ): if self.task in ["default", "seq2seq-lm"]: __lowercase = super().outputs else: __lowercase = super(lowercase__ ,self ).outputs if self.use_past: __lowercase , __lowercase = self.num_layers for i in range(lowercase__ ): __lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''} __lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''} return common_outputs def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,): __lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) # Generate decoder inputs __lowercase = seq_length if not self.use_past else 1 __lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) __lowercase = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} __lowercase = dict(**lowercase__ ,**lowercase__ ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch __lowercase , __lowercase = common_inputs['''input_ids'''].shape __lowercase = common_inputs['''decoder_input_ids'''].shape[1] __lowercase , __lowercase = self.num_attention_heads __lowercase = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) __lowercase = decoder_seq_length + 3 __lowercase = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) __lowercase = torch.cat( [common_inputs['''decoder_attention_mask'''], torch.ones(lowercase__ ,lowercase__ )] ,dim=1 ) __lowercase = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered __lowercase , __lowercase = self.num_layers __lowercase = min(lowercase__ ,lowercase__ ) __lowercase = max(lowercase__ ,lowercase__ ) - min_num_layers __lowercase = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder''' for _ in range(lowercase__ ): common_inputs["past_key_values"].append( ( torch.zeros(lowercase__ ), torch.zeros(lowercase__ ), torch.zeros(lowercase__ ), torch.zeros(lowercase__ ), ) ) # TODO: test this. __lowercase = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape for _ in range(lowercase__ ,lowercase__ ): common_inputs["past_key_values"].append((torch.zeros(lowercase__ ), torch.zeros(lowercase__ )) ) return common_inputs def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,): __lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch __lowercase , __lowercase = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values __lowercase = seqlen + 2 __lowercase , __lowercase = self.num_layers __lowercase , __lowercase = self.num_attention_heads __lowercase = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) __lowercase = common_inputs['''attention_mask'''].dtype __lowercase = torch.cat( [common_inputs['''attention_mask'''], torch.ones(lowercase__ ,lowercase__ ,dtype=lowercase__ )] ,dim=1 ) __lowercase = [ (torch.zeros(lowercase__ ), torch.zeros(lowercase__ )) for _ in range(lowercase__ ) ] return common_inputs def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,): # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX __lowercase = compute_effective_axis_dimension( lowercase__ ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX __lowercase = tokenizer.num_special_tokens_to_add(lowercase__ ) __lowercase = compute_effective_axis_dimension( lowercase__ ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=lowercase__ ) # Generate dummy inputs according to compute batch and sequence __lowercase = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size __lowercase = dict(tokenizer(lowercase__ ,return_tensors=lowercase__ ) ) return common_inputs def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,): if self.task in ["default", "seq2seq-lm"]: __lowercase = self._generate_dummy_inputs_for_default_and_seqaseq_lm( lowercase__ ,batch_size=lowercase__ ,seq_length=lowercase__ ,is_pair=lowercase__ ,framework=lowercase__ ) elif self.task == "causal-lm": __lowercase = self._generate_dummy_inputs_for_causal_lm( lowercase__ ,batch_size=lowercase__ ,seq_length=lowercase__ ,is_pair=lowercase__ ,framework=lowercase__ ) else: __lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowercase__ ,batch_size=lowercase__ ,seq_length=lowercase__ ,is_pair=lowercase__ ,framework=lowercase__ ) return common_inputs def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : List[Any] ,lowercase__ : Tuple ,lowercase__ : List[Any] ,lowercase__ : Optional[Any] ): if self.task in ["default", "seq2seq-lm"]: __lowercase = super()._flatten_past_key_values_(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) else: __lowercase = super(lowercase__ ,self )._flatten_past_key_values_( lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
41
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available, ) __A = { "configuration_speecht5": [ "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP", "SpeechT5Config", "SpeechT5HifiGanConfig", ], "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"], "processing_speecht5": ["SpeechT5Processor"], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["SpeechT5Tokenizer"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST", "SpeechT5ForSpeechToText", "SpeechT5ForSpeechToSpeech", "SpeechT5ForTextToSpeech", "SpeechT5Model", "SpeechT5PreTrainedModel", "SpeechT5HifiGan", ] if TYPE_CHECKING: from .configuration_speechta import ( SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, SpeechTaConfig, SpeechTaHifiGanConfig, ) from .feature_extraction_speechta import SpeechTaFeatureExtractor from .processing_speechta import SpeechTaProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speechta import SpeechTaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speechta import ( SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaModel, SpeechTaPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
586
'''simple docstring''' from __future__ import annotations def _A ( A__ , A__ ): """simple docstring""" if b == 0: return (1, 0) ((__lowercase) , (__lowercase)) = extended_euclid(A__ , a % b ) __lowercase = a // b return (y, x - k * y) def _A ( A__ , A__ , A__ , A__ ): """simple docstring""" ((__lowercase) , (__lowercase)) = extended_euclid(A__ , A__ ) __lowercase = na * na __lowercase = ra * x * na + ra * y * na return (n % m + m) % m def _A ( A__ , A__ ): """simple docstring""" ((__lowercase) , (__lowercase)) = extended_euclid(A__ , A__ ) if b < 0: __lowercase = (b % n + n) % n return b def _A ( A__ , A__ , A__ , A__ ): """simple docstring""" __lowercase , __lowercase = invert_modulo(A__ , A__ ), invert_modulo(A__ , A__ ) __lowercase = na * na __lowercase = ra * x * na + ra * y * na return (n % m + m) % m if __name__ == "__main__": from doctest import testmod testmod(name='''chinese_remainder_theorem''', verbose=True) testmod(name='''chinese_remainder_theorem2''', verbose=True) testmod(name='''invert_modulo''', verbose=True) testmod(name='''extended_euclid''', verbose=True)
41
0
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import XLMRobertaTokenizer from diffusers import ( AltDiffusionImgaImgPipeline, AutoencoderKL, PNDMScheduler, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class _snake_case ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : Dict): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _lowerCAmelCase ( self : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = 1 _SCREAMING_SNAKE_CASE : str = 3 _SCREAMING_SNAKE_CASE : int = (3_2, 3_2) _SCREAMING_SNAKE_CASE : Tuple = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0)).to(lowercase__) return image @property def _lowerCAmelCase ( self : str): """simple docstring""" torch.manual_seed(0) _SCREAMING_SNAKE_CASE : Dict = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , ) return model @property def _lowerCAmelCase ( self : List[str]): """simple docstring""" torch.manual_seed(0) _SCREAMING_SNAKE_CASE : Optional[Any] = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) return model @property def _lowerCAmelCase ( self : str): """simple docstring""" torch.manual_seed(0) _SCREAMING_SNAKE_CASE : Tuple = RobertaSeriesConfig( hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_6 , ) return RobertaSeriesModelWithTransformation(lowercase__) @property def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" def extract(*_A : int , **_A : Union[str, Any]): class _snake_case : """simple docstring""" def __init__( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = torch.ones([0]) def _lowerCAmelCase ( self : Tuple , _A : List[Any]): """simple docstring""" self.pixel_values.to(lowercase__) return self return Out() return extract def _lowerCAmelCase ( self : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = """cpu""" # ensure determinism for the device-dependent torch.Generator _SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_cond_unet _SCREAMING_SNAKE_CASE : str = PNDMScheduler(skip_prk_steps=lowercase__) _SCREAMING_SNAKE_CASE : Tuple = self.dummy_vae _SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_text_encoder _SCREAMING_SNAKE_CASE : int = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""") _SCREAMING_SNAKE_CASE : Tuple = 7_7 _SCREAMING_SNAKE_CASE : List[Any] = self.dummy_image.to(lowercase__) _SCREAMING_SNAKE_CASE : Union[str, Any] = init_image / 2 + 0.5 # make sure here that pndm scheduler skips prk _SCREAMING_SNAKE_CASE : int = AltDiffusionImgaImgPipeline( unet=lowercase__ , scheduler=lowercase__ , vae=lowercase__ , text_encoder=lowercase__ , tokenizer=lowercase__ , safety_checker=lowercase__ , feature_extractor=self.dummy_extractor , ) _SCREAMING_SNAKE_CASE : Optional[Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowercase__) _SCREAMING_SNAKE_CASE : Union[str, Any] = alt_pipe.to(lowercase__) alt_pipe.set_progress_bar_config(disable=lowercase__) _SCREAMING_SNAKE_CASE : Any = """A painting of a squirrel eating a burger""" _SCREAMING_SNAKE_CASE : List[Any] = torch.Generator(device=lowercase__).manual_seed(0) _SCREAMING_SNAKE_CASE : Optional[int] = alt_pipe( [prompt] , generator=lowercase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=lowercase__ , ) _SCREAMING_SNAKE_CASE : Tuple = output.images _SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device=lowercase__).manual_seed(0) _SCREAMING_SNAKE_CASE : List[str] = alt_pipe( [prompt] , generator=lowercase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=lowercase__ , return_dict=lowercase__ , )[0] _SCREAMING_SNAKE_CASE : str = image[0, -3:, -3:, -1] _SCREAMING_SNAKE_CASE : Dict = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 3_2, 3_2, 3) _SCREAMING_SNAKE_CASE : List[str] = np.array([0.4_427, 0.3_731, 0.4_249, 0.4_941, 0.4_546, 0.4_148, 0.4_193, 0.4_666, 0.4_499]) assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3 @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""") def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_cond_unet _SCREAMING_SNAKE_CASE : Tuple = PNDMScheduler(skip_prk_steps=lowercase__) _SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_vae _SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_text_encoder _SCREAMING_SNAKE_CASE : Optional[Any] = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""") _SCREAMING_SNAKE_CASE : Tuple = 7_7 _SCREAMING_SNAKE_CASE : int = self.dummy_image.to(lowercase__) # put models in fp16 _SCREAMING_SNAKE_CASE : Union[str, Any] = unet.half() _SCREAMING_SNAKE_CASE : Any = vae.half() _SCREAMING_SNAKE_CASE : Optional[Any] = bert.half() # make sure here that pndm scheduler skips prk _SCREAMING_SNAKE_CASE : Optional[Any] = AltDiffusionImgaImgPipeline( unet=lowercase__ , scheduler=lowercase__ , vae=lowercase__ , text_encoder=lowercase__ , tokenizer=lowercase__ , safety_checker=lowercase__ , feature_extractor=self.dummy_extractor , ) _SCREAMING_SNAKE_CASE : List[Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowercase__) _SCREAMING_SNAKE_CASE : Any = alt_pipe.to(lowercase__) alt_pipe.set_progress_bar_config(disable=lowercase__) _SCREAMING_SNAKE_CASE : Union[str, Any] = """A painting of a squirrel eating a burger""" _SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0) _SCREAMING_SNAKE_CASE : Any = alt_pipe( [prompt] , generator=lowercase__ , num_inference_steps=2 , output_type="""np""" , image=lowercase__ , ).images assert image.shape == (1, 3_2, 3_2, 3) @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""") def _lowerCAmelCase ( self : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""") # resize to resolution that is divisible by 8 but not 16 or 32 _SCREAMING_SNAKE_CASE : Union[str, Any] = init_image.resize((7_6_0, 5_0_4)) _SCREAMING_SNAKE_CASE : List[str] = """BAAI/AltDiffusion""" _SCREAMING_SNAKE_CASE : int = AltDiffusionImgaImgPipeline.from_pretrained( lowercase__ , safety_checker=lowercase__ , ) pipe.to(lowercase__) pipe.set_progress_bar_config(disable=lowercase__) pipe.enable_attention_slicing() _SCREAMING_SNAKE_CASE : Union[str, Any] = """A fantasy landscape, trending on artstation""" _SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(0) _SCREAMING_SNAKE_CASE : Tuple = pipe( prompt=lowercase__ , image=lowercase__ , strength=0.75 , guidance_scale=7.5 , generator=lowercase__ , output_type="""np""" , ) _SCREAMING_SNAKE_CASE : List[Any] = output.images[0] _SCREAMING_SNAKE_CASE : Dict = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert image.shape == (5_0_4, 7_6_0, 3) _SCREAMING_SNAKE_CASE : Dict = np.array([0.9_358, 0.9_397, 0.9_599, 0.9_901, 1.0_000, 1.0_000, 0.9_882, 1.0_000, 1.0_000]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @slow @require_torch_gpu class _snake_case ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : Any): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : str = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""") _SCREAMING_SNAKE_CASE : str = init_image.resize((7_6_8, 5_1_2)) _SCREAMING_SNAKE_CASE : Any = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy""") _SCREAMING_SNAKE_CASE : Dict = """BAAI/AltDiffusion""" _SCREAMING_SNAKE_CASE : int = AltDiffusionImgaImgPipeline.from_pretrained( lowercase__ , safety_checker=lowercase__ , ) pipe.to(lowercase__) pipe.set_progress_bar_config(disable=lowercase__) pipe.enable_attention_slicing() _SCREAMING_SNAKE_CASE : Optional[int] = """A fantasy landscape, trending on artstation""" _SCREAMING_SNAKE_CASE : str = torch.manual_seed(0) _SCREAMING_SNAKE_CASE : Any = pipe( prompt=lowercase__ , image=lowercase__ , strength=0.75 , guidance_scale=7.5 , generator=lowercase__ , output_type="""np""" , ) _SCREAMING_SNAKE_CASE : Dict = output.images[0] assert image.shape == (5_1_2, 7_6_8, 3) # img2img is flaky across GPUs even in fp32, so using MAE here assert np.abs(expected_image - image).max() < 1e-2
338
'''simple docstring''' from datasets.utils.patching import _PatchedModuleObj, patch_submodule from . import _test_patching def _A ( ): """simple docstring""" import os as original_os from os import path as original_path from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join __lowercase = '''__test_patch_submodule_mock__''' with patch_submodule(_test_patching , '''os.path.join''' , A__ ): # Every way to access os.path.join must be patched, and the rest must stay untouched # check os.path.join assert isinstance(_test_patching.os , _PatchedModuleObj ) assert isinstance(_test_patching.os.path , _PatchedModuleObj ) assert _test_patching.os.path.join is mock # check path.join assert isinstance(_test_patching.path , _PatchedModuleObj ) assert _test_patching.path.join is mock # check join assert _test_patching.join is mock # check that the other attributes are untouched assert _test_patching.os.rename is original_rename assert _test_patching.path.dirname is original_dirname assert _test_patching.os.path.dirname is original_dirname # Even renamed modules or objects must be patched # check renamed_os.path.join assert isinstance(_test_patching.renamed_os , _PatchedModuleObj ) assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj ) assert _test_patching.renamed_os.path.join is mock # check renamed_path.join assert isinstance(_test_patching.renamed_path , _PatchedModuleObj ) assert _test_patching.renamed_path.join is mock # check renamed_join assert _test_patching.renamed_join is mock # check that the other attributes are untouched assert _test_patching.renamed_os.rename is original_rename assert _test_patching.renamed_path.dirname is original_dirname assert _test_patching.renamed_os.path.dirname is original_dirname # check that everthing is back to normal when the patch is over assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join def _A ( ): """simple docstring""" assert _test_patching.open is open __lowercase = '''__test_patch_submodule_builtin_mock__''' # _test_patching has "open" in its globals assert _test_patching.open is open with patch_submodule(_test_patching , '''open''' , A__ ): assert _test_patching.open is mock # check that everthing is back to normal when the patch is over assert _test_patching.open is open def _A ( ): """simple docstring""" __lowercase = '''__test_patch_submodule_missing_mock__''' with patch_submodule(_test_patching , '''pandas.read_csv''' , A__ ): pass def _A ( ): """simple docstring""" __lowercase = '''__test_patch_submodule_missing_builtin_mock__''' # _test_patching doesn't have "len" in its globals assert getattr(_test_patching , '''len''' , A__ ) is None with patch_submodule(_test_patching , '''len''' , A__ ): assert _test_patching.len is mock assert _test_patching.len is len def _A ( ): """simple docstring""" __lowercase = '''__test_patch_submodule_start_and_stop_mock__''' __lowercase = patch_submodule(_test_patching , '''open''' , A__ ) assert _test_patching.open is open patch.start() assert _test_patching.open is mock patch.stop() assert _test_patching.open is open def _A ( ): """simple docstring""" from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join __lowercase = '''__test_patch_submodule_successive_join__''' __lowercase = '''__test_patch_submodule_successive_dirname__''' __lowercase = '''__test_patch_submodule_successive_rename__''' assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename with patch_submodule(_test_patching , '''os.path.join''' , A__ ): with patch_submodule(_test_patching , '''os.rename''' , A__ ): with patch_submodule(_test_patching , '''os.path.dirname''' , A__ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename # try another order with patch_submodule(_test_patching , '''os.rename''' , A__ ): with patch_submodule(_test_patching , '''os.path.join''' , A__ ): with patch_submodule(_test_patching , '''os.path.dirname''' , A__ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename def _A ( ): """simple docstring""" __lowercase = '''__test_patch_submodule_doesnt_exist_mock__''' with patch_submodule(_test_patching , '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''' , A__ ): pass with patch_submodule(_test_patching , '''os.__attribute_that_doesn_exist__''' , A__ ): pass
41
0
from itertools import permutations def a_ (__A ) -> Union[str, Any]: """simple docstring""" if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False __a : Any = [7, 11, 13, 17] for i, test in enumerate(A__ ): if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def a_ (__A = 10 ) -> str: """simple docstring""" return sum( int("".join(map(A__ , A__ ) ) ) for num in permutations(range(A__ ) ) if is_substring_divisible(A__ ) ) if __name__ == "__main__": print(F"{solution() = }")
351
'''simple docstring''' import os import tempfile import unittest from transformers import NezhaConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, ) from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST class lowercase_ : """simple docstring""" def __init__( self : Dict ,lowercase__ : Dict ,lowercase__ : int=1_3 ,lowercase__ : List[str]=7 ,lowercase__ : int=True ,lowercase__ : int=True ,lowercase__ : Union[str, Any]=True ,lowercase__ : List[Any]=True ,lowercase__ : str=9_9 ,lowercase__ : Optional[Any]=3_2 ,lowercase__ : Union[str, Any]=5 ,lowercase__ : List[Any]=4 ,lowercase__ : str=3_7 ,lowercase__ : Tuple="gelu" ,lowercase__ : List[Any]=0.1 ,lowercase__ : Dict=0.1 ,lowercase__ : int=1_2_8 ,lowercase__ : Dict=3_2 ,lowercase__ : Dict=1_6 ,lowercase__ : Any=2 ,lowercase__ : int=0.0_2 ,lowercase__ : List[str]=3 ,lowercase__ : Dict=4 ,lowercase__ : Optional[int]=None ,): __lowercase = parent __lowercase = batch_size __lowercase = seq_length __lowercase = is_training __lowercase = use_input_mask __lowercase = use_token_type_ids __lowercase = use_labels __lowercase = vocab_size __lowercase = hidden_size __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = intermediate_size __lowercase = hidden_act __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = max_position_embeddings __lowercase = type_vocab_size __lowercase = type_sequence_label_size __lowercase = initializer_range __lowercase = num_labels __lowercase = num_choices __lowercase = scope def SCREAMING_SNAKE_CASE ( self : Optional[int] ): __lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) __lowercase = None if self.use_input_mask: __lowercase = random_attention_mask([self.batch_size, self.seq_length] ) __lowercase = None if self.use_token_type_ids: __lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) __lowercase = None __lowercase = None __lowercase = None if self.use_labels: __lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) __lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) __lowercase = ids_tensor([self.batch_size] ,self.num_choices ) __lowercase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): return NezhaConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=lowercase__ ,initializer_range=self.initializer_range ,) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) = self.prepare_config_and_inputs() __lowercase = True __lowercase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __lowercase = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Union[str, Any] ,lowercase__ : List[str] ,lowercase__ : List[str] ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : Tuple ,lowercase__ : str ): __lowercase = NezhaModel(config=lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ) __lowercase = model(lowercase__ ,token_type_ids=lowercase__ ) __lowercase = model(lowercase__ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Dict ,lowercase__ : str ,lowercase__ : Optional[Any] ,lowercase__ : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : Tuple ,lowercase__ : Optional[int] ,lowercase__ : List[Any] ,): __lowercase = True __lowercase = NezhaModel(lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = model( lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,encoder_hidden_states=lowercase__ ,encoder_attention_mask=lowercase__ ,) __lowercase = model( lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,encoder_hidden_states=lowercase__ ,) __lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Optional[Any] ): __lowercase = NezhaForMaskedLM(config=lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Any ,lowercase__ : int ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[int] ,lowercase__ : Any ): __lowercase = NezhaForNextSentencePrediction(config=lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = model( lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) ) def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : str ,lowercase__ : Dict ,lowercase__ : Tuple ,lowercase__ : Dict ,lowercase__ : Tuple ,lowercase__ : int ,lowercase__ : int ): __lowercase = NezhaForPreTraining(config=lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = model( lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ,next_sentence_label=lowercase__ ,) self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[Any] ,lowercase__ : Tuple ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Optional[int] ,lowercase__ : Union[str, Any] ): __lowercase = NezhaForQuestionAnswering(config=lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = model( lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,start_positions=lowercase__ ,end_positions=lowercase__ ,) self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Tuple ,lowercase__ : str ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Any ,lowercase__ : Optional[int] ,lowercase__ : int ): __lowercase = self.num_labels __lowercase = NezhaForSequenceClassification(lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : Union[str, Any] ,lowercase__ : List[str] ,lowercase__ : int ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Any ,lowercase__ : Optional[Any] ): __lowercase = self.num_labels __lowercase = NezhaForTokenClassification(config=lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : List[Any] ,lowercase__ : str ): __lowercase = self.num_choices __lowercase = NezhaForMultipleChoice(config=lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() __lowercase = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() __lowercase = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() __lowercase = model( lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): __lowercase = self.prepare_config_and_inputs() ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) = config_and_inputs __lowercase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class lowercase_ (lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = ( ( NezhaModel, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE : Tuple = ( { 'feature-extraction': NezhaModel, 'fill-mask': NezhaForMaskedLM, 'question-answering': NezhaForQuestionAnswering, 'text-classification': NezhaForSequenceClassification, 'token-classification': NezhaForTokenClassification, 'zero-shot': NezhaForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE : List[str] = True def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[str] ,lowercase__ : str ,lowercase__ : Any=False ): __lowercase = super()._prepare_for_class(lowercase__ ,lowercase__ ,return_labels=lowercase__ ) if return_labels: if model_class in get_values(lowercase__ ): __lowercase = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=lowercase__ ) __lowercase = torch.zeros( self.model_tester.batch_size ,dtype=torch.long ,device=lowercase__ ) return inputs_dict def SCREAMING_SNAKE_CASE ( self : Tuple ): __lowercase = NezhaModelTester(self ) __lowercase = ConfigTester(self ,config_class=lowercase__ ,hidden_size=3_7 ) def SCREAMING_SNAKE_CASE ( self : int ): self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : int ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Any ): __lowercase = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Any ): # This regression test was failing with PyTorch < 1.3 ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() __lowercase = None self.model_tester.create_and_check_model_as_decoder( lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,) def SCREAMING_SNAKE_CASE ( self : int ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*lowercase__ ) def SCREAMING_SNAKE_CASE ( self : int ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_next_sequence_prediction(*lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*lowercase__ ) def SCREAMING_SNAKE_CASE ( self : str ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowercase__ ) def SCREAMING_SNAKE_CASE ( self : str ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowercase__ ) def SCREAMING_SNAKE_CASE ( self : int ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowercase__ ) @slow def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase = NezhaModel.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) @slow @require_torch_gpu def SCREAMING_SNAKE_CASE ( self : Optional[int] ): __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # NezhaForMultipleChoice behaves incorrectly in JIT environments. if model_class == NezhaForMultipleChoice: return __lowercase = True __lowercase = model_class(config=lowercase__ ) __lowercase = self._prepare_for_class(lowercase__ ,lowercase__ ) __lowercase = torch.jit.trace( lowercase__ ,(inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(lowercase__ ,os.path.join(lowercase__ ,'''bert.pt''' ) ) __lowercase = torch.jit.load(os.path.join(lowercase__ ,'''bert.pt''' ) ,map_location=lowercase__ ) loaded(inputs_dict['''input_ids'''].to(lowercase__ ) ,inputs_dict['''attention_mask'''].to(lowercase__ ) ) @require_torch class lowercase_ (unittest.TestCase ): """simple docstring""" @slow def SCREAMING_SNAKE_CASE ( self : int ): __lowercase = NezhaModel.from_pretrained('''sijunhe/nezha-cn-base''' ) __lowercase = torch.tensor([[0, 1, 2, 3, 4, 5]] ) __lowercase = torch.tensor([[0, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __lowercase = model(lowercase__ ,attention_mask=lowercase__ )[0] __lowercase = torch.Size((1, 6, 7_6_8) ) self.assertEqual(output.shape ,lowercase__ ) __lowercase = torch.tensor([[[0.0_6_8_5, 0.2_4_4_1, 0.1_1_0_2], [0.0_6_0_0, 0.1_9_0_6, 0.1_3_4_9], [0.0_2_2_1, 0.0_8_1_9, 0.0_5_8_6]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,lowercase__ ,atol=1e-4 ) ) @slow def SCREAMING_SNAKE_CASE ( self : Dict ): __lowercase = NezhaForMaskedLM.from_pretrained('''sijunhe/nezha-cn-base''' ) __lowercase = torch.tensor([[0, 1, 2, 3, 4, 5]] ) __lowercase = torch.tensor([[1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __lowercase = model(lowercase__ ,attention_mask=lowercase__ )[0] __lowercase = torch.Size((1, 6, 2_1_1_2_8) ) self.assertEqual(output.shape ,lowercase__ ) __lowercase = torch.tensor( [[-2.7_9_3_9, -1.7_9_0_2, -2.2_1_8_9], [-2.8_5_8_5, -1.8_9_0_8, -2.3_7_2_3], [-2.6_4_9_9, -1.7_7_5_0, -2.2_5_5_8]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,lowercase__ ,atol=1e-4 ) )
41
0
"""simple docstring""" import unittest import numpy as np from transformers import BertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.bert.modeling_flax_bert import ( FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, ) class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def __init__( self : str , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : int=13 , _SCREAMING_SNAKE_CASE : List[Any]=7 , _SCREAMING_SNAKE_CASE : List[str]=True , _SCREAMING_SNAKE_CASE : Dict=True , _SCREAMING_SNAKE_CASE : Optional[int]=True , _SCREAMING_SNAKE_CASE : Any=True , _SCREAMING_SNAKE_CASE : List[str]=99 , _SCREAMING_SNAKE_CASE : Tuple=32 , _SCREAMING_SNAKE_CASE : str=5 , _SCREAMING_SNAKE_CASE : Any=4 , _SCREAMING_SNAKE_CASE : Optional[int]=37 , _SCREAMING_SNAKE_CASE : str="gelu" , _SCREAMING_SNAKE_CASE : List[Any]=0.1 , _SCREAMING_SNAKE_CASE : int=0.1 , _SCREAMING_SNAKE_CASE : Dict=512 , _SCREAMING_SNAKE_CASE : Union[str, Any]=16 , _SCREAMING_SNAKE_CASE : Tuple=2 , _SCREAMING_SNAKE_CASE : str=0.0_2 , _SCREAMING_SNAKE_CASE : List[str]=4 , ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Any = parent SCREAMING_SNAKE_CASE : Dict = batch_size SCREAMING_SNAKE_CASE : Optional[Any] = seq_length SCREAMING_SNAKE_CASE : Optional[Any] = is_training SCREAMING_SNAKE_CASE : Tuple = use_attention_mask SCREAMING_SNAKE_CASE : Union[str, Any] = use_token_type_ids SCREAMING_SNAKE_CASE : List[str] = use_labels SCREAMING_SNAKE_CASE : List[str] = vocab_size SCREAMING_SNAKE_CASE : Any = hidden_size SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers SCREAMING_SNAKE_CASE : Dict = num_attention_heads SCREAMING_SNAKE_CASE : Any = intermediate_size SCREAMING_SNAKE_CASE : List[Any] = hidden_act SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout_prob SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings SCREAMING_SNAKE_CASE : int = type_vocab_size SCREAMING_SNAKE_CASE : Tuple = type_sequence_label_size SCREAMING_SNAKE_CASE : Tuple = initializer_range SCREAMING_SNAKE_CASE : Tuple = num_choices def _lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE : Optional[int] = None if self.use_attention_mask: SCREAMING_SNAKE_CASE : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE : int = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) SCREAMING_SNAKE_CASE : List[str] = BertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase__ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _lowerCAmelCase ( self : List[str] ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Dict = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs SCREAMING_SNAKE_CASE : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask} return config, inputs_dict def _lowerCAmelCase ( self : List[str] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = config_and_inputs SCREAMING_SNAKE_CASE : Any = True SCREAMING_SNAKE_CASE : List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask, ) @require_flax class lowerCAmelCase__ ( lowerCamelCase__ , unittest.TestCase ): '''simple docstring''' _SCREAMING_SNAKE_CASE : Optional[int] = True _SCREAMING_SNAKE_CASE : Any = ( ( FlaxBertModel, FlaxBertForPreTraining, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForQuestionAnswering, FlaxBertForNextSentencePrediction, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertForQuestionAnswering, ) if is_flax_available() else () ) def _lowerCAmelCase ( self : Dict ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Dict = FlaxBertModelTester(self ) @slow def _lowerCAmelCase ( self : Optional[Any] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxBertModel.from_pretrained('bert-base-cased' ) SCREAMING_SNAKE_CASE : Optional[Any] = model(np.ones((1, 1) ) ) self.assertIsNotNone(lowercase__ )
265
'''simple docstring''' from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar lowerCAmelCase__ = TypeVar('''KEY''') lowerCAmelCase__ = TypeVar('''VAL''') @dataclass(frozen=lowerCamelCase__ , slots=lowerCamelCase__ ) class lowercase_ (Generic[KEY, VAL] ): """simple docstring""" SCREAMING_SNAKE_CASE : KEY SCREAMING_SNAKE_CASE : VAL class lowercase_ (_Item ): """simple docstring""" def __init__( self : Optional[int] ): super().__init__(lowercase__ ,lowercase__ ) def __bool__( self : List[str] ): return False lowerCAmelCase__ = _DeletedItem() class lowercase_ (MutableMapping[KEY, VAL] ): """simple docstring""" def __init__( self : Dict ,lowercase__ : int = 8 ,lowercase__ : float = 0.7_5 ): __lowercase = initial_block_size __lowercase = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 __lowercase = capacity_factor __lowercase = 0 def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : KEY ): return hash(lowercase__ ) % len(self._buckets ) def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : int ): return (ind + 1) % len(self._buckets ) def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : int ,lowercase__ : KEY ,lowercase__ : VAL ): __lowercase = self._buckets[ind] if not stored: __lowercase = _Item(lowercase__ ,lowercase__ ) self._len += 1 return True elif stored.key == key: __lowercase = _Item(lowercase__ ,lowercase__ ) return True else: return False def SCREAMING_SNAKE_CASE ( self : Dict ): __lowercase = len(self._buckets ) * self._capacity_factor return len(self ) >= int(lowercase__ ) def SCREAMING_SNAKE_CASE ( self : int ): if len(self._buckets ) <= self._initial_block_size: return False __lowercase = len(self._buckets ) * self._capacity_factor / 2 return len(self ) < limit def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : int ): __lowercase = self._buckets __lowercase = [None] * new_size __lowercase = 0 for item in old_buckets: if item: self._add_item(item.key ,item.val ) def SCREAMING_SNAKE_CASE ( self : str ): self._resize(len(self._buckets ) * 2 ) def SCREAMING_SNAKE_CASE ( self : Tuple ): self._resize(len(self._buckets ) // 2 ) def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : KEY ): __lowercase = self._get_bucket_index(lowercase__ ) for _ in range(len(self._buckets ) ): yield ind __lowercase = self._get_next_ind(lowercase__ ) def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : KEY ,lowercase__ : VAL ): for ind in self._iterate_buckets(lowercase__ ): if self._try_set(lowercase__ ,lowercase__ ,lowercase__ ): break def __setitem__( self : str ,lowercase__ : KEY ,lowercase__ : VAL ): if self._is_full(): self._size_up() self._add_item(lowercase__ ,lowercase__ ) def __delitem__( self : Tuple ,lowercase__ : KEY ): for ind in self._iterate_buckets(lowercase__ ): __lowercase = self._buckets[ind] if item is None: raise KeyError(lowercase__ ) if item is _deleted: continue if item.key == key: __lowercase = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self : Tuple ,lowercase__ : KEY ): for ind in self._iterate_buckets(lowercase__ ): __lowercase = self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(lowercase__ ) def __len__( self : Optional[int] ): return self._len def __iter__( self : str ): yield from (item.key for item in self._buckets if item) def __repr__( self : Optional[Any] ): __lowercase = ''' ,'''.join( F"{item.key}: {item.val}" for item in self._buckets if item ) return F"HashMap({val_string})"
41
0
from __future__ import annotations from bisect import bisect_left from functools import total_ordering from heapq import merge @total_ordering class __A ( lowerCamelCase__ ): def __lt__( self :Any , __snake_case :List[Any] ): '''simple docstring''' return self[-1] < other[-1] def __eq__( self :Dict , __snake_case :Union[str, Any] ): '''simple docstring''' return self[-1] == other[-1] def lowerCAmelCase_ ( lowerCamelCase ): __magic_name__ : Any =[] # sort into stacks for element in collection: __magic_name__ : Any =Stack([element] ) __magic_name__ : str =bisect_left(A__ , A__ ) if i != len(A__ ): stacks[i].append(A__ ) else: stacks.append(A__ ) # use a heap-based merge to merge stack efficiently __magic_name__ : Tuple =merge(*(reversed(A__ ) for stack in stacks) ) return collection if __name__ == "__main__": UpperCAmelCase_ : str = input("Enter numbers separated by a comma:\n").strip() UpperCAmelCase_ : str = [int(item) for item in user_input.split(",")] print(patience_sort(unsorted))
21
'''simple docstring''' from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from transformers.modeling_outputs import BaseModelOutput from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING lowerCAmelCase__ = logging.get_logger(__name__) @add_end_docstrings(lowerCamelCase__ ) class lowercase_ (lowerCamelCase__ ): """simple docstring""" def __init__( self : List[str] ,**lowercase__ : Tuple ): super().__init__(**lowercase__ ) if self.framework == "tf": raise ValueError(F"The {self.__class__} is only available in PyTorch." ) requires_backends(self ,'''vision''' ) self.check_model_type(lowercase__ ) def __call__( self : List[str] ,lowercase__ : Union[str, "Image.Image", List[Dict[str, Any]]] ,lowercase__ : Union[str, List[str]] = None ,**lowercase__ : str ,): if "text_queries" in kwargs: __lowercase = kwargs.pop('''text_queries''' ) if isinstance(lowercase__ ,(str, Image.Image) ): __lowercase = {'''image''': image, '''candidate_labels''': candidate_labels} else: __lowercase = image __lowercase = super().__call__(lowercase__ ,**lowercase__ ) return results def SCREAMING_SNAKE_CASE ( self : int ,**lowercase__ : List[Any] ): __lowercase = {} if "threshold" in kwargs: __lowercase = kwargs['''threshold'''] if "top_k" in kwargs: __lowercase = kwargs['''top_k'''] return {}, {}, postprocess_params def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : Optional[Any] ): __lowercase = load_image(inputs['''image'''] ) __lowercase = inputs['''candidate_labels'''] if isinstance(lowercase__ ,lowercase__ ): __lowercase = candidate_labels.split(''',''' ) __lowercase = torch.tensor([[image.height, image.width]] ,dtype=torch.intaa ) for i, candidate_label in enumerate(lowercase__ ): __lowercase = self.tokenizer(lowercase__ ,return_tensors=self.framework ) __lowercase = self.image_processor(lowercase__ ,return_tensors=self.framework ) yield { "is_last": i == len(lowercase__ ) - 1, "target_size": target_size, "candidate_label": candidate_label, **text_inputs, **image_features, } def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : List[str] ): __lowercase = model_inputs.pop('''target_size''' ) __lowercase = model_inputs.pop('''candidate_label''' ) __lowercase = model_inputs.pop('''is_last''' ) __lowercase = self.model(**lowercase__ ) __lowercase = {'''target_size''': target_size, '''candidate_label''': candidate_label, '''is_last''': is_last, **outputs} return model_outputs def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : int ,lowercase__ : List[Any]=0.1 ,lowercase__ : List[str]=None ): __lowercase = [] for model_output in model_outputs: __lowercase = model_output['''candidate_label'''] __lowercase = BaseModelOutput(lowercase__ ) __lowercase = self.image_processor.post_process_object_detection( outputs=lowercase__ ,threshold=lowercase__ ,target_sizes=model_output['''target_size'''] )[0] for index in outputs["scores"].nonzero(): __lowercase = outputs['''scores'''][index].item() __lowercase = self._get_bounding_box(outputs['''boxes'''][index][0] ) __lowercase = {'''score''': score, '''label''': label, '''box''': box} results.append(lowercase__ ) __lowercase = sorted(lowercase__ ,key=lambda lowercase__ : x["score"] ,reverse=lowercase__ ) if top_k: __lowercase = results[:top_k] return results def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : "torch.Tensor" ): if self.framework != "pt": raise ValueError('''The ZeroShotObjectDetectionPipeline is only available in PyTorch.''' ) __lowercase , __lowercase , __lowercase , __lowercase = box.int().tolist() __lowercase = { '''xmin''': xmin, '''ymin''': ymin, '''xmax''': xmax, '''ymax''': ymax, } return bbox
41
0
"""simple docstring""" import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class lowerCamelCase__ ( lowerCamelCase__ ): '''simple docstring''' _lowerCamelCase = ['image_processor', 'tokenizer'] _lowerCamelCase = 'OwlViTImageProcessor' _lowerCamelCase = ('CLIPTokenizer', 'CLIPTokenizerFast') def __init__( self ,lowerCamelCase_=None ,lowerCamelCase_=None ,**lowerCamelCase_ ) -> Any: A = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" ,lowercase__ ,) A = kwargs.pop("""feature_extractor""" ) A = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(lowercase__ ,lowercase__ ) def __call__( self ,lowerCamelCase_=None ,lowerCamelCase_=None ,lowerCamelCase_=None ,lowerCamelCase_="max_length" ,lowerCamelCase_="np" ,**lowerCamelCase_ ) -> Optional[int]: if text is None and query_images is None and images is None: raise ValueError( """You have to specify at least one text or query image or image. All three cannot be none.""" ) if text is not None: if isinstance(lowercase__ ,lowercase__ ) or (isinstance(lowercase__ ,lowercase__ ) and not isinstance(text[0] ,lowercase__ )): A = [self.tokenizer(lowercase__ ,padding=lowercase__ ,return_tensors=lowercase__ ,**lowercase__ )] elif isinstance(lowercase__ ,lowercase__ ) and isinstance(text[0] ,lowercase__ ): A = [] # Maximum number of queries across batch A = max([len(lowercase__ ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(lowercase__ ) != max_num_queries: A = t + [""" """] * (max_num_queries - len(lowercase__ )) A = self.tokenizer(lowercase__ ,padding=lowercase__ ,return_tensors=lowercase__ ,**lowercase__ ) encodings.append(lowercase__ ) else: raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" ) if return_tensors == "np": A = np.concatenate([encoding["""input_ids"""] for encoding in encodings] ,axis=0 ) A = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] ,axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp A = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] ,axis=0 ) A = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] ,axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch A = torch.cat([encoding["""input_ids"""] for encoding in encodings] ,dim=0 ) A = torch.cat([encoding["""attention_mask"""] for encoding in encodings] ,dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf A = tf.stack([encoding["""input_ids"""] for encoding in encodings] ,axis=0 ) A = tf.stack([encoding["""attention_mask"""] for encoding in encodings] ,axis=0 ) else: raise ValueError("""Target return tensor type could not be returned""" ) A = BatchEncoding() A = input_ids A = attention_mask if query_images is not None: A = BatchEncoding() A = self.image_processor( lowercase__ ,return_tensors=lowercase__ ,**lowercase__ ).pixel_values A = query_pixel_values if images is not None: A = self.image_processor(lowercase__ ,return_tensors=lowercase__ ,**lowercase__ ) if text is not None and images is not None: A = image_features.pixel_values return encoding elif query_images is not None and images is not None: A = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**lowercase__ ) ,tensor_type=lowercase__ ) def UpperCamelCase__ ( self ,*lowerCamelCase_ ,**lowerCamelCase_ ) -> Optional[int]: return self.image_processor.post_process(*lowercase__ ,**lowercase__ ) def UpperCamelCase__ ( self ,*lowerCamelCase_ ,**lowerCamelCase_ ) -> Any: return self.image_processor.post_process_object_detection(*lowercase__ ,**lowercase__ ) def UpperCamelCase__ ( self ,*lowerCamelCase_ ,**lowerCamelCase_ ) -> Optional[Any]: return self.image_processor.post_process_image_guided_detection(*lowercase__ ,**lowercase__ ) def UpperCamelCase__ ( self ,*lowerCamelCase_ ,**lowerCamelCase_ ) -> Optional[int]: return self.tokenizer.batch_decode(*lowercase__ ,**lowercase__ ) def UpperCamelCase__ ( self ,*lowerCamelCase_ ,**lowerCamelCase_ ) -> List[str]: return self.tokenizer.decode(*lowercase__ ,**lowercase__ ) @property def UpperCamelCase__ ( self ) -> Optional[Any]: warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,lowercase__ ,) return self.image_processor_class @property def UpperCamelCase__ ( self ) -> Tuple: warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" ,lowercase__ ,) return self.image_processor
617
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer from .base import PipelineTool class lowercase_ (lowerCamelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = 'facebook/bart-large-mnli' SCREAMING_SNAKE_CASE : Optional[Any] = ( 'This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which ' 'should be the text to classify, and `labels`, which should be the list of labels to use for classification. ' 'It returns the most likely label in the list of provided `labels` for the input text.' ) SCREAMING_SNAKE_CASE : Any = 'text_classifier' SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForSequenceClassification SCREAMING_SNAKE_CASE : Tuple = ['text', ['text']] SCREAMING_SNAKE_CASE : List[str] = ['text'] def SCREAMING_SNAKE_CASE ( self : List[Any] ): super().setup() __lowercase = self.model.config __lowercase = -1 for idx, label in config.idalabel.items(): if label.lower().startswith('''entail''' ): __lowercase = int(lowercase__ ) if self.entailment_id == -1: raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''' ) def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Dict ,lowercase__ : List[Any] ): __lowercase = labels return self.pre_processor( [text] * len(lowercase__ ) ,[F"This example is {label}" for label in labels] ,return_tensors='''pt''' ,padding='''max_length''' ,) def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int ): __lowercase = outputs.logits __lowercase = torch.argmax(logits[:, 2] ).item() return self._labels[label_id]
41
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig lowerCAmelCase_ : str = { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''', } class UpperCamelCase_ ( lowerCamelCase__ ): _A : List[str] = 'albert' def __init__( self , snake_case__=3_00_00 , snake_case__=1_28 , snake_case__=40_96 , snake_case__=12 , snake_case__=1 , snake_case__=64 , snake_case__=1_63_84 , snake_case__=1 , snake_case__="gelu_new" , snake_case__=0 , snake_case__=0 , snake_case__=5_12 , snake_case__=2 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=0.1 , snake_case__="absolute" , snake_case__=0 , snake_case__=2 , snake_case__=3 , **snake_case__ , ) -> str: """simple docstring""" super().__init__(pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ , **lowercase__ ) UpperCAmelCase = vocab_size UpperCAmelCase = embedding_size UpperCAmelCase = hidden_size UpperCAmelCase = num_hidden_layers UpperCAmelCase = num_hidden_groups UpperCAmelCase = num_attention_heads UpperCAmelCase = inner_group_num UpperCAmelCase = hidden_act UpperCAmelCase = intermediate_size UpperCAmelCase = hidden_dropout_prob UpperCAmelCase = attention_probs_dropout_prob UpperCAmelCase = max_position_embeddings UpperCAmelCase = type_vocab_size UpperCAmelCase = initializer_range UpperCAmelCase = layer_norm_eps UpperCAmelCase = classifier_dropout_prob UpperCAmelCase = position_embedding_type class UpperCamelCase_ ( lowerCamelCase__ ): @property def UpperCamelCase_ ( self ) -> Dict: """simple docstring""" if self.task == "multiple-choice": UpperCAmelCase = {0: """batch""", 1: """choice""", 2: """sequence"""} else: UpperCAmelCase = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ] )
673
'''simple docstring''' from collections.abc import Callable class lowercase_ : """simple docstring""" def __init__( self : Optional[int] ,lowercase__ : Callable | None = None ): # Stores actual heap items. __lowercase = [] # Stores indexes of each item for supporting updates and deletion. __lowercase = {} # Stores current size of heap. __lowercase = 0 # Stores function used to evaluate the score of an item on which basis ordering # will be done. __lowercase = key or (lambda lowercase__ : x) def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : int ): return int((i - 1) / 2 ) if i > 0 else None def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : int ): __lowercase = int(2 * i + 1 ) return left if 0 < left < self.size else None def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : int ): __lowercase = int(2 * i + 2 ) return right if 0 < right < self.size else None def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : int ,lowercase__ : int ): __lowercase , __lowercase = ( self.pos_map[self.arr[j][0]], self.pos_map[self.arr[i][0]], ) # Then swap the items in the list. __lowercase , __lowercase = self.arr[j], self.arr[i] def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : int ,lowercase__ : int ): return self.arr[i][1] < self.arr[j][1] def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int ): __lowercase = self._left(lowercase__ ) __lowercase = self._right(lowercase__ ) __lowercase = i if left is not None and not self._cmp(lowercase__ ,lowercase__ ): __lowercase = left if right is not None and not self._cmp(lowercase__ ,lowercase__ ): __lowercase = right return valid_parent def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int ): __lowercase = self._parent(lowercase__ ) while parent is not None and not self._cmp(lowercase__ ,lowercase__ ): self._swap(lowercase__ ,lowercase__ ) __lowercase , __lowercase = parent, self._parent(lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : int ): __lowercase = self._get_valid_parent(lowercase__ ) while valid_parent != index: self._swap(lowercase__ ,lowercase__ ) __lowercase , __lowercase = valid_parent, self._get_valid_parent(lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : int ,lowercase__ : int ): if item not in self.pos_map: return __lowercase = self.pos_map[item] __lowercase = [item, self.key(lowercase__ )] # Make sure heap is right in both up and down direction. # Ideally only one of them will make any change. self._heapify_up(lowercase__ ) self._heapify_down(lowercase__ ) def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int ): if item not in self.pos_map: return __lowercase = self.pos_map[item] del self.pos_map[item] __lowercase = self.arr[self.size - 1] __lowercase = index self.size -= 1 # Make sure heap is right in both up and down direction. Ideally only one # of them will make any change- so no performance loss in calling both. if self.size > index: self._heapify_up(lowercase__ ) self._heapify_down(lowercase__ ) def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : int ,lowercase__ : int ): __lowercase = len(self.arr ) if arr_len == self.size: self.arr.append([item, self.key(lowercase__ )] ) else: __lowercase = [item, self.key(lowercase__ )] __lowercase = self.size self.size += 1 self._heapify_up(self.size - 1 ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): return self.arr[0] if self.size else None def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): __lowercase = self.get_top() if top_item_tuple: self.delete_item(top_item_tuple[0] ) return top_item_tuple def _A ( ): """simple docstring""" if __name__ == "__main__": import doctest doctest.testmod()
41
0
import os import unittest from transformers import MobileBertTokenizer, MobileBertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class _UpperCamelCase ( lowerCamelCase__,unittest.TestCase ): '''simple docstring''' a_ : Dict = MobileBertTokenizer a_ : int = MobileBertTokenizerFast a_ : str = True a_ : Optional[int] = True a_ : List[Any] = filter_non_english a_ : Any = 'google/mobilebert-uncased' def _snake_case ( self : Any ): '''simple docstring''' super().setUp() __lowerCamelCase : str = [ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] __lowerCamelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) __lowerCamelCase : int = [ (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped for tokenizer_def in self.tokenizers_list ] def _snake_case ( self : List[Any] , _lowerCamelCase : Optional[int] ): '''simple docstring''' __lowerCamelCase : str = """UNwant\u00E9d,running""" __lowerCamelCase : List[str] = """unwanted, running""" return input_text, output_text def _snake_case ( self : str ): '''simple docstring''' __lowerCamelCase : Optional[int] = self.tokenizer_class(self.vocab_file ) __lowerCamelCase : Optional[int] = tokenizer.tokenize("""UNwant\u00E9d,running""" ) self.assertListEqual(lowercase__ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__ ) , [9, 6, 7, 1_2, 1_0, 1_1] ) def _snake_case ( self : List[Any] ): '''simple docstring''' if not self.test_rust_tokenizer: return __lowerCamelCase : List[Any] = self.get_tokenizer() __lowerCamelCase : int = self.get_rust_tokenizer() __lowerCamelCase : int = """UNwant\u00E9d,running""" __lowerCamelCase : Optional[Any] = tokenizer.tokenize(lowercase__ ) __lowerCamelCase : str = rust_tokenizer.tokenize(lowercase__ ) self.assertListEqual(lowercase__ , lowercase__ ) __lowerCamelCase : Optional[Any] = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ ) __lowerCamelCase : List[Any] = rust_tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ ) self.assertListEqual(lowercase__ , lowercase__ ) __lowerCamelCase : Optional[int] = self.get_rust_tokenizer() __lowerCamelCase : Optional[Any] = tokenizer.encode(lowercase__ ) __lowerCamelCase : str = rust_tokenizer.encode(lowercase__ ) self.assertListEqual(lowercase__ , lowercase__ ) # With lower casing __lowerCamelCase : Optional[Any] = self.get_tokenizer(do_lower_case=lowercase__ ) __lowerCamelCase : Dict = self.get_rust_tokenizer(do_lower_case=lowercase__ ) __lowerCamelCase : str = """UNwant\u00E9d,running""" __lowerCamelCase : Optional[Any] = tokenizer.tokenize(lowercase__ ) __lowerCamelCase : Optional[int] = rust_tokenizer.tokenize(lowercase__ ) self.assertListEqual(lowercase__ , lowercase__ ) __lowerCamelCase : Dict = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ ) __lowerCamelCase : Dict = rust_tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ ) self.assertListEqual(lowercase__ , lowercase__ ) __lowerCamelCase : Any = self.get_rust_tokenizer() __lowerCamelCase : Optional[int] = tokenizer.encode(lowercase__ ) __lowerCamelCase : Tuple = rust_tokenizer.encode(lowercase__ ) self.assertListEqual(lowercase__ , lowercase__ ) def _snake_case ( self : Optional[Any] ): '''simple docstring''' __lowerCamelCase : Optional[Any] = BasicTokenizer() self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] ) def _snake_case ( self : int ): '''simple docstring''' __lowerCamelCase : Union[str, Any] = BasicTokenizer(do_lower_case=lowercase__ ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] ) def _snake_case ( self : Dict ): '''simple docstring''' __lowerCamelCase : List[str] = BasicTokenizer(do_lower_case=lowercase__ , strip_accents=lowercase__ ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] ) def _snake_case ( self : int ): '''simple docstring''' __lowerCamelCase : Union[str, Any] = BasicTokenizer(do_lower_case=lowercase__ , strip_accents=lowercase__ ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] ) def _snake_case ( self : Union[str, Any] ): '''simple docstring''' __lowerCamelCase : Tuple = BasicTokenizer(do_lower_case=lowercase__ ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] ) def _snake_case ( self : Dict ): '''simple docstring''' __lowerCamelCase : int = BasicTokenizer(do_lower_case=lowercase__ ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def _snake_case ( self : str ): '''simple docstring''' __lowerCamelCase : List[str] = BasicTokenizer(do_lower_case=lowercase__ , strip_accents=lowercase__ ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def _snake_case ( self : Union[str, Any] ): '''simple docstring''' __lowerCamelCase : Optional[Any] = BasicTokenizer(do_lower_case=lowercase__ , strip_accents=lowercase__ ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def _snake_case ( self : int ): '''simple docstring''' __lowerCamelCase : str = BasicTokenizer(do_lower_case=lowercase__ , never_split=["""[UNK]"""] ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] ) def _snake_case ( self : List[Any] ): '''simple docstring''' __lowerCamelCase : Tuple = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""] __lowerCamelCase : Optional[Any] = {} for i, token in enumerate(lowercase__ ): __lowerCamelCase : Any = i __lowerCamelCase : str = WordpieceTokenizer(vocab=lowercase__ , unk_token="""[UNK]""" ) self.assertListEqual(tokenizer.tokenize("""""" ) , [] ) self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] ) def _snake_case ( self : Union[str, Any] ): '''simple docstring''' self.assertTrue(_is_whitespace(""" """ ) ) self.assertTrue(_is_whitespace("""\t""" ) ) self.assertTrue(_is_whitespace("""\r""" ) ) self.assertTrue(_is_whitespace("""\n""" ) ) self.assertTrue(_is_whitespace("""\u00A0""" ) ) self.assertFalse(_is_whitespace("""A""" ) ) self.assertFalse(_is_whitespace("""-""" ) ) def _snake_case ( self : Optional[int] ): '''simple docstring''' self.assertTrue(_is_control("""\u0005""" ) ) self.assertFalse(_is_control("""A""" ) ) self.assertFalse(_is_control(""" """ ) ) self.assertFalse(_is_control("""\t""" ) ) self.assertFalse(_is_control("""\r""" ) ) def _snake_case ( self : int ): '''simple docstring''' self.assertTrue(_is_punctuation("""-""" ) ) self.assertTrue(_is_punctuation("""$""" ) ) self.assertTrue(_is_punctuation("""`""" ) ) self.assertTrue(_is_punctuation(""".""" ) ) self.assertFalse(_is_punctuation("""A""" ) ) self.assertFalse(_is_punctuation(""" """ ) ) def _snake_case ( self : int ): '''simple docstring''' __lowerCamelCase : Tuple = self.get_tokenizer() __lowerCamelCase : Any = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(lowercase__ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] ) self.assertListEqual( [rust_tokenizer.tokenize(lowercase__ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] ) @slow def _snake_case ( self : Any ): '''simple docstring''' __lowerCamelCase : Union[str, Any] = self.tokenizer_class.from_pretrained("""google/mobilebert-uncased""" ) __lowerCamelCase : int = tokenizer.encode("""sequence builders""" , add_special_tokens=lowercase__ ) __lowerCamelCase : Union[str, Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=lowercase__ ) __lowerCamelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(lowercase__ ) __lowerCamelCase : Any = tokenizer.build_inputs_with_special_tokens(lowercase__ , lowercase__ ) assert encoded_sentence == [1_0_1] + text + [1_0_2] assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2] def _snake_case ( self : Optional[Any] ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __lowerCamelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(lowercase__ , **lowercase__ ) __lowerCamelCase : Tuple = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence.""" __lowerCamelCase : List[Any] = tokenizer_r.encode_plus( lowercase__ , return_attention_mask=lowercase__ , return_token_type_ids=lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ , ) __lowerCamelCase : Optional[int] = tokenizer_r.do_lower_case if hasattr(lowercase__ , """do_lower_case""" ) else False __lowerCamelCase : List[Any] = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), """A"""), ((1, 2), ""","""), ((3, 5), """na"""), ((5, 6), """##ï"""), ((6, 8), """##ve"""), ((9, 1_5), tokenizer_r.mask_token), ((1_6, 2_1), """Allen"""), ((2_1, 2_3), """##NL"""), ((2_3, 2_4), """##P"""), ((2_5, 3_3), """sentence"""), ((3_3, 3_4), """."""), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), """a"""), ((1, 2), ""","""), ((3, 8), """naive"""), ((9, 1_5), tokenizer_r.mask_token), ((1_6, 2_1), """allen"""), ((2_1, 2_3), """##nl"""), ((2_3, 2_4), """##p"""), ((2_5, 3_3), """sentence"""), ((3_3, 3_4), """."""), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) ) self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] ) def _snake_case ( self : Dict ): '''simple docstring''' __lowerCamelCase : int = ["""的""", """人""", """有"""] __lowerCamelCase : Union[str, Any] = """""".join(lowercase__ ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __lowerCamelCase : Optional[int] = True __lowerCamelCase : List[Any] = self.tokenizer_class.from_pretrained(lowercase__ , **lowercase__ ) __lowerCamelCase : str = self.rust_tokenizer_class.from_pretrained(lowercase__ , **lowercase__ ) __lowerCamelCase : List[Any] = tokenizer_p.encode(lowercase__ , add_special_tokens=lowercase__ ) __lowerCamelCase : int = tokenizer_r.encode(lowercase__ , add_special_tokens=lowercase__ ) __lowerCamelCase : Optional[int] = tokenizer_r.convert_ids_to_tokens(lowercase__ ) __lowerCamelCase : Any = tokenizer_p.convert_ids_to_tokens(lowercase__ ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(lowercase__ , lowercase__ ) self.assertListEqual(lowercase__ , lowercase__ ) __lowerCamelCase : str = False __lowerCamelCase : str = self.rust_tokenizer_class.from_pretrained(lowercase__ , **lowercase__ ) __lowerCamelCase : Tuple = self.tokenizer_class.from_pretrained(lowercase__ , **lowercase__ ) __lowerCamelCase : Optional[Any] = tokenizer_r.encode(lowercase__ , add_special_tokens=lowercase__ ) __lowerCamelCase : Any = tokenizer_p.encode(lowercase__ , add_special_tokens=lowercase__ ) __lowerCamelCase : str = tokenizer_r.convert_ids_to_tokens(lowercase__ ) __lowerCamelCase : List[Any] = tokenizer_p.convert_ids_to_tokens(lowercase__ ) # it is expected that only the first Chinese character is not preceded by "##". __lowerCamelCase : Dict = [ F"""##{token}""" if idx != 0 else token for idx, token in enumerate(lowercase__ ) ] self.assertListEqual(lowercase__ , lowercase__ ) self.assertListEqual(lowercase__ , lowercase__ )
519
'''simple docstring''' import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class lowercase_ (lowerCamelCase__ ): """simple docstring""" def __init__( self : List[str] ): __lowercase = [] def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : str ,**lowercase__ : Any ): self.events.append('''on_init_end''' ) def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : List[str] ,lowercase__ : Optional[Any] ,lowercase__ : int ,**lowercase__ : Optional[int] ): self.events.append('''on_train_begin''' ) def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Tuple ,lowercase__ : int ,lowercase__ : int ,**lowercase__ : List[str] ): self.events.append('''on_train_end''' ) def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Any ,lowercase__ : Union[str, Any] ,lowercase__ : Any ,**lowercase__ : Optional[Any] ): self.events.append('''on_epoch_begin''' ) def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : int ,lowercase__ : Any ,**lowercase__ : Optional[int] ): self.events.append('''on_epoch_end''' ) def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : List[str] ,lowercase__ : str ,lowercase__ : List[str] ,**lowercase__ : List[str] ): self.events.append('''on_step_begin''' ) def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Union[str, Any] ,lowercase__ : int ,lowercase__ : Optional[int] ,**lowercase__ : Dict ): self.events.append('''on_step_end''' ) def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Any ,lowercase__ : Tuple ,lowercase__ : Union[str, Any] ,**lowercase__ : Any ): self.events.append('''on_evaluate''' ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : str ,lowercase__ : Union[str, Any] ,lowercase__ : int ,**lowercase__ : Optional[Any] ): self.events.append('''on_predict''' ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[Any] ,**lowercase__ : int ): self.events.append('''on_save''' ) def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : List[str] ,**lowercase__ : List[str] ): self.events.append('''on_log''' ) def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : str ,lowercase__ : int ,lowercase__ : Dict ,**lowercase__ : str ): self.events.append('''on_prediction_step''' ) @require_torch class lowercase_ (unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self : List[str] ): __lowercase = tempfile.mkdtemp() def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): shutil.rmtree(self.output_dir ) def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Optional[Any]=0 ,lowercase__ : Any=0 ,lowercase__ : Tuple=6_4 ,lowercase__ : Optional[int]=6_4 ,lowercase__ : Optional[Any]=None ,lowercase__ : str=False ,**lowercase__ : Any ): # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure # its set to False since the tests later on depend on its value. __lowercase = RegressionDataset(length=lowercase__ ) __lowercase = RegressionDataset(length=lowercase__ ) __lowercase = RegressionModelConfig(a=lowercase__ ,b=lowercase__ ) __lowercase = RegressionPreTrainedModel(lowercase__ ) __lowercase = TrainingArguments(self.output_dir ,disable_tqdm=lowercase__ ,report_to=[] ,**lowercase__ ) return Trainer( lowercase__ ,lowercase__ ,train_dataset=lowercase__ ,eval_dataset=lowercase__ ,callbacks=lowercase__ ,) def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Optional[int] ,lowercase__ : Any ): self.assertEqual(len(lowercase__ ) ,len(lowercase__ ) ) # Order doesn't matter __lowercase = sorted(lowercase__ ,key=lambda lowercase__ : cb.__name__ if isinstance(lowercase__ ,lowercase__ ) else cb.__class__.__name__ ) __lowercase = sorted(lowercase__ ,key=lambda lowercase__ : cb.__name__ if isinstance(lowercase__ ,lowercase__ ) else cb.__class__.__name__ ) for cba, cba in zip(lowercase__ ,lowercase__ ): if isinstance(lowercase__ ,lowercase__ ) and isinstance(lowercase__ ,lowercase__ ): self.assertEqual(lowercase__ ,lowercase__ ) elif isinstance(lowercase__ ,lowercase__ ) and not isinstance(lowercase__ ,lowercase__ ): self.assertEqual(lowercase__ ,cba.__class__ ) elif not isinstance(lowercase__ ,lowercase__ ) and isinstance(lowercase__ ,lowercase__ ): self.assertEqual(cba.__class__ ,lowercase__ ) else: self.assertEqual(lowercase__ ,lowercase__ ) def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Union[str, Any] ): __lowercase = ['''on_init_end''', '''on_train_begin'''] __lowercase = 0 __lowercase = len(trainer.get_eval_dataloader() ) __lowercase = ['''on_prediction_step'''] * len(trainer.get_eval_dataloader() ) + ['''on_log''', '''on_evaluate'''] for _ in range(trainer.state.num_train_epochs ): expected_events.append('''on_epoch_begin''' ) for _ in range(lowercase__ ): step += 1 expected_events += ["on_step_begin", "on_step_end"] if step % trainer.args.logging_steps == 0: expected_events.append('''on_log''' ) if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0: expected_events.append('''on_save''' ) expected_events.append('''on_epoch_end''' ) if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def SCREAMING_SNAKE_CASE ( self : str ): __lowercase = self.get_trainer() __lowercase = DEFAULT_CALLBACKS.copy() + [ProgressCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) # Callbacks passed at init are added to the default callbacks __lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ) expected_callbacks.append(lowercase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback __lowercase = self.get_trainer(disable_tqdm=lowercase__ ) __lowercase = DEFAULT_CALLBACKS.copy() + [PrinterCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): __lowercase = DEFAULT_CALLBACKS.copy() + [ProgressCallback] __lowercase = self.get_trainer() # We can add, pop, or remove by class name trainer.remove_callback(lowercase__ ) expected_callbacks.remove(lowercase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) __lowercase = self.get_trainer() __lowercase = trainer.pop_callback(lowercase__ ) self.assertEqual(cb.__class__ ,lowercase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) trainer.add_callback(lowercase__ ) expected_callbacks.insert(0 ,lowercase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) # We can also add, pop, or remove by instance __lowercase = self.get_trainer() __lowercase = trainer.callback_handler.callbacks[0] trainer.remove_callback(lowercase__ ) expected_callbacks.remove(lowercase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) __lowercase = self.get_trainer() __lowercase = trainer.callback_handler.callbacks[0] __lowercase = trainer.pop_callback(lowercase__ ) self.assertEqual(lowercase__ ,lowercase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) trainer.add_callback(lowercase__ ) expected_callbacks.insert(0 ,lowercase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Dict ): import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested warnings.simplefilter(action='''ignore''' ,category=lowercase__ ) __lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ) trainer.train() __lowercase = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) ) # Independent log/save/eval __lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,logging_steps=5 ) trainer.train() __lowercase = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) ) __lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,save_steps=5 ) trainer.train() __lowercase = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) ) __lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,eval_steps=5 ,evaluation_strategy='''steps''' ) trainer.train() __lowercase = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) ) __lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,evaluation_strategy='''epoch''' ) trainer.train() __lowercase = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) ) # A bit of everything __lowercase = self.get_trainer( callbacks=[MyTestTrainerCallback] ,logging_steps=3 ,save_steps=1_0 ,eval_steps=5 ,evaluation_strategy='''steps''' ,) trainer.train() __lowercase = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) ) # warning should be emitted for duplicated callbacks with patch('''transformers.trainer_callback.logger.warning''' ) as warn_mock: __lowercase = self.get_trainer( callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] ,) assert str(lowercase__ ) in warn_mock.call_args[0][0]
41
0
"""simple docstring""" import os import re from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __snake_case : Dict = logging.get_logger(__name__) __snake_case : int = {'vocab_file': 'spiece.model'} __snake_case : Dict = { 'vocab_file': { 'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model', 'google/bigbird-roberta-large': ( 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model' ), 'google/bigbird-base-trivia-itc': ( 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model' ), } } __snake_case : Any = { 'google/bigbird-roberta-base': 4_096, 'google/bigbird-roberta-large': 4_096, 'google/bigbird-base-trivia-itc': 4_096, } class UpperCamelCase ( lowerCamelCase__ ): """simple docstring""" _lowerCamelCase : Tuple =VOCAB_FILES_NAMES _lowerCamelCase : Union[str, Any] =PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase : Tuple =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase : str =['input_ids', 'attention_mask'] _lowerCamelCase : List[int] =[] def __init__( self : str , _lowerCamelCase : int , _lowerCamelCase : Optional[Any]="<unk>" , _lowerCamelCase : str="<s>" , _lowerCamelCase : List[Any]="</s>" , _lowerCamelCase : Dict="<pad>" , _lowerCamelCase : int="[SEP]" , _lowerCamelCase : str="[MASK]" , _lowerCamelCase : Tuple="[CLS]" , _lowerCamelCase : Optional[Dict[str, Any]] = None , **_lowerCamelCase : str , ): A__ = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else bos_token A__ = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else eos_token A__ = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else unk_token A__ = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else pad_token A__ = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else cls_token A__ = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else sep_token # Mask token behave like a normal word, i.e. include the space before it A__ = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else mask_token A__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=lowercase__ , eos_token=lowercase__ , unk_token=lowercase__ , pad_token=lowercase__ , sep_token=lowercase__ , mask_token=lowercase__ , cls_token=lowercase__ , sp_model_kwargs=self.sp_model_kwargs , **lowercase__ , ) A__ = vocab_file A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowercase__ ) @property def A__ ( self : List[str] ): return self.sp_model.get_piece_size() def A__ ( self : Tuple ): A__ = {self.convert_ids_to_tokens(lowercase__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Dict ): A__ = self.__dict__.copy() A__ = None return state def __setstate__( self : str , _lowerCamelCase : Optional[int] ): A__ = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): A__ = {} A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def A__ ( self : Tuple , _lowerCamelCase : str ): return self.sp_model.encode(lowercase__ , out_type=lowercase__ ) def A__ ( self : Union[str, Any] , _lowerCamelCase : Dict ): return self.sp_model.piece_to_id(lowercase__ ) def A__ ( self : int , _lowerCamelCase : List[str] ): A__ = self.sp_model.IdToPiece(lowercase__ ) return token def A__ ( self : str , _lowerCamelCase : Tuple ): A__ = [] A__ = '''''' A__ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(lowercase__ ) + token A__ = True A__ = [] else: current_sub_tokens.append(lowercase__ ) A__ = False out_string += self.sp_model.decode(lowercase__ ) return out_string.strip() def A__ ( self : int , _lowerCamelCase : List[int] , _lowerCamelCase : bool = False , _lowerCamelCase : bool = None , _lowerCamelCase : bool = True , **_lowerCamelCase : Optional[Any] , ): A__ = kwargs.pop('''use_source_tokenizer''' , lowercase__ ) A__ = self.convert_ids_to_tokens(lowercase__ , skip_special_tokens=lowercase__ ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. https://github.com/huggingface/transformers/issues/1133 A__ = [] A__ = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(lowercase__ ) ) A__ = [] sub_texts.append(lowercase__ ) else: current_sub_text.append(lowercase__ ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(lowercase__ ) ) # Mimic the behavior of the Rust tokenizer: # No space before [MASK] and [SEP] if spaces_between_special_tokens: A__ = re.sub(R''' (\[(MASK|SEP)\])''' , R'''\1''' , ''' '''.join(lowercase__ ) ) else: A__ = ''''''.join(lowercase__ ) A__ = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: A__ = self.clean_up_tokenization(lowercase__ ) return clean_text else: return text def A__ ( self : Any , _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None ): if not os.path.isdir(lowercase__ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return A__ = os.path.join( lowercase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowercase__ ) elif not os.path.isfile(self.vocab_file ): with open(lowercase__ , '''wb''' ) as fi: A__ = self.sp_model.serialized_model_proto() fi.write(lowercase__ ) return (out_vocab_file,) def A__ ( self : str , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] A__ = [self.cls_token_id] A__ = [self.sep_token_id] return cls + token_ids_a + sep + token_ids_a + sep def A__ ( self : str , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None , _lowerCamelCase : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowercase__ , token_ids_a=lowercase__ , already_has_special_tokens=lowercase__ ) if token_ids_a is None: return [1] + ([0] * len(lowercase__ )) + [1] return [1] + ([0] * len(lowercase__ )) + [1] + ([0] * len(lowercase__ )) + [1] def A__ ( self : Union[str, Any] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None ): A__ = [self.sep_token_id] A__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
571
'''simple docstring''' from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, ) @flax.struct.dataclass class lowercase_ (lowerCamelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE : jnp.ndarray SCREAMING_SNAKE_CASE : jnp.ndarray class lowercase_ (nn.Module ): """simple docstring""" SCREAMING_SNAKE_CASE : int SCREAMING_SNAKE_CASE : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6) SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa def SCREAMING_SNAKE_CASE ( self : Dict ): __lowercase = nn.Conv( self.block_out_channels[0] ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) __lowercase = [] for i in range(len(self.block_out_channels ) - 1 ): __lowercase = self.block_out_channels[i] __lowercase = self.block_out_channels[i + 1] __lowercase = nn.Conv( lowercase__ ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) blocks.append(lowercase__ ) __lowercase = nn.Conv( lowercase__ ,kernel_size=(3, 3) ,strides=(2, 2) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) blocks.append(lowercase__ ) __lowercase = blocks __lowercase = nn.Conv( self.conditioning_embedding_channels ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,) def __call__( self : List[str] ,lowercase__ : Optional[int] ): __lowercase = self.conv_in(lowercase__ ) __lowercase = nn.silu(lowercase__ ) for block in self.blocks: __lowercase = block(lowercase__ ) __lowercase = nn.silu(lowercase__ ) __lowercase = self.conv_out(lowercase__ ) return embedding @flax_register_to_config class lowercase_ (nn.Module , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE : int = 3_2 SCREAMING_SNAKE_CASE : int = 4 SCREAMING_SNAKE_CASE : Tuple[str] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) SCREAMING_SNAKE_CASE : Union[bool, Tuple[bool]] = False SCREAMING_SNAKE_CASE : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0) SCREAMING_SNAKE_CASE : int = 2 SCREAMING_SNAKE_CASE : Union[int, Tuple[int]] = 8 SCREAMING_SNAKE_CASE : Optional[Union[int, Tuple[int]]] = None SCREAMING_SNAKE_CASE : int = 1_2_8_0 SCREAMING_SNAKE_CASE : float = 0.0 SCREAMING_SNAKE_CASE : bool = False SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa SCREAMING_SNAKE_CASE : bool = True SCREAMING_SNAKE_CASE : int = 0 SCREAMING_SNAKE_CASE : str = "rgb" SCREAMING_SNAKE_CASE : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6) def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : jax.random.KeyArray ): # init input tensors __lowercase = (1, self.in_channels, self.sample_size, self.sample_size) __lowercase = jnp.zeros(lowercase__ ,dtype=jnp.floataa ) __lowercase = jnp.ones((1,) ,dtype=jnp.intaa ) __lowercase = jnp.zeros((1, 1, self.cross_attention_dim) ,dtype=jnp.floataa ) __lowercase = (1, 3, self.sample_size * 8, self.sample_size * 8) __lowercase = jnp.zeros(lowercase__ ,dtype=jnp.floataa ) __lowercase , __lowercase = jax.random.split(lowercase__ ) __lowercase = {'''params''': params_rng, '''dropout''': dropout_rng} return self.init(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )["params"] def SCREAMING_SNAKE_CASE ( self : Any ): __lowercase = self.block_out_channels __lowercase = block_out_channels[0] * 4 # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. __lowercase = self.num_attention_heads or self.attention_head_dim # input __lowercase = nn.Conv( block_out_channels[0] ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) # time __lowercase = FlaxTimesteps( block_out_channels[0] ,flip_sin_to_cos=self.flip_sin_to_cos ,freq_shift=self.config.freq_shift ) __lowercase = FlaxTimestepEmbedding(lowercase__ ,dtype=self.dtype ) __lowercase = FlaxControlNetConditioningEmbedding( conditioning_embedding_channels=block_out_channels[0] ,block_out_channels=self.conditioning_embedding_out_channels ,) __lowercase = self.only_cross_attention if isinstance(lowercase__ ,lowercase__ ): __lowercase = (only_cross_attention,) * len(self.down_block_types ) if isinstance(lowercase__ ,lowercase__ ): __lowercase = (num_attention_heads,) * len(self.down_block_types ) # down __lowercase = [] __lowercase = [] __lowercase = block_out_channels[0] __lowercase = nn.Conv( lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,) controlnet_down_blocks.append(lowercase__ ) for i, down_block_type in enumerate(self.down_block_types ): __lowercase = output_channel __lowercase = block_out_channels[i] __lowercase = i == len(lowercase__ ) - 1 if down_block_type == "CrossAttnDownBlock2D": __lowercase = FlaxCrossAttnDownBlockaD( in_channels=lowercase__ ,out_channels=lowercase__ ,dropout=self.dropout ,num_layers=self.layers_per_block ,num_attention_heads=num_attention_heads[i] ,add_downsample=not is_final_block ,use_linear_projection=self.use_linear_projection ,only_cross_attention=only_cross_attention[i] ,dtype=self.dtype ,) else: __lowercase = FlaxDownBlockaD( in_channels=lowercase__ ,out_channels=lowercase__ ,dropout=self.dropout ,num_layers=self.layers_per_block ,add_downsample=not is_final_block ,dtype=self.dtype ,) down_blocks.append(lowercase__ ) for _ in range(self.layers_per_block ): __lowercase = nn.Conv( lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,) controlnet_down_blocks.append(lowercase__ ) if not is_final_block: __lowercase = nn.Conv( lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,) controlnet_down_blocks.append(lowercase__ ) __lowercase = down_blocks __lowercase = controlnet_down_blocks # mid __lowercase = block_out_channels[-1] __lowercase = FlaxUNetMidBlockaDCrossAttn( in_channels=lowercase__ ,dropout=self.dropout ,num_attention_heads=num_attention_heads[-1] ,use_linear_projection=self.use_linear_projection ,dtype=self.dtype ,) __lowercase = nn.Conv( lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,) def __call__( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Any ,lowercase__ : List[Any] ,lowercase__ : str ,lowercase__ : float = 1.0 ,lowercase__ : bool = True ,lowercase__ : bool = False ,): __lowercase = self.controlnet_conditioning_channel_order if channel_order == "bgr": __lowercase = jnp.flip(lowercase__ ,axis=1 ) # 1. time if not isinstance(lowercase__ ,jnp.ndarray ): __lowercase = jnp.array([timesteps] ,dtype=jnp.intaa ) elif isinstance(lowercase__ ,jnp.ndarray ) and len(timesteps.shape ) == 0: __lowercase = timesteps.astype(dtype=jnp.floataa ) __lowercase = jnp.expand_dims(lowercase__ ,0 ) __lowercase = self.time_proj(lowercase__ ) __lowercase = self.time_embedding(lowercase__ ) # 2. pre-process __lowercase = jnp.transpose(lowercase__ ,(0, 2, 3, 1) ) __lowercase = self.conv_in(lowercase__ ) __lowercase = jnp.transpose(lowercase__ ,(0, 2, 3, 1) ) __lowercase = self.controlnet_cond_embedding(lowercase__ ) sample += controlnet_cond # 3. down __lowercase = (sample,) for down_block in self.down_blocks: if isinstance(lowercase__ ,lowercase__ ): __lowercase , __lowercase = down_block(lowercase__ ,lowercase__ ,lowercase__ ,deterministic=not train ) else: __lowercase , __lowercase = down_block(lowercase__ ,lowercase__ ,deterministic=not train ) down_block_res_samples += res_samples # 4. mid __lowercase = self.mid_block(lowercase__ ,lowercase__ ,lowercase__ ,deterministic=not train ) # 5. contronet blocks __lowercase = () for down_block_res_sample, controlnet_block in zip(lowercase__ ,self.controlnet_down_blocks ): __lowercase = controlnet_block(lowercase__ ) controlnet_down_block_res_samples += (down_block_res_sample,) __lowercase = controlnet_down_block_res_samples __lowercase = self.controlnet_mid_block(lowercase__ ) # 6. scaling __lowercase = [sample * conditioning_scale for sample in down_block_res_samples] mid_block_res_sample *= conditioning_scale if not return_dict: return (down_block_res_samples, mid_block_res_sample) return FlaxControlNetOutput( down_block_res_samples=lowercase__ ,mid_block_res_sample=lowercase__ )
41
0
from __future__ import annotations from math import ceil, floor, sqrt def a (_lowerCAmelCase = 2_0_0_0_0_0_0 ): SCREAMING_SNAKE_CASE_ = [0] SCREAMING_SNAKE_CASE_ = 4_2 for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ): triangle_numbers.append(triangle_numbers[-1] + idx ) # we want this to be as close as possible to target SCREAMING_SNAKE_CASE_ = 0 # the area corresponding to the grid that gives the product closest to target SCREAMING_SNAKE_CASE_ = 0 # an estimate of b, using the quadratic formula SCREAMING_SNAKE_CASE_ = 4_2 # the largest integer less than b_estimate SCREAMING_SNAKE_CASE_ = 4_2 # the largest integer less than b_estimate SCREAMING_SNAKE_CASE_ = 4_2 # the triangle number corresponding to b_floor SCREAMING_SNAKE_CASE_ = 4_2 # the triangle number corresponding to b_ceil SCREAMING_SNAKE_CASE_ = 4_2 for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ): SCREAMING_SNAKE_CASE_ = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2 SCREAMING_SNAKE_CASE_ = floor(A__ ) SCREAMING_SNAKE_CASE_ = ceil(A__ ) SCREAMING_SNAKE_CASE_ = triangle_numbers[b_floor] SCREAMING_SNAKE_CASE_ = triangle_numbers[b_ceil] if abs(target - triangle_b_first_guess * triangle_a ) < abs( target - best_product ): SCREAMING_SNAKE_CASE_ = triangle_b_first_guess * triangle_a SCREAMING_SNAKE_CASE_ = idx_a * b_floor if abs(target - triangle_b_second_guess * triangle_a ) < abs( target - best_product ): SCREAMING_SNAKE_CASE_ = triangle_b_second_guess * triangle_a SCREAMING_SNAKE_CASE_ = idx_a * b_ceil return area if __name__ == "__main__": print(f"""{solution() = }""")
234
'''simple docstring''' import io import math from typing import Dict, Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, get_image_size, infer_channel_dimension_format, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_vision_available, logging from ...utils.import_utils import requires_backends if is_vision_available(): import textwrap from PIL import Image, ImageDraw, ImageFont if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: lowerCAmelCase__ = False lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = '''ybelkada/fonts''' def _A ( ): """simple docstring""" if is_torch_available() and not is_torch_greater_or_equal_than_1_11: raise ImportError( F"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use " '''Pix2StructImageProcessor. Please upgrade torch.''' ) def _A ( A__ , A__ , A__ ): """simple docstring""" requires_backends(A__ , ['''torch'''] ) _check_torch_version() __lowercase = image_tensor.unsqueeze(0 ) __lowercase = torch.nn.functional.unfold(A__ , (patch_height, patch_width) , stride=(patch_height, patch_width) ) __lowercase = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , A__ , A__ , -1 ) __lowercase = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape( image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , ) return patches.unsqueeze(0 ) def _A ( A__ , A__ = 36 , A__ = "black" , A__ = "white" , A__ = 5 , A__ = 5 , A__ = 5 , A__ = 5 , A__ = None , A__ = None , ): """simple docstring""" requires_backends(A__ , '''vision''' ) # Add new lines so that each line is no more than 80 characters. __lowercase = textwrap.TextWrapper(width=80 ) __lowercase = wrapper.wrap(text=A__ ) __lowercase = '''\n'''.join(A__ ) if font_bytes is not None and font_path is None: __lowercase = io.BytesIO(A__ ) elif font_path is not None: __lowercase = font_path else: __lowercase = hf_hub_download(A__ , '''Arial.TTF''' ) __lowercase = ImageFont.truetype(A__ , encoding='''UTF-8''' , size=A__ ) # Use a temporary canvas to determine the width and height in pixels when # rendering the text. __lowercase = ImageDraw.Draw(Image.new('''RGB''' , (1, 1) , A__ ) ) __lowercase , __lowercase , __lowercase , __lowercase = temp_draw.textbbox((0, 0) , A__ , A__ ) # Create the actual image with a bit of padding around the text. __lowercase = text_width + left_padding + right_padding __lowercase = text_height + top_padding + bottom_padding __lowercase = Image.new('''RGB''' , (image_width, image_height) , A__ ) __lowercase = ImageDraw.Draw(A__ ) draw.text(xy=(left_padding, top_padding) , text=A__ , fill=A__ , font=A__ ) return image def _A ( A__ , A__ , **A__ ): """simple docstring""" requires_backends(A__ , '''vision''' ) # Convert to PIL image if necessary __lowercase = to_pil_image(A__ ) __lowercase = render_text(A__ , **A__ ) __lowercase = max(header_image.width , image.width ) __lowercase = int(image.height * (new_width / image.width) ) __lowercase = int(header_image.height * (new_width / header_image.width) ) __lowercase = Image.new('''RGB''' , (new_width, new_height + new_header_height) , '''white''' ) new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) ) new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) ) # Convert back to the original framework if necessary __lowercase = to_numpy_array(A__ ) if infer_channel_dimension_format(A__ ) == ChannelDimension.LAST: __lowercase = to_channel_dimension_format(A__ , ChannelDimension.LAST ) return new_image class lowercase_ (lowerCamelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = ['flattened_patches'] def __init__( self : Any ,lowercase__ : bool = True ,lowercase__ : bool = True ,lowercase__ : Dict[str, int] = None ,lowercase__ : int = 2_0_4_8 ,lowercase__ : bool = False ,**lowercase__ : List[str] ,): super().__init__(**lowercase__ ) __lowercase = patch_size if patch_size is not None else {'''height''': 1_6, '''width''': 1_6} __lowercase = do_normalize __lowercase = do_convert_rgb __lowercase = max_patches __lowercase = is_vqa def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : np.ndarray ,lowercase__ : int ,lowercase__ : dict ,**lowercase__ : Tuple ): requires_backends(self.extract_flattened_patches ,'''torch''' ) _check_torch_version() # convert to torch __lowercase = to_channel_dimension_format(lowercase__ ,ChannelDimension.FIRST ) __lowercase = torch.from_numpy(lowercase__ ) __lowercase , __lowercase = patch_size['''height'''], patch_size['''width'''] __lowercase , __lowercase = get_image_size(lowercase__ ) # maximize scale s.t. __lowercase = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) ) __lowercase = max(min(math.floor(scale * image_height / patch_height ) ,lowercase__ ) ,1 ) __lowercase = max(min(math.floor(scale * image_width / patch_width ) ,lowercase__ ) ,1 ) __lowercase = max(num_feasible_rows * patch_height ,1 ) __lowercase = max(num_feasible_cols * patch_width ,1 ) __lowercase = torch.nn.functional.interpolate( image.unsqueeze(0 ) ,size=(resized_height, resized_width) ,mode='''bilinear''' ,align_corners=lowercase__ ,antialias=lowercase__ ,).squeeze(0 ) # [1, rows, columns, patch_height * patch_width * image_channels] __lowercase = torch_extract_patches(lowercase__ ,lowercase__ ,lowercase__ ) __lowercase = patches.shape __lowercase = patches_shape[1] __lowercase = patches_shape[2] __lowercase = patches_shape[3] # [rows * columns, patch_height * patch_width * image_channels] __lowercase = patches.reshape([rows * columns, depth] ) # [rows * columns, 1] __lowercase = torch.arange(lowercase__ ).reshape([rows, 1] ).repeat(1 ,lowercase__ ).reshape([rows * columns, 1] ) __lowercase = torch.arange(lowercase__ ).reshape([1, columns] ).repeat(lowercase__ ,1 ).reshape([rows * columns, 1] ) # Offset by 1 so the ids do not contain zeros, which represent padding. row_ids += 1 col_ids += 1 # Prepare additional patch features. # [rows * columns, 1] __lowercase = row_ids.to(torch.floataa ) __lowercase = col_ids.to(torch.floataa ) # [rows * columns, 2 + patch_height * patch_width * image_channels] __lowercase = torch.cat([row_ids, col_ids, patches] ,-1 ) # [max_patches, 2 + patch_height * patch_width * image_channels] __lowercase = torch.nn.functional.pad(lowercase__ ,[0, 0, 0, max_patches - (rows * columns)] ).float() __lowercase = to_numpy_array(lowercase__ ) return result def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : np.ndarray ,lowercase__ : Optional[Union[str, ChannelDimension]] = None ,**lowercase__ : List[Any] ): if image.dtype == np.uinta: __lowercase = image.astype(np.floataa ) # take mean across the whole `image` __lowercase = np.mean(lowercase__ ) __lowercase = np.std(lowercase__ ) __lowercase = max(lowercase__ ,1.0 / math.sqrt(np.prod(image.shape ) ) ) return normalize(lowercase__ ,mean=lowercase__ ,std=lowercase__ ,**lowercase__ ) def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : ImageInput ,lowercase__ : Optional[str] = None ,lowercase__ : bool = None ,lowercase__ : Optional[bool] = None ,lowercase__ : Optional[int] = None ,lowercase__ : Optional[Dict[str, int]] = None ,lowercase__ : Optional[Union[str, TensorType]] = None ,lowercase__ : ChannelDimension = ChannelDimension.FIRST ,**lowercase__ : List[Any] ,): __lowercase = do_normalize if do_normalize is not None else self.do_normalize __lowercase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __lowercase = patch_size if patch_size is not None else self.patch_size __lowercase = max_patches if max_patches is not None else self.max_patches __lowercase = self.is_vqa if kwargs.get('''data_format''' ,lowercase__ ) is not None: raise ValueError('''data_format is not an accepted input as the outputs are ''' ) __lowercase = make_list_of_images(lowercase__ ) if not valid_images(lowercase__ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: __lowercase = [convert_to_rgb(lowercase__ ) for image in images] # All transformations expect numpy arrays. __lowercase = [to_numpy_array(lowercase__ ) for image in images] if is_vqa: if header_text is None: raise ValueError('''A header text must be provided for VQA models.''' ) __lowercase = kwargs.pop('''font_bytes''' ,lowercase__ ) __lowercase = kwargs.pop('''font_path''' ,lowercase__ ) if isinstance(lowercase__ ,lowercase__ ): __lowercase = [header_text] * len(lowercase__ ) __lowercase = [ render_header(lowercase__ ,header_text[i] ,font_bytes=lowercase__ ,font_path=lowercase__ ) for i, image in enumerate(lowercase__ ) ] if do_normalize: __lowercase = [self.normalize(image=lowercase__ ) for image in images] # convert to torch tensor and permute __lowercase = [ self.extract_flattened_patches(image=lowercase__ ,max_patches=lowercase__ ,patch_size=lowercase__ ) for image in images ] # create attention mask in numpy __lowercase = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images] __lowercase = BatchFeature( data={'''flattened_patches''': images, '''attention_mask''': attention_masks} ,tensor_type=lowercase__ ) return encoded_outputs
41
0
'''simple docstring''' import numpy as np import pandas as pd from sklearn.preprocessing import MinMaxScaler from tensorflow.keras.layers import LSTM, Dense from tensorflow.keras.models import Sequential if __name__ == "__main__": __a: Optional[int] = pd.read_csv("""sample_data.csv""", header=None) __a: int = df.shape[:1][0] # If you're using some other dataset input the target column __a: List[Any] = df.iloc[:, 1:2] __a: Union[str, Any] = actual_data.values.reshape(len_data, 1) __a: Optional[int] = MinMaxScaler().fit_transform(actual_data) __a: int = 10 __a: int = 5 __a: Any = 20 __a: Tuple = len_data - periods * look_back __a: Optional[Any] = actual_data[:division] __a: Tuple = actual_data[division - look_back :] __a , __a: Dict = [], [] __a , __a: List[Any] = [], [] for i in range(0, len(train_data) - forward_days - look_back + 1): train_x.append(train_data[i : i + look_back]) train_y.append(train_data[i + look_back : i + look_back + forward_days]) for i in range(0, len(test_data) - forward_days - look_back + 1): test_x.append(test_data[i : i + look_back]) test_y.append(test_data[i + look_back : i + look_back + forward_days]) __a: List[Any] = np.array(train_x) __a: str = np.array(test_x) __a: int = np.array([list(i.ravel()) for i in train_y]) __a: List[Any] = np.array([list(i.ravel()) for i in test_y]) __a: Optional[Any] = Sequential() model.add(LSTM(1_28, input_shape=(look_back, 1), return_sequences=True)) model.add(LSTM(64, input_shape=(1_28, 1))) model.add(Dense(forward_days)) model.compile(loss="""mean_squared_error""", optimizer="""adam""") __a: List[str] = model.fit( x_train, y_train, epochs=1_50, verbose=1, shuffle=True, batch_size=4 ) __a: Optional[int] = model.predict(x_test)
152
'''simple docstring''' import doctest from collections import deque import numpy as np class lowercase_ : """simple docstring""" def __init__( self : Optional[Any] ): __lowercase = [2, 1, 2, -1] __lowercase = [1, 2, 3, 4] def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): __lowercase = len(self.first_signal ) __lowercase = len(self.second_signal ) __lowercase = max(lowercase__ ,lowercase__ ) # create a zero matrix of max_length x max_length __lowercase = [[0] * max_length for i in range(lowercase__ )] # fills the smaller signal with zeros to make both signals of same length if length_first_signal < length_second_signal: self.first_signal += [0] * (max_length - length_first_signal) elif length_first_signal > length_second_signal: self.second_signal += [0] * (max_length - length_second_signal) for i in range(lowercase__ ): __lowercase = deque(self.second_signal ) rotated_signal.rotate(lowercase__ ) for j, item in enumerate(lowercase__ ): matrix[i][j] += item # multiply the matrix with the first signal __lowercase = np.matmul(np.transpose(lowercase__ ) ,np.transpose(self.first_signal ) ) # rounding-off to two decimal places return [round(lowercase__ ,2 ) for i in final_signal] if __name__ == "__main__": doctest.testmod()
41
0
"""simple docstring""" import copy from collections import OrderedDict from typing import Dict, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING __A = logging.get_logger(__name__) __A = { "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json", # See all DETR models at https://huggingface.co/models?filter=detr } class UpperCAmelCase (lowerCamelCase__ ): """simple docstring""" _UpperCAmelCase :Dict = 'detr' _UpperCAmelCase :List[str] = ['past_key_values'] _UpperCAmelCase :Any = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=3 , _UpperCAmelCase=100 , _UpperCAmelCase=6 , _UpperCAmelCase=2048 , _UpperCAmelCase=8 , _UpperCAmelCase=6 , _UpperCAmelCase=2048 , _UpperCAmelCase=8 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=True , _UpperCAmelCase="relu" , _UpperCAmelCase=256 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1.0 , _UpperCAmelCase=False , _UpperCAmelCase="sine" , _UpperCAmelCase="resnet50" , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=1 , _UpperCAmelCase=5 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=1 , _UpperCAmelCase=5 , _UpperCAmelCase=2 , _UpperCAmelCase=0.1 , **_UpperCAmelCase , ): if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) lowercase__: Tuple = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(lowercase__ , lowercase__ ): lowercase__: Any = backbone_config.get('''model_type''' ) lowercase__: int = CONFIG_MAPPING[backbone_model_type] lowercase__: Tuple = config_class.from_dict(lowercase__ ) # set timm attributes to None lowercase__, lowercase__, lowercase__: List[str] = None, None, None lowercase__: List[Any] = use_timm_backbone lowercase__: Optional[Any] = backbone_config lowercase__: List[Any] = num_channels lowercase__: Any = num_queries lowercase__: Dict = d_model lowercase__: List[Any] = encoder_ffn_dim lowercase__: Any = encoder_layers lowercase__: Any = encoder_attention_heads lowercase__: Dict = decoder_ffn_dim lowercase__: Optional[int] = decoder_layers lowercase__: Optional[int] = decoder_attention_heads lowercase__: Optional[int] = dropout lowercase__: Dict = attention_dropout lowercase__: Dict = activation_dropout lowercase__: Union[str, Any] = activation_function lowercase__: Any = init_std lowercase__: int = init_xavier_std lowercase__: Optional[Any] = encoder_layerdrop lowercase__: Union[str, Any] = decoder_layerdrop lowercase__: List[Any] = encoder_layers lowercase__: Optional[Any] = auxiliary_loss lowercase__: Dict = position_embedding_type lowercase__: str = backbone lowercase__: Dict = use_pretrained_backbone lowercase__: Dict = dilation # Hungarian matcher lowercase__: Tuple = class_cost lowercase__: str = bbox_cost lowercase__: Optional[int] = giou_cost # Loss coefficients lowercase__: int = mask_loss_coefficient lowercase__: int = dice_loss_coefficient lowercase__: str = bbox_loss_coefficient lowercase__: Dict = giou_loss_coefficient lowercase__: Optional[Any] = eos_coefficient super().__init__(is_encoder_decoder=lowercase__ , **lowercase__ ) @property def _snake_case ( self ): return self.encoder_attention_heads @property def _snake_case ( self ): return self.d_model @classmethod def _snake_case ( cls , _UpperCAmelCase , **_UpperCAmelCase ): return cls(backbone_config=lowercase__ , **lowercase__ ) def _snake_case ( self ): lowercase__: Dict = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: lowercase__: Optional[Any] = self.backbone_config.to_dict() lowercase__: Optional[int] = self.__class__.model_type return output class UpperCAmelCase (lowerCamelCase__ ): """simple docstring""" _UpperCAmelCase :str = version.parse("1.11" ) @property def _snake_case ( self ): return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ('''pixel_mask''', {0: '''batch'''}), ] ) @property def _snake_case ( self ): return 1e-5 @property def _snake_case ( self ): return 12
586
'''simple docstring''' from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import torch from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available @dataclass class lowercase_ (lowerCamelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Union[List[np.ndarray], torch.FloatTensor] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_text_to_video_synth import TextToVideoSDPipeline from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401 from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
41
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''microsoft/trocr-base-handwritten''': ( '''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json''' ), # See all TrOCR models at https://huggingface.co/models?filter=trocr } class _snake_case ( lowerCamelCase__ ): """simple docstring""" a = 'trocr' a = ['past_key_values'] a = { 'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model', 'num_hidden_layers': 'decoder_layers', } def __init__( self : Union[str, Any] , _A : Dict=5_0_2_6_5 , _A : Dict=1_0_2_4 , _A : Union[str, Any]=1_2 , _A : Optional[int]=1_6 , _A : int=4_0_9_6 , _A : str="gelu" , _A : Any=5_1_2 , _A : List[str]=0.1 , _A : Tuple=0.0 , _A : Optional[Any]=0.0 , _A : int=2 , _A : List[Any]=0.02 , _A : Optional[Any]=0.0 , _A : int=True , _A : str=False , _A : List[str]=True , _A : str=True , _A : Any=1 , _A : Any=0 , _A : Union[str, Any]=2 , **_A : Union[str, Any] , ): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = vocab_size _SCREAMING_SNAKE_CASE : Tuple = d_model _SCREAMING_SNAKE_CASE : List[str] = decoder_layers _SCREAMING_SNAKE_CASE : Any = decoder_attention_heads _SCREAMING_SNAKE_CASE : Tuple = decoder_ffn_dim _SCREAMING_SNAKE_CASE : int = activation_function _SCREAMING_SNAKE_CASE : Union[str, Any] = max_position_embeddings _SCREAMING_SNAKE_CASE : Any = dropout _SCREAMING_SNAKE_CASE : Tuple = attention_dropout _SCREAMING_SNAKE_CASE : Union[str, Any] = activation_dropout _SCREAMING_SNAKE_CASE : str = init_std _SCREAMING_SNAKE_CASE : str = decoder_layerdrop _SCREAMING_SNAKE_CASE : Optional[int] = use_cache _SCREAMING_SNAKE_CASE : List[Any] = scale_embedding _SCREAMING_SNAKE_CASE : str = use_learned_position_embeddings _SCREAMING_SNAKE_CASE : Optional[int] = layernorm_embedding super().__init__( pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ , decoder_start_token_id=lowercase__ , **lowercase__ , )
338
'''simple docstring''' import argparse import datetime import json import time import warnings from logging import getLogger from pathlib import Path from typing import Dict, List import torch from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params lowerCAmelCase__ = getLogger(__name__) lowerCAmelCase__ = '''cuda''' if torch.cuda.is_available() else '''cpu''' def _A ( A__ , A__ , A__ , A__ = 8 , A__ = DEFAULT_DEVICE , A__=False , A__="summarization" , A__=None , **A__ , ): """simple docstring""" __lowercase = Path(A__ ).open('''w''' , encoding='''utf-8''' ) __lowercase = str(A__ ) __lowercase = AutoModelForSeqaSeqLM.from_pretrained(A__ ).to(A__ ) if fpaa: __lowercase = model.half() __lowercase = AutoTokenizer.from_pretrained(A__ ) logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type. __lowercase = time.time() # update config with task specific params use_task_specific_params(A__ , A__ ) if prefix is None: __lowercase = prefix or getattr(model.config , '''prefix''' , '''''' ) or '''''' for examples_chunk in tqdm(list(chunks(A__ , A__ ) ) ): __lowercase = [prefix + text for text in examples_chunk] __lowercase = tokenizer(A__ , return_tensors='''pt''' , truncation=A__ , padding='''longest''' ).to(A__ ) __lowercase = model.generate( input_ids=batch.input_ids , attention_mask=batch.attention_mask , **A__ , ) __lowercase = tokenizer.batch_decode(A__ , skip_special_tokens=A__ , clean_up_tokenization_spaces=A__ ) for hypothesis in dec: fout.write(hypothesis + '''\n''' ) fout.flush() fout.close() __lowercase = int(time.time() - start_time ) # seconds __lowercase = len(A__ ) return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )} def _A ( ): """simple docstring""" return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' ) def _A ( A__=True ): """simple docstring""" __lowercase = argparse.ArgumentParser() parser.add_argument('''model_name''' , type=A__ , help='''like facebook/bart-large-cnn,t5-base, etc.''' ) parser.add_argument('''input_path''' , type=A__ , help='''like cnn_dm/test.source''' ) parser.add_argument('''save_path''' , type=A__ , help='''where to save summaries''' ) parser.add_argument('''--reference_path''' , type=A__ , required=A__ , help='''like cnn_dm/test.target''' ) parser.add_argument('''--score_path''' , type=A__ , required=A__ , default='''metrics.json''' , help='''where to save metrics''' ) parser.add_argument('''--device''' , type=A__ , required=A__ , default=A__ , help='''cuda, cuda:1, cpu etc.''' ) parser.add_argument( '''--prefix''' , type=A__ , required=A__ , default=A__ , help='''will be added to the begininng of src examples''' ) parser.add_argument('''--task''' , type=A__ , default='''summarization''' , help='''used for task_specific_params + metrics''' ) parser.add_argument('''--bs''' , type=A__ , default=8 , required=A__ , help='''batch size''' ) parser.add_argument( '''--n_obs''' , type=A__ , default=-1 , required=A__ , help='''How many observations. Defaults to all.''' ) parser.add_argument('''--fp16''' , action='''store_true''' ) parser.add_argument('''--dump-args''' , action='''store_true''' , help='''print the custom hparams with the results''' ) parser.add_argument( '''--info''' , nargs='''?''' , type=A__ , const=datetime_now() , help=( '''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.''' ''' lang=en-ru. If no value is passed, the current datetime string will be used.''' ) , ) # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate __lowercase , __lowercase = parser.parse_known_args() __lowercase = parse_numeric_n_bool_cl_kwargs(A__ ) if parsed_args and verbose: print(F"parsed the following generate kwargs: {parsed_args}" ) __lowercase = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()] if args.n_obs > 0: __lowercase = examples[: args.n_obs] Path(args.save_path ).parent.mkdir(exist_ok=A__ ) if args.reference_path is None and Path(args.score_path ).exists(): warnings.warn(F"score_path {args.score_path} will be overwritten unless you type ctrl-c." ) if args.device == "cpu" and args.fpaa: # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half' raise ValueError('''Can\'t mix --fp16 and --device cpu''' ) __lowercase = generate_summaries_or_translations( A__ , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **A__ , ) if args.reference_path is None: return {} # Compute scores __lowercase = calculate_bleu if '''translation''' in args.task else calculate_rouge __lowercase = [x.rstrip() for x in open(args.save_path ).readlines()] __lowercase = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(A__ )] __lowercase = score_fn(A__ , A__ ) scores.update(A__ ) if args.dump_args: scores.update(A__ ) if args.info: __lowercase = args.info if verbose: print(A__ ) if args.score_path is not None: json.dump(A__ , open(args.score_path , '''w''' ) ) return scores if __name__ == "__main__": # Usage for MT: # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@ run_generate(verbose=True)
41
0
import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. UpperCAmelCase__ = abspath(join(dirname(dirname(__file__)), '''src''')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='''ignore''', category=FutureWarning) def a_ (__A ) -> Optional[Any]: """simple docstring""" from diffusers.utils.testing_utils import pytest_addoption_shared pytest_addoption_shared(A__ ) def a_ (__A ) -> List[Any]: """simple docstring""" from diffusers.utils.testing_utils import pytest_terminal_summary_main __a : Optional[int] = terminalreporter.config.getoption("--make-reports" ) if make_reports: pytest_terminal_summary_main(A__ , id=A__ )
351
'''simple docstring''' from __future__ import annotations def _A ( A__ , A__ ): """simple docstring""" print(F"Vertex\tShortest Distance from vertex {src}" ) for i, d in enumerate(A__ ): print(F"{i}\t\t{d}" ) def _A ( A__ , A__ , A__ ): """simple docstring""" for j in range(A__ ): __lowercase , __lowercase , __lowercase = (graph[j][k] for k in ['''src''', '''dst''', '''weight''']) if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]: return True return False def _A ( A__ , A__ , A__ , A__ ): """simple docstring""" __lowercase = [float('''inf''' )] * vertex_count __lowercase = 0.0 for _ in range(vertex_count - 1 ): for j in range(A__ ): __lowercase , __lowercase , __lowercase = (graph[j][k] for k in ['''src''', '''dst''', '''weight''']) if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]: __lowercase = distance[u] + w __lowercase = check_negative_cycle(A__ , A__ , A__ ) if negative_cycle_exists: raise Exception('''Negative cycle found''' ) return distance if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase__ = int(input('''Enter number of vertices: ''').strip()) lowerCAmelCase__ = int(input('''Enter number of edges: ''').strip()) lowerCAmelCase__ = [{} for _ in range(E)] for i in range(E): print('''Edge ''', i + 1) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = ( int(x) for x in input('''Enter source, destination, weight: ''').strip().split(''' ''') ) lowerCAmelCase__ = {'''src''': src, '''dst''': dest, '''weight''': weight} lowerCAmelCase__ = int(input('''\nEnter shortest path source:''').strip()) lowerCAmelCase__ = bellman_ford(graph, V, E, source) print_distance(shortest_distance, 0)
41
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging A_ : Tuple = logging.get_logger(__name__) A_ : Optional[int] = { 'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json', } class lowerCAmelCase__ ( lowerCamelCase__ ): '''simple docstring''' _SCREAMING_SNAKE_CASE : Optional[Any] = 'switch_transformers' _SCREAMING_SNAKE_CASE : Union[str, Any] = ['past_key_values'] _SCREAMING_SNAKE_CASE : Tuple = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'} def __init__( self : Optional[Any] , _SCREAMING_SNAKE_CASE : Tuple=32_128 , _SCREAMING_SNAKE_CASE : str=768 , _SCREAMING_SNAKE_CASE : Any=64 , _SCREAMING_SNAKE_CASE : List[str]=2_048 , _SCREAMING_SNAKE_CASE : Union[str, Any]=64 , _SCREAMING_SNAKE_CASE : List[str]=12 , _SCREAMING_SNAKE_CASE : List[Any]=3 , _SCREAMING_SNAKE_CASE : str=12 , _SCREAMING_SNAKE_CASE : Optional[int]=3 , _SCREAMING_SNAKE_CASE : Union[str, Any]=12 , _SCREAMING_SNAKE_CASE : Any=8 , _SCREAMING_SNAKE_CASE : List[str]=False , _SCREAMING_SNAKE_CASE : Any=0.0_1 , _SCREAMING_SNAKE_CASE : str="float32" , _SCREAMING_SNAKE_CASE : List[str]=False , _SCREAMING_SNAKE_CASE : int=32 , _SCREAMING_SNAKE_CASE : str=128 , _SCREAMING_SNAKE_CASE : List[str]=0.1 , _SCREAMING_SNAKE_CASE : Dict=1E-6 , _SCREAMING_SNAKE_CASE : Dict=0.0_0_1 , _SCREAMING_SNAKE_CASE : Union[str, Any]=0.0_0_1 , _SCREAMING_SNAKE_CASE : Optional[Any]=1.0 , _SCREAMING_SNAKE_CASE : int="relu" , _SCREAMING_SNAKE_CASE : Any=True , _SCREAMING_SNAKE_CASE : int=False , _SCREAMING_SNAKE_CASE : List[Any]=True , _SCREAMING_SNAKE_CASE : Optional[Any]=0 , _SCREAMING_SNAKE_CASE : Union[str, Any]=1 , **_SCREAMING_SNAKE_CASE : List[Any] , ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = vocab_size SCREAMING_SNAKE_CASE : Dict = d_model SCREAMING_SNAKE_CASE : Optional[Any] = d_kv SCREAMING_SNAKE_CASE : Dict = d_ff SCREAMING_SNAKE_CASE : Optional[int] = num_sparse_encoder_layers SCREAMING_SNAKE_CASE : Union[str, Any] = num_layers SCREAMING_SNAKE_CASE : Tuple = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry SCREAMING_SNAKE_CASE : Union[str, Any] = num_sparse_decoder_layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_encoder_layers > 0: SCREAMING_SNAKE_CASE : Dict = self.num_layers // self.num_sparse_encoder_layers else: SCREAMING_SNAKE_CASE : List[str] = self.num_layers # HACK: this will create 0 sparse layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_decoder_layers > 0: SCREAMING_SNAKE_CASE : Tuple = self.num_decoder_layers // self.num_sparse_decoder_layers else: SCREAMING_SNAKE_CASE : Any = self.num_decoder_layers # HACK: this will create 0 sparse layers SCREAMING_SNAKE_CASE : Optional[int] = num_heads SCREAMING_SNAKE_CASE : Any = num_experts SCREAMING_SNAKE_CASE : Any = expert_capacity SCREAMING_SNAKE_CASE : Tuple = router_bias SCREAMING_SNAKE_CASE : int = router_jitter_noise if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" ) SCREAMING_SNAKE_CASE : int = router_dtype SCREAMING_SNAKE_CASE : Optional[Any] = router_ignore_padding_tokens SCREAMING_SNAKE_CASE : Optional[int] = relative_attention_num_buckets SCREAMING_SNAKE_CASE : List[Any] = relative_attention_max_distance SCREAMING_SNAKE_CASE : Union[str, Any] = dropout_rate SCREAMING_SNAKE_CASE : str = layer_norm_epsilon SCREAMING_SNAKE_CASE : Dict = initializer_factor SCREAMING_SNAKE_CASE : Optional[Any] = feed_forward_proj SCREAMING_SNAKE_CASE : List[str] = use_cache SCREAMING_SNAKE_CASE : str = add_router_probs SCREAMING_SNAKE_CASE : Tuple = router_z_loss_coef SCREAMING_SNAKE_CASE : Dict = router_aux_loss_coef SCREAMING_SNAKE_CASE : Any = self.feed_forward_proj.split('-' ) SCREAMING_SNAKE_CASE : Optional[Any] = act_info[-1] SCREAMING_SNAKE_CASE : Optional[Any] = act_info[0] == 'gated' if len(lowercase__ ) > 1 and act_info[0] != "gated" or len(lowercase__ ) > 2: raise ValueError( f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.""" 'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ' '\'gated-gelu\' or \'relu\'' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": SCREAMING_SNAKE_CASE : Tuple = 'gelu_new' super().__init__( pad_token_id=lowercase__ , eos_token_id=lowercase__ , is_encoder_decoder=lowercase__ , **lowercase__ , )
265
'''simple docstring''' import warnings from ...utils import logging from .image_processing_yolos import YolosImageProcessor lowerCAmelCase__ = logging.get_logger(__name__) class lowercase_ (lowerCamelCase__ ): """simple docstring""" def __init__( self : List[Any] ,*lowercase__ : Optional[Any] ,**lowercase__ : int ): warnings.warn( '''The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use YolosImageProcessor instead.''' ,lowercase__ ,) super().__init__(*lowercase__ ,**lowercase__ )
41
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase_ : Optional[Any] = { "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : str = [ "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST", "Swinv2ForImageClassification", "Swinv2ForMaskedImageModeling", "Swinv2Model", "Swinv2PreTrainedModel", ] if TYPE_CHECKING: from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swinva import ( SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST, SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel, SwinvaPreTrainedModel, ) else: import sys UpperCAmelCase_ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
21
'''simple docstring''' import os import time import pytest from datasets.utils.filelock import FileLock, Timeout def _A ( A__ ): """simple docstring""" __lowercase = FileLock(str(tmpdir / '''foo.lock''' ) ) __lowercase = FileLock(str(tmpdir / '''foo.lock''' ) ) __lowercase = 0.0_1 with locka.acquire(): with pytest.raises(A__ ): __lowercase = time.time() locka.acquire(A__ ) assert time.time() - _start > timeout def _A ( A__ ): """simple docstring""" __lowercase = '''a''' * 1000 + '''.lock''' __lowercase = FileLock(str(tmpdir / filename ) ) assert locka._lock_file.endswith('''.lock''' ) assert not locka._lock_file.endswith(A__ ) assert len(os.path.basename(locka._lock_file ) ) <= 255 __lowercase = FileLock(tmpdir / filename ) with locka.acquire(): with pytest.raises(A__ ): locka.acquire(0 )
41
0
"""simple docstring""" import os import textwrap import pyarrow as pa import pytest from datasets import ClassLabel, Features, Image from datasets.packaged_modules.csv.csv import Csv from ..utils import require_pil @pytest.fixture def _A ( _a : Union[str, Any] ): """simple docstring""" A = tmp_path / """file.csv""" A = textwrap.dedent( """\ header1,header2 1,2 10,20 """ ) with open(A__ , """w""" ) as f: f.write(A__ ) return str(A__ ) @pytest.fixture def _A ( _a : int ): """simple docstring""" A = tmp_path / """malformed_file.csv""" A = textwrap.dedent( """\ header1,header2 1,2 10,20, """ ) with open(A__ , """w""" ) as f: f.write(A__ ) return str(A__ ) @pytest.fixture def _A ( _a : Optional[int] , _a : Union[str, Any] ): """simple docstring""" A = tmp_path / """csv_with_image.csv""" A = textwrap.dedent( f'\\n image\n {image_file}\n ' ) with open(A__ , """w""" ) as f: f.write(A__ ) return str(A__ ) @pytest.fixture def _A ( _a : Any ): """simple docstring""" A = tmp_path / """csv_with_label.csv""" A = textwrap.dedent( """\ label good bad good """ ) with open(A__ , """w""" ) as f: f.write(A__ ) return str(A__ ) @pytest.fixture def _A ( _a : Any ): """simple docstring""" A = tmp_path / """csv_with_int_list.csv""" A = textwrap.dedent( """\ int_list 1 2 3 4 5 6 7 8 9 """ ) with open(A__ , """w""" ) as f: f.write(A__ ) return str(A__ ) def _A ( _a : List[str] , _a : Any , _a : Union[str, Any] ): """simple docstring""" A = Csv() A = csv._generate_tables([[csv_file, malformed_csv_file]] ) with pytest.raises(A__ , match="""Error tokenizing data""" ): for _ in generator: pass assert any( record.levelname == """ERROR""" and """Failed to read file""" in record.message and os.path.basename(A__ ) in record.message for record in caplog.records ) @require_pil def _A ( _a : Dict ): """simple docstring""" with open(A__ , encoding="""utf-8""" ) as f: A = f.read().splitlines()[1] A = Csv(encoding="""utf-8""" , features=Features({"""image""": Image()} ) ) A = csv._generate_tables([[csv_file_with_image]] ) A = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field("""image""" ).type == Image()() A = pa_table.to_pydict()["""image"""] assert generated_content == [{"path": image_file, "bytes": None}] def _A ( _a : List[Any] ): """simple docstring""" with open(A__ , encoding="""utf-8""" ) as f: A = f.read().splitlines()[1:] A = Csv(encoding="""utf-8""" , features=Features({"""label""": ClassLabel(names=["""good""", """bad"""] )} ) ) A = csv._generate_tables([[csv_file_with_label]] ) A = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field("""label""" ).type == ClassLabel(names=["""good""", """bad"""] )() A = pa_table.to_pydict()["""label"""] assert generated_content == [ClassLabel(names=["""good""", """bad"""] ).straint(A__ ) for label in labels] def _A ( _a : Any ): """simple docstring""" A = Csv(encoding="""utf-8""" , sep=""",""" , converters={"""int_list""": lambda _a : [int(A__ ) for i in x.split()]} ) A = csv._generate_tables([[csv_file_with_int_list]] ) A = pa.concat_tables([table for _, table in generator] ) assert pa.types.is_list(pa_table.schema.field("""int_list""" ).type ) A = pa_table.to_pydict()["""int_list"""] assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
617
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) lowerCAmelCase__ = { '''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GPTBigCodeForSequenceClassification''', '''GPTBigCodeForTokenClassification''', '''GPTBigCodeForCausalLM''', '''GPTBigCodeModel''', '''GPTBigCodePreTrainedModel''', ] if TYPE_CHECKING: from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_bigcode import ( GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTBigCodeForCausalLM, GPTBigCodeForSequenceClassification, GPTBigCodeForTokenClassification, GPTBigCodeModel, GPTBigCodePreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
41
0
"""simple docstring""" import argparse import os import re import tensorflow as tf import torch from transformers import BertConfig, BertModel from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase_ : List[Any] = logging.get_logger(__name__) def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): '''simple docstring''' UpperCAmelCase = os.path.abspath(A__ ) logger.info(F'''Converting TensorFlow checkpoint from {tf_path}''' ) # Load weights from TF model UpperCAmelCase = tf.train.list_variables(A__ ) UpperCAmelCase = [] UpperCAmelCase = [] UpperCAmelCase = [] for full_name, shape in init_vars: # logger.info(f"Loading TF weight {name} with shape {shape}") UpperCAmelCase = full_name.split("""/""" ) if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]: logger.info(F'''Skipping non-model layer {full_name}''' ) continue if "optimizer" in full_name: logger.info(F'''Skipping optimization layer {full_name}''' ) continue if name[0] == "model": # ignore initial 'model' UpperCAmelCase = name[1:] # figure out how many levels deep the name is UpperCAmelCase = 0 for _name in name: if _name.startswith("""layer_with_weights""" ): depth += 1 else: break layer_depth.append(A__ ) # read data UpperCAmelCase = tf.train.load_variable(A__ , A__ ) names.append("""/""".join(A__ ) ) arrays.append(A__ ) logger.info(F'''Read a total of {len(A__ ):,} layers''' ) # Sanity check if len(set(A__ ) ) != 1: raise ValueError(F'''Found layer names with different depths (layer depth {list(set(A__ ) )})''' ) UpperCAmelCase = list(set(A__ ) )[0] if layer_depth != 1: raise ValueError( """The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP""" """ heads.""" ) # convert layers logger.info("""Converting weights...""" ) for full_name, array in zip(A__ , A__ ): UpperCAmelCase = full_name.split("""/""" ) UpperCAmelCase = model UpperCAmelCase = [] for i, m_name in enumerate(A__ ): if m_name == ".ATTRIBUTES": # variable names end with .ATTRIBUTES/VARIABLE_VALUE break if m_name.startswith("""layer_with_weights""" ): UpperCAmelCase = int(m_name.split("""-""" )[-1] ) if layer_num <= 2: # embedding layers # layer_num 0: word_embeddings # layer_num 1: position_embeddings # layer_num 2: token_type_embeddings continue elif layer_num == 3: # embedding LayerNorm trace.extend(["""embeddings""", """LayerNorm"""] ) UpperCAmelCase = getattr(A__ , """embeddings""" ) UpperCAmelCase = getattr(A__ , """LayerNorm""" ) elif layer_num > 3 and layer_num < config.num_hidden_layers + 4: # encoder layers trace.extend(["""encoder""", """layer""", str(layer_num - 4 )] ) UpperCAmelCase = getattr(A__ , """encoder""" ) UpperCAmelCase = getattr(A__ , """layer""" ) UpperCAmelCase = pointer[layer_num - 4] elif layer_num == config.num_hidden_layers + 4: # pooler layer trace.extend(["""pooler""", """dense"""] ) UpperCAmelCase = getattr(A__ , """pooler""" ) UpperCAmelCase = getattr(A__ , """dense""" ) elif m_name == "embeddings": trace.append("""embeddings""" ) UpperCAmelCase = getattr(A__ , """embeddings""" ) if layer_num == 0: trace.append("""word_embeddings""" ) UpperCAmelCase = getattr(A__ , """word_embeddings""" ) elif layer_num == 1: trace.append("""position_embeddings""" ) UpperCAmelCase = getattr(A__ , """position_embeddings""" ) elif layer_num == 2: trace.append("""token_type_embeddings""" ) UpperCAmelCase = getattr(A__ , """token_type_embeddings""" ) else: raise ValueError(F'''Unknown embedding layer with name {full_name}''' ) trace.append("""weight""" ) UpperCAmelCase = getattr(A__ , """weight""" ) elif m_name == "_attention_layer": # self-attention layer trace.extend(["""attention""", """self"""] ) UpperCAmelCase = getattr(A__ , """attention""" ) UpperCAmelCase = getattr(A__ , """self""" ) elif m_name == "_attention_layer_norm": # output attention norm trace.extend(["""attention""", """output""", """LayerNorm"""] ) UpperCAmelCase = getattr(A__ , """attention""" ) UpperCAmelCase = getattr(A__ , """output""" ) UpperCAmelCase = getattr(A__ , """LayerNorm""" ) elif m_name == "_attention_output_dense": # output attention dense trace.extend(["""attention""", """output""", """dense"""] ) UpperCAmelCase = getattr(A__ , """attention""" ) UpperCAmelCase = getattr(A__ , """output""" ) UpperCAmelCase = getattr(A__ , """dense""" ) elif m_name == "_output_dense": # output dense trace.extend(["""output""", """dense"""] ) UpperCAmelCase = getattr(A__ , """output""" ) UpperCAmelCase = getattr(A__ , """dense""" ) elif m_name == "_output_layer_norm": # output dense trace.extend(["""output""", """LayerNorm"""] ) UpperCAmelCase = getattr(A__ , """output""" ) UpperCAmelCase = getattr(A__ , """LayerNorm""" ) elif m_name == "_key_dense": # attention key trace.append("""key""" ) UpperCAmelCase = getattr(A__ , """key""" ) elif m_name == "_query_dense": # attention query trace.append("""query""" ) UpperCAmelCase = getattr(A__ , """query""" ) elif m_name == "_value_dense": # attention value trace.append("""value""" ) UpperCAmelCase = getattr(A__ , """value""" ) elif m_name == "_intermediate_dense": # attention intermediate dense trace.extend(["""intermediate""", """dense"""] ) UpperCAmelCase = getattr(A__ , """intermediate""" ) UpperCAmelCase = getattr(A__ , """dense""" ) elif m_name == "_output_layer_norm": # output layer norm trace.append("""output""" ) UpperCAmelCase = getattr(A__ , """output""" ) # weights & biases elif m_name in ["bias", "beta"]: trace.append("""bias""" ) UpperCAmelCase = getattr(A__ , """bias""" ) elif m_name in ["kernel", "gamma"]: trace.append("""weight""" ) UpperCAmelCase = getattr(A__ , """weight""" ) else: logger.warning(F'''Ignored {m_name}''' ) # for certain layers reshape is necessary UpperCAmelCase = """.""".join(A__ ) if re.match(r"""(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)""" , A__ ) or re.match( r"""(\S+)\.attention\.output\.dense\.weight""" , A__ ): UpperCAmelCase = array.reshape(pointer.data.shape ) if "kernel" in full_name: UpperCAmelCase = array.transpose() if pointer.shape == array.shape: UpperCAmelCase = torch.from_numpy(A__ ) else: raise ValueError( F'''Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:''' F''' {array.shape}''' ) logger.info(F'''Successfully set variable {full_name} to PyTorch layer {trace}''' ) return model def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): '''simple docstring''' logger.info(F'''Loading model based on config from {config_path}...''' ) UpperCAmelCase = BertConfig.from_json_file(A__ ) UpperCAmelCase = BertModel(A__ ) # Load weights from checkpoint logger.info(F'''Loading weights from checkpoint {tf_checkpoint_path}...''' ) load_tfa_weights_in_bert(A__ , A__ , A__ ) # Save pytorch-model logger.info(F'''Saving PyTorch model to {pytorch_dump_path}...''' ) torch.save(model.state_dict() , A__ ) if __name__ == "__main__": lowerCAmelCase_ : Dict = argparse.ArgumentParser() parser.add_argument( '''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow 2.x checkpoint path.''' ) parser.add_argument( '''--bert_config_file''', type=str, required=True, help='''The config json file corresponding to the BERT model. This specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', type=str, required=True, help='''Path to the output PyTorch model (must include filename).''', ) lowerCAmelCase_ : Any = parser.parse_args() convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
673
'''simple docstring''' import argparse import os import re lowerCAmelCase__ = '''src/diffusers''' # Pattern that looks at the indentation in a line. lowerCAmelCase__ = re.compile(R'''^(\s*)\S''') # Pattern that matches `"key":" and puts `key` in group 0. lowerCAmelCase__ = re.compile(R'''^\s*"([^"]+)":''') # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. lowerCAmelCase__ = re.compile(R'''^\s*_import_structure\["([^"]+)"\]''') # Pattern that matches `"key",` and puts `key` in group 0. lowerCAmelCase__ = re.compile(R'''^\s*"([^"]+)",\s*$''') # Pattern that matches any `[stuff]` and puts `stuff` in group 0. lowerCAmelCase__ = re.compile(R'''\[([^\]]+)\]''') def _A ( A__ ): """simple docstring""" __lowercase = _re_indent.search(A__ ) return "" if search is None else search.groups()[0] def _A ( A__ , A__="" , A__=None , A__=None ): """simple docstring""" __lowercase = 0 __lowercase = code.split('''\n''' ) if start_prompt is not None: while not lines[index].startswith(A__ ): index += 1 __lowercase = ['''\n'''.join(lines[:index] )] else: __lowercase = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). __lowercase = [lines[index]] index += 1 while index < len(A__ ) and (end_prompt is None or not lines[index].startswith(A__ )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(A__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ): current_block.append(lines[index] ) blocks.append('''\n'''.join(A__ ) ) if index < len(A__ ) - 1: __lowercase = [lines[index + 1]] index += 1 else: __lowercase = [] else: blocks.append('''\n'''.join(A__ ) ) __lowercase = [lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(A__ ) > 0: blocks.append('''\n'''.join(A__ ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(A__ ): blocks.append('''\n'''.join(lines[index:] ) ) return blocks def _A ( A__ ): """simple docstring""" def _inner(A__ ): return key(A__ ).lower().replace('''_''' , '''''' ) return _inner def _A ( A__ , A__=None ): """simple docstring""" def noop(A__ ): return x if key is None: __lowercase = noop # Constants are all uppercase, they go first. __lowercase = [obj for obj in objects if key(A__ ).isupper()] # Classes are not all uppercase but start with a capital, they go second. __lowercase = [obj for obj in objects if key(A__ )[0].isupper() and not key(A__ ).isupper()] # Functions begin with a lowercase, they go last. __lowercase = [obj for obj in objects if not key(A__ )[0].isupper()] __lowercase = ignore_underscore(A__ ) return sorted(A__ , key=A__ ) + sorted(A__ , key=A__ ) + sorted(A__ , key=A__ ) def _A ( A__ ): """simple docstring""" def _replace(A__ ): __lowercase = match.groups()[0] if "," not in imports: return F"[{imports}]" __lowercase = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: __lowercase = keys[:-1] return "[" + ", ".join([F"\"{k}\"" for k in sort_objects(A__ )] ) + "]" __lowercase = import_statement.split('''\n''' ) if len(A__ ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. __lowercase = 2 if lines[1].strip() == '''[''' else 1 __lowercase = [(i, _re_strip_line.search(A__ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] __lowercase = sort_objects(A__ , key=lambda A__ : x[1] ) __lowercase = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(A__ ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: __lowercase = _re_bracket_content.sub(_replace , lines[1] ) else: __lowercase = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: __lowercase = keys[:-1] __lowercase = get_indent(lines[1] ) + ''', '''.join([F"\"{k}\"" for k in sort_objects(A__ )] ) return "\n".join(A__ ) else: # Finally we have to deal with imports fitting on one line __lowercase = _re_bracket_content.sub(_replace , A__ ) return import_statement def _A ( A__ , A__=True ): """simple docstring""" with open(A__ , '''r''' ) as f: __lowercase = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 __lowercase = split_code_in_indented_blocks( A__ , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' ) # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(A__ ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. __lowercase = main_blocks[block_idx] __lowercase = block.split('''\n''' ) # Get to the start of the imports. __lowercase = 0 while line_idx < len(A__ ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: __lowercase = len(A__ ) else: line_idx += 1 if line_idx >= len(A__ ): continue # Ignore beginning and last line: they don't contain anything. __lowercase = '''\n'''.join(block_lines[line_idx:-1] ) __lowercase = get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. __lowercase = split_code_in_indented_blocks(A__ , indent_level=A__ ) # We have two categories of import key: list or _import_structure[key].append/extend __lowercase = _re_direct_key if '''_import_structure''' in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. __lowercase = [(pattern.search(A__ ).groups()[0] if pattern.search(A__ ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. __lowercase = [(i, key) for i, key in enumerate(A__ ) if key is not None] __lowercase = [x[0] for x in sorted(A__ , key=lambda A__ : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. __lowercase = 0 __lowercase = [] for i in range(len(A__ ) ): if keys[i] is None: reordered_blocks.append(internal_blocks[i] ) else: __lowercase = sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reordered_blocks.append(A__ ) count += 1 # And we put our main block back together with its first and last line. __lowercase = '''\n'''.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] ) if code != "\n".join(A__ ): if check_only: return True else: print(F"Overwriting {file}." ) with open(A__ , '''w''' ) as f: f.write('''\n'''.join(A__ ) ) def _A ( A__=True ): """simple docstring""" __lowercase = [] for root, _, files in os.walk(A__ ): if "__init__.py" in files: __lowercase = sort_imports(os.path.join(A__ , '''__init__.py''' ) , check_only=A__ ) if result: __lowercase = [os.path.join(A__ , '''__init__.py''' )] if len(A__ ) > 0: raise ValueError(F"Would overwrite {len(A__ )} files, run `make style`." ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''') lowerCAmelCase__ = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
41
0
import json import os import tempfile import datasets from utils import generate_example_dataset, get_duration __UpperCamelCase : List[Any] = 50000 __UpperCamelCase : str = 5000 __UpperCamelCase , __UpperCamelCase : Tuple = os.path.split(__file__) __UpperCamelCase : Union[str, Any] = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json')) @get_duration def _UpperCAmelCase ( UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] ): """simple docstring""" for i in range(A__ ): __lowerCamelCase : List[Any] = dataset[i] @get_duration def _UpperCAmelCase ( UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int ): """simple docstring""" for i in range(0 , len(A__ ) , A__ ): __lowerCamelCase : int = dataset[i : i + batch_size] @get_duration def _UpperCAmelCase ( UpperCAmelCase : Tuple , UpperCAmelCase : Tuple , UpperCAmelCase : Tuple ): """simple docstring""" with dataset.formatted_as(type=A__ ): for i in range(A__ ): __lowerCamelCase : Optional[int] = dataset[i] @get_duration def _UpperCAmelCase ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] ): """simple docstring""" with dataset.formatted_as(type=A__ ): for i in range(0 , A__ , A__ ): __lowerCamelCase : Tuple = dataset[i : i + batch_size] def _UpperCAmelCase ( ): """simple docstring""" __lowerCamelCase : Tuple = {"""num examples""": SPEED_TEST_N_EXAMPLES} __lowerCamelCase : List[str] = [ (read, {"""length""": SMALL_TEST}), (read, {"""length""": SPEED_TEST_N_EXAMPLES}), (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}), (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 100}), (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_000}), (read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}), (read_formatted, {"""type""": """pandas""", """length""": SMALL_TEST}), (read_formatted, {"""type""": """torch""", """length""": SMALL_TEST}), (read_formatted, {"""type""": """tensorflow""", """length""": SMALL_TEST}), (read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}), (read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1_000}), ] __lowerCamelCase : Dict = [ (read, {"""length""": SMALL_TEST}), (read, {"""length""": SPEED_TEST_N_EXAMPLES}), (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}), (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 100}), (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_000}), (read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}), (read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}), (read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1_000}), ] with tempfile.TemporaryDirectory() as tmp_dir: print("""generating dataset""" ) __lowerCamelCase : Tuple = datasets.Features( {"""list""": datasets.Sequence(datasets.Value("""float32""" ) ), """numbers""": datasets.Value("""float32""" )} ) __lowerCamelCase : List[Any] = generate_example_dataset( os.path.join(A__ , """dataset.arrow""" ) , A__ , num_examples=A__ , seq_shapes={"""list""": (100,)} , ) print("""first set of iterations""" ) for func, kwargs in functions: print(func.__name__ , str(A__ ) ) __lowerCamelCase : Dict = func(A__ , **A__ ) print("""shuffling dataset""" ) __lowerCamelCase : Tuple = dataset.shuffle() print("""Second set of iterations (after shuffling""" ) for func, kwargs in functions_shuffled: print("""shuffled """ , func.__name__ , str(A__ ) ) __lowerCamelCase : str = func( A__ , **A__ ) with open(A__ , """wb""" ) as f: f.write(json.dumps(A__ ).encode("""utf-8""" ) ) if __name__ == "__main__": # useful to run the profiler benchmark_iterating()
519
'''simple docstring''' import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class lowercase_ (lowerCamelCase__ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = TextToVideoSDPipeline SCREAMING_SNAKE_CASE : List[str] = TEXT_TO_IMAGE_PARAMS SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. SCREAMING_SNAKE_CASE : Optional[int] = frozenset( [ 'num_inference_steps', 'generator', 'latents', 'return_dict', 'callback', 'callback_steps', ] ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): torch.manual_seed(0 ) __lowercase = UNetaDConditionModel( block_out_channels=(3_2, 6_4, 6_4, 6_4) ,layers_per_block=2 ,sample_size=3_2 ,in_channels=4 ,out_channels=4 ,down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') ,up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') ,cross_attention_dim=3_2 ,attention_head_dim=4 ,) __lowercase = DDIMScheduler( beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule='''scaled_linear''' ,clip_sample=lowercase__ ,set_alpha_to_one=lowercase__ ,) torch.manual_seed(0 ) __lowercase = AutoencoderKL( block_out_channels=[3_2, 6_4] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=4 ,sample_size=1_2_8 ,) torch.manual_seed(0 ) __lowercase = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=3_2 ,intermediate_size=3_7 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,hidden_act='''gelu''' ,projection_dim=5_1_2 ,) __lowercase = CLIPTextModel(lowercase__ ) __lowercase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) __lowercase = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, } return components def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : int ,lowercase__ : List[str]=0 ): if str(lowercase__ ).startswith('''mps''' ): __lowercase = torch.manual_seed(lowercase__ ) else: __lowercase = torch.Generator(device=lowercase__ ).manual_seed(lowercase__ ) __lowercase = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''pt''', } return inputs def SCREAMING_SNAKE_CASE ( self : Optional[int] ): __lowercase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __lowercase = self.get_dummy_components() __lowercase = TextToVideoSDPipeline(**lowercase__ ) __lowercase = sd_pipe.to(lowercase__ ) sd_pipe.set_progress_bar_config(disable=lowercase__ ) __lowercase = self.get_dummy_inputs(lowercase__ ) __lowercase = '''np''' __lowercase = sd_pipe(**lowercase__ ).frames __lowercase = frames[0][-3:, -3:, -1] assert frames[0].shape == (6_4, 6_4, 3) __lowercase = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowercase__ ,expected_max_diff=3e-3 ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,) def SCREAMING_SNAKE_CASE ( self : Any ): self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowercase__ ,expected_max_diff=1e-2 ) @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' ) def SCREAMING_SNAKE_CASE ( self : List[str] ): pass @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' ) def SCREAMING_SNAKE_CASE ( self : Tuple ): pass @unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' ) def SCREAMING_SNAKE_CASE ( self : Tuple ): pass def SCREAMING_SNAKE_CASE ( self : List[str] ): return super().test_progress_bar() @slow @skip_mps class lowercase_ (unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self : int ): __lowercase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy''' ) __lowercase = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' ) __lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) __lowercase = pipe.to('''cuda''' ) __lowercase = '''Spiderman is surfing''' __lowercase = torch.Generator(device='''cpu''' ).manual_seed(0 ) __lowercase = pipe(lowercase__ ,generator=lowercase__ ,num_inference_steps=2_5 ,output_type='''pt''' ).frames __lowercase = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2 def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): __lowercase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy''' ) __lowercase = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' ) __lowercase = pipe.to('''cuda''' ) __lowercase = '''Spiderman is surfing''' __lowercase = torch.Generator(device='''cpu''' ).manual_seed(0 ) __lowercase = pipe(lowercase__ ,generator=lowercase__ ,num_inference_steps=2 ,output_type='''pt''' ).frames __lowercase = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2
41
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __snake_case : Optional[Any] = { 'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'], 'tokenization_m2m_100': ['M2M100Tokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : Optional[Any] = [ 'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST', 'M2M100ForConditionalGeneration', 'M2M100Model', 'M2M100PreTrainedModel', ] if TYPE_CHECKING: from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig from .tokenization_mam_aaa import MaMaaaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mam_aaa import ( M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST, MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaPreTrainedModel, ) else: import sys __snake_case : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
571
'''simple docstring''' import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def _A ( A__ ): """simple docstring""" __lowercase = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''_float_tensor''', '''decoder.output_projection.weight''', ] for k in ignore_keys: state_dict.pop(A__ , A__ ) def _A ( A__ ): """simple docstring""" __lowercase , __lowercase = emb.weight.shape __lowercase = nn.Linear(A__ , A__ , bias=A__ ) __lowercase = emb.weight.data return lin_layer def _A ( A__ , A__="facebook/mbart-large-en-ro" , A__=False , A__=False ): """simple docstring""" __lowercase = torch.load(A__ , map_location='''cpu''' )['''model'''] remove_ignore_keys_(A__ ) __lowercase = state_dict['''encoder.embed_tokens.weight'''].shape[0] __lowercase = MBartConfig.from_pretrained(A__ , vocab_size=A__ ) if mbart_aa and finetuned: __lowercase = '''relu''' __lowercase = state_dict['''decoder.embed_tokens.weight'''] __lowercase = MBartForConditionalGeneration(A__ ) model.model.load_state_dict(A__ ) if finetuned: __lowercase = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default='''facebook/mbart-large-cc25''', type=str, help='''Which huggingface architecture to use: mbart-large''', ) parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''') parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''') lowerCAmelCase__ = parser.parse_args() lowerCAmelCase__ = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
41
0
from __future__ import annotations def a (_lowerCAmelCase ): SCREAMING_SNAKE_CASE_ = str(A__ ) return len(A__ ) == 9 and set(A__ ) == set('''123456789''' ) def a (): for base_num in range(9_9_9_9 , 4_9_9_9 , -1 ): SCREAMING_SNAKE_CASE_ = 1_0_0_0_0_2 * base_num if is_9_pandigital(A__ ): return candidate for base_num in range(3_3_3 , 9_9 , -1 ): SCREAMING_SNAKE_CASE_ = 1_0_0_2_0_0_3 * base_num if is_9_pandigital(A__ ): return candidate return None if __name__ == "__main__": print(f"""{solution() = }""")
234
'''simple docstring''' import os from math import logaa def _A ( A__ = "base_exp.txt" ): """simple docstring""" __lowercase = 0 __lowercase = 0 for i, line in enumerate(open(os.path.join(os.path.dirname(A__ ) , A__ ) ) ): __lowercase , __lowercase = list(map(A__ , line.split(''',''' ) ) ) if x * logaa(A__ ) > largest: __lowercase = x * logaa(A__ ) __lowercase = i + 1 return result if __name__ == "__main__": print(solution())
41
0
'''simple docstring''' import unittest from transformers import CamembertTokenizer, CamembertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import is_torch_available from ...test_tokenization_common import TokenizerTesterMixin __a: List[str] = get_tests_dir("""fixtures/test_sentencepiece.model""") __a: List[Any] = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""") __a: Any = """pt""" if is_torch_available() else """tf""" @require_sentencepiece @require_tokenizers class UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE = CamembertTokenizer SCREAMING_SNAKE_CASE = CamembertTokenizerFast SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = True def _lowerCAmelCase( self ) -> Tuple: super().setUp() # We have a SentencePiece fixture for testing lowercase__ : Optional[int] = CamembertTokenizer(lowercase__ ) tokenizer.save_pretrained(self.tmpdirname ) def _lowerCAmelCase( self ) -> int: lowercase__ : Dict = '''<pad>''' lowercase__ : Any = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase__ ) , lowercase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase__ ) , lowercase__ ) def _lowerCAmelCase( self ) -> Optional[int]: lowercase__ : Dict = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>NOTUSED''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-1] , '''<mask>''' ) self.assertEqual(len(lowercase__ ) , 1004 ) def _lowerCAmelCase( self ) -> List[Any]: self.assertEqual(self.get_tokenizer().vocab_size , 1005 ) def _lowerCAmelCase( self ) -> Optional[Any]: lowercase__ : Union[str, Any] = CamembertTokenizer(lowercase__ ) tokenizer.save_pretrained(self.tmpdirname ) lowercase__ : Optional[Any] = CamembertTokenizerFast.from_pretrained(self.tmpdirname ) lowercase__ : Optional[int] = '''I was born in 92000, and this is falsé.''' lowercase__ : Dict = tokenizer.encode(lowercase__ ) lowercase__ : Optional[int] = rust_tokenizer.encode(lowercase__ ) self.assertListEqual(lowercase__ , lowercase__ ) lowercase__ : Optional[Any] = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ ) lowercase__ : Tuple = rust_tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ ) self.assertListEqual(lowercase__ , lowercase__ ) # <unk> tokens are not the same for `rust` than for `slow`. # Because spm gives back raw token instead of `unk` in EncodeAsPieces # tokens = tokenizer.tokenize(sequence) lowercase__ : Optional[Any] = tokenizer.convert_ids_to_tokens(lowercase__ ) lowercase__ : Dict = rust_tokenizer.tokenize(lowercase__ ) self.assertListEqual(lowercase__ , lowercase__ ) def _lowerCAmelCase( self ) -> int: if not self.test_rust_tokenizer: return lowercase__ : Dict = self.get_tokenizer() lowercase__ : str = self.get_rust_tokenizer() lowercase__ : Any = '''I was born in 92000, and this is falsé.''' lowercase__ : List[str] = tokenizer.tokenize(lowercase__ ) lowercase__ : Optional[int] = rust_tokenizer.tokenize(lowercase__ ) self.assertListEqual(lowercase__ , lowercase__ ) lowercase__ : Union[str, Any] = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ ) lowercase__ : List[str] = rust_tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ ) self.assertListEqual(lowercase__ , lowercase__ ) lowercase__ : Optional[Any] = self.get_rust_tokenizer() lowercase__ : Optional[Any] = tokenizer.encode(lowercase__ ) lowercase__ : List[str] = rust_tokenizer.encode(lowercase__ ) self.assertListEqual(lowercase__ , lowercase__ ) @slow def _lowerCAmelCase( self ) -> List[Any]: # fmt: off lowercase__ : str = {'''input_ids''': [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # camembert is a french model. So we also use french texts. lowercase__ : Any = [ '''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, ''' '''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''', '''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus ''' '''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches ''' '''telles que la traduction et la synthèse de texte.''', ] self.tokenizer_integration_test_util( expected_encoding=lowercase__ , model_name='''camembert-base''' , revision='''3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf''' , sequences=lowercase__ , )
152
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''', # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class lowercase_ (lowerCamelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = 'blenderbot-small' SCREAMING_SNAKE_CASE : int = ['past_key_values'] SCREAMING_SNAKE_CASE : List[str] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self : Optional[int] ,lowercase__ : List[str]=5_0_2_6_5 ,lowercase__ : Optional[Any]=5_1_2 ,lowercase__ : Optional[int]=8 ,lowercase__ : List[Any]=2_0_4_8 ,lowercase__ : List[str]=1_6 ,lowercase__ : str=8 ,lowercase__ : Any=2_0_4_8 ,lowercase__ : Tuple=1_6 ,lowercase__ : Tuple=0.0 ,lowercase__ : List[str]=0.0 ,lowercase__ : Any=True ,lowercase__ : str=True ,lowercase__ : int="gelu" ,lowercase__ : Tuple=5_1_2 ,lowercase__ : List[Any]=0.1 ,lowercase__ : Tuple=0.0 ,lowercase__ : str=0.0 ,lowercase__ : Any=0.0_2 ,lowercase__ : Union[str, Any]=1 ,lowercase__ : List[Any]=False ,lowercase__ : Optional[int]=0 ,lowercase__ : Optional[int]=1 ,lowercase__ : str=2 ,lowercase__ : int=2 ,**lowercase__ : List[str] ,): __lowercase = vocab_size __lowercase = max_position_embeddings __lowercase = d_model __lowercase = encoder_ffn_dim __lowercase = encoder_layers __lowercase = encoder_attention_heads __lowercase = decoder_ffn_dim __lowercase = decoder_layers __lowercase = decoder_attention_heads __lowercase = dropout __lowercase = attention_dropout __lowercase = activation_dropout __lowercase = activation_function __lowercase = init_std __lowercase = encoder_layerdrop __lowercase = decoder_layerdrop __lowercase = use_cache __lowercase = encoder_layers __lowercase = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=lowercase__ ,bos_token_id=lowercase__ ,eos_token_id=lowercase__ ,is_encoder_decoder=lowercase__ ,decoder_start_token_id=lowercase__ ,forced_eos_token_id=lowercase__ ,**lowercase__ ,) class lowercase_ (lowerCamelCase__ ): """simple docstring""" @property def SCREAMING_SNAKE_CASE ( self : Dict ): if self.task in ["default", "seq2seq-lm"]: __lowercase = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: __lowercase = {0: '''batch'''} __lowercase = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: __lowercase = {0: '''batch''', 1: '''decoder_sequence'''} __lowercase = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(lowercase__ ,direction='''inputs''' ) elif self.task == "causal-lm": # TODO: figure this case out. __lowercase = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: __lowercase , __lowercase = self.num_layers for i in range(lowercase__ ): __lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''} __lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''} else: __lowercase = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}), ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}), ] ) return common_inputs @property def SCREAMING_SNAKE_CASE ( self : List[Any] ): if self.task in ["default", "seq2seq-lm"]: __lowercase = super().outputs else: __lowercase = super(lowercase__ ,self ).outputs if self.use_past: __lowercase , __lowercase = self.num_layers for i in range(lowercase__ ): __lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''} __lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''} return common_outputs def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,): __lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) # Generate decoder inputs __lowercase = seq_length if not self.use_past else 1 __lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) __lowercase = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} __lowercase = dict(**lowercase__ ,**lowercase__ ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch __lowercase , __lowercase = common_inputs['''input_ids'''].shape __lowercase = common_inputs['''decoder_input_ids'''].shape[1] __lowercase , __lowercase = self.num_attention_heads __lowercase = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) __lowercase = decoder_seq_length + 3 __lowercase = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) __lowercase = torch.cat( [common_inputs['''decoder_attention_mask'''], torch.ones(lowercase__ ,lowercase__ )] ,dim=1 ) __lowercase = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered __lowercase , __lowercase = self.num_layers __lowercase = min(lowercase__ ,lowercase__ ) __lowercase = max(lowercase__ ,lowercase__ ) - min_num_layers __lowercase = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder''' for _ in range(lowercase__ ): common_inputs["past_key_values"].append( ( torch.zeros(lowercase__ ), torch.zeros(lowercase__ ), torch.zeros(lowercase__ ), torch.zeros(lowercase__ ), ) ) # TODO: test this. __lowercase = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape for _ in range(lowercase__ ,lowercase__ ): common_inputs["past_key_values"].append((torch.zeros(lowercase__ ), torch.zeros(lowercase__ )) ) return common_inputs def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,): __lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch __lowercase , __lowercase = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values __lowercase = seqlen + 2 __lowercase , __lowercase = self.num_layers __lowercase , __lowercase = self.num_attention_heads __lowercase = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) __lowercase = common_inputs['''attention_mask'''].dtype __lowercase = torch.cat( [common_inputs['''attention_mask'''], torch.ones(lowercase__ ,lowercase__ ,dtype=lowercase__ )] ,dim=1 ) __lowercase = [ (torch.zeros(lowercase__ ), torch.zeros(lowercase__ )) for _ in range(lowercase__ ) ] return common_inputs def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,): # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX __lowercase = compute_effective_axis_dimension( lowercase__ ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX __lowercase = tokenizer.num_special_tokens_to_add(lowercase__ ) __lowercase = compute_effective_axis_dimension( lowercase__ ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=lowercase__ ) # Generate dummy inputs according to compute batch and sequence __lowercase = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size __lowercase = dict(tokenizer(lowercase__ ,return_tensors=lowercase__ ) ) return common_inputs def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,): if self.task in ["default", "seq2seq-lm"]: __lowercase = self._generate_dummy_inputs_for_default_and_seqaseq_lm( lowercase__ ,batch_size=lowercase__ ,seq_length=lowercase__ ,is_pair=lowercase__ ,framework=lowercase__ ) elif self.task == "causal-lm": __lowercase = self._generate_dummy_inputs_for_causal_lm( lowercase__ ,batch_size=lowercase__ ,seq_length=lowercase__ ,is_pair=lowercase__ ,framework=lowercase__ ) else: __lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowercase__ ,batch_size=lowercase__ ,seq_length=lowercase__ ,is_pair=lowercase__ ,framework=lowercase__ ) return common_inputs def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : List[Any] ,lowercase__ : Tuple ,lowercase__ : List[Any] ,lowercase__ : Optional[Any] ): if self.task in ["default", "seq2seq-lm"]: __lowercase = super()._flatten_past_key_values_(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) else: __lowercase = super(lowercase__ ,self )._flatten_past_key_values_( lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
41
0
"""simple docstring""" from collections import defaultdict from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst def SCREAMING_SNAKE_CASE__ ( ) -> Union[str, Any]: lowercase__, lowercase__: Any = 9, 1_4 # noqa: F841 lowercase__: Tuple = [ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 1_4], [3, 4, 9], [5, 4, 1_0], [1, 7, 1_1], ] lowercase__: Dict = defaultdict(A__ ) for nodea, nodea, cost in edges: adjancency[nodea].append([nodea, cost] ) adjancency[nodea].append([nodea, cost] ) lowercase__: Any = mst(A__ ) lowercase__: Optional[int] = [ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] for answer in expected: lowercase__: Tuple = tuple(answer[:2] ) lowercase__: Any = tuple(edge[::-1] ) assert edge in result or reverse in result
586
'''simple docstring''' from __future__ import annotations def _A ( A__ , A__ ): """simple docstring""" if b == 0: return (1, 0) ((__lowercase) , (__lowercase)) = extended_euclid(A__ , a % b ) __lowercase = a // b return (y, x - k * y) def _A ( A__ , A__ , A__ , A__ ): """simple docstring""" ((__lowercase) , (__lowercase)) = extended_euclid(A__ , A__ ) __lowercase = na * na __lowercase = ra * x * na + ra * y * na return (n % m + m) % m def _A ( A__ , A__ ): """simple docstring""" ((__lowercase) , (__lowercase)) = extended_euclid(A__ , A__ ) if b < 0: __lowercase = (b % n + n) % n return b def _A ( A__ , A__ , A__ , A__ ): """simple docstring""" __lowercase , __lowercase = invert_modulo(A__ , A__ ), invert_modulo(A__ , A__ ) __lowercase = na * na __lowercase = ra * x * na + ra * y * na return (n % m + m) % m if __name__ == "__main__": from doctest import testmod testmod(name='''chinese_remainder_theorem''', verbose=True) testmod(name='''chinese_remainder_theorem2''', verbose=True) testmod(name='''invert_modulo''', verbose=True) testmod(name='''extended_euclid''', verbose=True)
41
0
"""simple docstring""" import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all image processors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...image_processing_utils import ImageProcessingMixin from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = OrderedDict( [ ('''align''', '''EfficientNetImageProcessor'''), ('''beit''', '''BeitImageProcessor'''), ('''bit''', '''BitImageProcessor'''), ('''blip''', '''BlipImageProcessor'''), ('''blip-2''', '''BlipImageProcessor'''), ('''bridgetower''', '''BridgeTowerImageProcessor'''), ('''chinese_clip''', '''ChineseCLIPImageProcessor'''), ('''clip''', '''CLIPImageProcessor'''), ('''clipseg''', '''ViTImageProcessor'''), ('''conditional_detr''', '''ConditionalDetrImageProcessor'''), ('''convnext''', '''ConvNextImageProcessor'''), ('''convnextv2''', '''ConvNextImageProcessor'''), ('''cvt''', '''ConvNextImageProcessor'''), ('''data2vec-vision''', '''BeitImageProcessor'''), ('''deformable_detr''', '''DeformableDetrImageProcessor'''), ('''deit''', '''DeiTImageProcessor'''), ('''deta''', '''DetaImageProcessor'''), ('''detr''', '''DetrImageProcessor'''), ('''dinat''', '''ViTImageProcessor'''), ('''donut-swin''', '''DonutImageProcessor'''), ('''dpt''', '''DPTImageProcessor'''), ('''efficientformer''', '''EfficientFormerImageProcessor'''), ('''efficientnet''', '''EfficientNetImageProcessor'''), ('''flava''', '''FlavaImageProcessor'''), ('''focalnet''', '''BitImageProcessor'''), ('''git''', '''CLIPImageProcessor'''), ('''glpn''', '''GLPNImageProcessor'''), ('''groupvit''', '''CLIPImageProcessor'''), ('''imagegpt''', '''ImageGPTImageProcessor'''), ('''instructblip''', '''BlipImageProcessor'''), ('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''), ('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''), ('''levit''', '''LevitImageProcessor'''), ('''mask2former''', '''Mask2FormerImageProcessor'''), ('''maskformer''', '''MaskFormerImageProcessor'''), ('''mgp-str''', '''ViTImageProcessor'''), ('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''), ('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''), ('''mobilevit''', '''MobileViTImageProcessor'''), ('''mobilevit''', '''MobileViTImageProcessor'''), ('''mobilevitv2''', '''MobileViTImageProcessor'''), ('''nat''', '''ViTImageProcessor'''), ('''oneformer''', '''OneFormerImageProcessor'''), ('''owlvit''', '''OwlViTImageProcessor'''), ('''perceiver''', '''PerceiverImageProcessor'''), ('''pix2struct''', '''Pix2StructImageProcessor'''), ('''poolformer''', '''PoolFormerImageProcessor'''), ('''regnet''', '''ConvNextImageProcessor'''), ('''resnet''', '''ConvNextImageProcessor'''), ('''sam''', '''SamImageProcessor'''), ('''segformer''', '''SegformerImageProcessor'''), ('''swiftformer''', '''ViTImageProcessor'''), ('''swin''', '''ViTImageProcessor'''), ('''swin2sr''', '''Swin2SRImageProcessor'''), ('''swinv2''', '''ViTImageProcessor'''), ('''table-transformer''', '''DetrImageProcessor'''), ('''timesformer''', '''VideoMAEImageProcessor'''), ('''tvlt''', '''TvltImageProcessor'''), ('''upernet''', '''SegformerImageProcessor'''), ('''van''', '''ConvNextImageProcessor'''), ('''videomae''', '''VideoMAEImageProcessor'''), ('''vilt''', '''ViltImageProcessor'''), ('''vit''', '''ViTImageProcessor'''), ('''vit_hybrid''', '''ViTHybridImageProcessor'''), ('''vit_mae''', '''ViTImageProcessor'''), ('''vit_msn''', '''ViTImageProcessor'''), ('''xclip''', '''CLIPImageProcessor'''), ('''yolos''', '''YolosImageProcessor'''), ] ) lowerCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES) def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> List[Any]: for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items(): if class_name in extractors: _SCREAMING_SNAKE_CASE : int = model_type_to_module_name(A__ ) _SCREAMING_SNAKE_CASE : Tuple = importlib.import_module(F""".{module_name}""" , """transformers.models""" ) try: return getattr(A__ , A__ ) except AttributeError: continue for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items(): if getattr(A__ , """__name__""" , A__ ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. _SCREAMING_SNAKE_CASE : Tuple = importlib.import_module("""transformers""" ) if hasattr(A__ , A__ ): return getattr(A__ , A__ ) return None def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , **__SCREAMING_SNAKE_CASE , )-> Dict: _SCREAMING_SNAKE_CASE : List[str] = get_file_from_repo( A__ , A__ , cache_dir=A__ , force_download=A__ , resume_download=A__ , proxies=A__ , use_auth_token=A__ , revision=A__ , local_files_only=A__ , ) if resolved_config_file is None: logger.info( """Could not locate the image processor configuration file, will try to use the model config instead.""" ) return {} with open(A__ , encoding="""utf-8""" ) as reader: return json.load(A__ ) class _snake_case : """simple docstring""" def __init__( self : int): """simple docstring""" raise EnvironmentError( """AutoImageProcessor is designed to be instantiated """ """using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.""") @classmethod @replace_list_option_in_docstrings(lowercase__) def _lowerCAmelCase ( cls : str , _A : str , **_A : int): """simple docstring""" _SCREAMING_SNAKE_CASE : int = kwargs.pop("""config""" , lowercase__) _SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs.pop("""trust_remote_code""" , lowercase__) _SCREAMING_SNAKE_CASE : Union[str, Any] = True _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = ImageProcessingMixin.get_image_processor_dict(lowercase__ , **lowercase__) _SCREAMING_SNAKE_CASE : Tuple = config_dict.get("""image_processor_type""" , lowercase__) _SCREAMING_SNAKE_CASE : Optional[Any] = None if "AutoImageProcessor" in config_dict.get("""auto_map""" , {}): _SCREAMING_SNAKE_CASE : List[Any] = config_dict["""auto_map"""]["""AutoImageProcessor"""] # If we still don't have the image processor class, check if we're loading from a previous feature extractor config # and if so, infer the image processor class from there. if image_processor_class is None and image_processor_auto_map is None: _SCREAMING_SNAKE_CASE : int = config_dict.pop("""feature_extractor_type""" , lowercase__) if feature_extractor_class is not None: logger.warning( """Could not find image processor class in the image processor config or the model config. Loading""" """ based on pattern matching with the model\'s feature extractor configuration.""") _SCREAMING_SNAKE_CASE : List[str] = feature_extractor_class.replace("""FeatureExtractor""" , """ImageProcessor""") if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {}): _SCREAMING_SNAKE_CASE : Tuple = config_dict["""auto_map"""]["""AutoFeatureExtractor"""] _SCREAMING_SNAKE_CASE : Union[str, Any] = feature_extractor_auto_map.replace("""FeatureExtractor""" , """ImageProcessor""") logger.warning( """Could not find image processor auto map in the image processor config or the model config.""" """ Loading based on pattern matching with the model\'s feature extractor configuration.""") # If we don't find the image processor class in the image processor config, let's try the model config. if image_processor_class is None and image_processor_auto_map is None: if not isinstance(lowercase__ , lowercase__): _SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained(lowercase__ , **lowercase__) # It could be in `config.image_processor_type`` _SCREAMING_SNAKE_CASE : str = getattr(lowercase__ , """image_processor_type""" , lowercase__) if hasattr(lowercase__ , """auto_map""") and "AutoImageProcessor" in config.auto_map: _SCREAMING_SNAKE_CASE : List[str] = config.auto_map["""AutoImageProcessor"""] if image_processor_class is not None: _SCREAMING_SNAKE_CASE : Optional[int] = image_processor_class_from_name(lowercase__) _SCREAMING_SNAKE_CASE : Any = image_processor_auto_map is not None _SCREAMING_SNAKE_CASE : Any = image_processor_class is not None or type(lowercase__) in IMAGE_PROCESSOR_MAPPING _SCREAMING_SNAKE_CASE : int = resolve_trust_remote_code( lowercase__ , lowercase__ , lowercase__ , lowercase__) if has_remote_code and trust_remote_code: _SCREAMING_SNAKE_CASE : Union[str, Any] = get_class_from_dynamic_module( lowercase__ , lowercase__ , **lowercase__) _SCREAMING_SNAKE_CASE : List[str] = kwargs.pop("""code_revision""" , lowercase__) if os.path.isdir(lowercase__): image_processor_class.register_for_auto_class() return image_processor_class.from_dict(lowercase__ , **lowercase__) elif image_processor_class is not None: return image_processor_class.from_dict(lowercase__ , **lowercase__) # Last try: we use the IMAGE_PROCESSOR_MAPPING. elif type(lowercase__) in IMAGE_PROCESSOR_MAPPING: _SCREAMING_SNAKE_CASE : Any = IMAGE_PROCESSOR_MAPPING[type(lowercase__)] return image_processor_class.from_dict(lowercase__ , **lowercase__) raise ValueError( f"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """ f"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """ f"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}""") @staticmethod def _lowerCAmelCase ( _A : Tuple , _A : Any): """simple docstring""" IMAGE_PROCESSOR_MAPPING.register(lowercase__ , lowercase__)
338
'''simple docstring''' from datasets.utils.patching import _PatchedModuleObj, patch_submodule from . import _test_patching def _A ( ): """simple docstring""" import os as original_os from os import path as original_path from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join __lowercase = '''__test_patch_submodule_mock__''' with patch_submodule(_test_patching , '''os.path.join''' , A__ ): # Every way to access os.path.join must be patched, and the rest must stay untouched # check os.path.join assert isinstance(_test_patching.os , _PatchedModuleObj ) assert isinstance(_test_patching.os.path , _PatchedModuleObj ) assert _test_patching.os.path.join is mock # check path.join assert isinstance(_test_patching.path , _PatchedModuleObj ) assert _test_patching.path.join is mock # check join assert _test_patching.join is mock # check that the other attributes are untouched assert _test_patching.os.rename is original_rename assert _test_patching.path.dirname is original_dirname assert _test_patching.os.path.dirname is original_dirname # Even renamed modules or objects must be patched # check renamed_os.path.join assert isinstance(_test_patching.renamed_os , _PatchedModuleObj ) assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj ) assert _test_patching.renamed_os.path.join is mock # check renamed_path.join assert isinstance(_test_patching.renamed_path , _PatchedModuleObj ) assert _test_patching.renamed_path.join is mock # check renamed_join assert _test_patching.renamed_join is mock # check that the other attributes are untouched assert _test_patching.renamed_os.rename is original_rename assert _test_patching.renamed_path.dirname is original_dirname assert _test_patching.renamed_os.path.dirname is original_dirname # check that everthing is back to normal when the patch is over assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join def _A ( ): """simple docstring""" assert _test_patching.open is open __lowercase = '''__test_patch_submodule_builtin_mock__''' # _test_patching has "open" in its globals assert _test_patching.open is open with patch_submodule(_test_patching , '''open''' , A__ ): assert _test_patching.open is mock # check that everthing is back to normal when the patch is over assert _test_patching.open is open def _A ( ): """simple docstring""" __lowercase = '''__test_patch_submodule_missing_mock__''' with patch_submodule(_test_patching , '''pandas.read_csv''' , A__ ): pass def _A ( ): """simple docstring""" __lowercase = '''__test_patch_submodule_missing_builtin_mock__''' # _test_patching doesn't have "len" in its globals assert getattr(_test_patching , '''len''' , A__ ) is None with patch_submodule(_test_patching , '''len''' , A__ ): assert _test_patching.len is mock assert _test_patching.len is len def _A ( ): """simple docstring""" __lowercase = '''__test_patch_submodule_start_and_stop_mock__''' __lowercase = patch_submodule(_test_patching , '''open''' , A__ ) assert _test_patching.open is open patch.start() assert _test_patching.open is mock patch.stop() assert _test_patching.open is open def _A ( ): """simple docstring""" from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join __lowercase = '''__test_patch_submodule_successive_join__''' __lowercase = '''__test_patch_submodule_successive_dirname__''' __lowercase = '''__test_patch_submodule_successive_rename__''' assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename with patch_submodule(_test_patching , '''os.path.join''' , A__ ): with patch_submodule(_test_patching , '''os.rename''' , A__ ): with patch_submodule(_test_patching , '''os.path.dirname''' , A__ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename # try another order with patch_submodule(_test_patching , '''os.rename''' , A__ ): with patch_submodule(_test_patching , '''os.path.join''' , A__ ): with patch_submodule(_test_patching , '''os.path.dirname''' , A__ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename def _A ( ): """simple docstring""" __lowercase = '''__test_patch_submodule_doesnt_exist_mock__''' with patch_submodule(_test_patching , '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''' , A__ ): pass with patch_submodule(_test_patching , '''os.__attribute_that_doesn_exist__''' , A__ ): pass
41
0
import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaImgaImgPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class snake_case_ ( lowerCamelCase__ , unittest.TestCase ): """simple docstring""" snake_case__ = KandinskyVaaImgaImgPipeline snake_case__ = ['image_embeds', 'negative_image_embeds', 'image'] snake_case__ = [ 'image_embeds', 'negative_image_embeds', 'image', ] snake_case__ = [ 'generator', 'height', 'width', 'strength', 'guidance_scale', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] snake_case__ = False @property def UpperCAmelCase__ (self: int ) -> int: '''simple docstring''' return 32 @property def UpperCAmelCase__ (self: Optional[Any] ) -> Optional[Any]: '''simple docstring''' return 32 @property def UpperCAmelCase__ (self: Optional[int] ) -> str: '''simple docstring''' return self.time_input_dim @property def UpperCAmelCase__ (self: Tuple ) -> int: '''simple docstring''' return self.time_input_dim * 4 @property def UpperCAmelCase__ (self: Dict ) -> Dict: '''simple docstring''' return 100 @property def UpperCAmelCase__ (self: List[Any] ) -> Union[str, Any]: '''simple docstring''' torch.manual_seed(0 ) __a : Any = { "in_channels": 4, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } __a : List[str] = UNetaDConditionModel(**lowercase__ ) return model @property def UpperCAmelCase__ (self: Dict ) -> Tuple: '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def UpperCAmelCase__ (self: List[Any] ) -> List[str]: '''simple docstring''' torch.manual_seed(0 ) __a : int = VQModel(**self.dummy_movq_kwargs ) return model def UpperCAmelCase__ (self: str ) -> List[str]: '''simple docstring''' __a : List[str] = self.dummy_unet __a : Optional[Any] = self.dummy_movq __a : Optional[int] = { "num_train_timesteps": 1000, "beta_schedule": "linear", "beta_start": 0.0_00_85, "beta_end": 0.0_12, "clip_sample": False, "set_alpha_to_one": False, "steps_offset": 0, "prediction_type": "epsilon", "thresholding": False, } __a : Union[str, Any] = DDIMScheduler(**lowercase__ ) __a : Any = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def UpperCAmelCase__ (self: Union[str, Any] , __UpperCAmelCase: str , __UpperCAmelCase: List[str]=0 ) -> str: '''simple docstring''' __a : int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowercase__ ) ).to(lowercase__ ) __a : List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( lowercase__ ) # create init_image __a : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase__ ) ).to(lowercase__ ) __a : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] __a : Union[str, Any] = Image.fromarray(np.uinta(lowercase__ ) ).convert("RGB" ).resize((256, 256) ) if str(lowercase__ ).startswith("mps" ): __a : Optional[int] = torch.manual_seed(lowercase__ ) else: __a : Dict = torch.Generator(device=lowercase__ ).manual_seed(lowercase__ ) __a : int = { "image": init_image, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 10, "guidance_scale": 7.0, "strength": 0.2, "output_type": "np", } return inputs def UpperCAmelCase__ (self: Union[str, Any] ) -> Any: '''simple docstring''' __a : int = "cpu" __a : Tuple = self.get_dummy_components() __a : Optional[Any] = self.pipeline_class(**lowercase__ ) __a : Union[str, Any] = pipe.to(lowercase__ ) pipe.set_progress_bar_config(disable=lowercase__ ) __a : Any = pipe(**self.get_dummy_inputs(lowercase__ ) ) __a : Dict = output.images __a : str = pipe( **self.get_dummy_inputs(lowercase__ ) , return_dict=lowercase__ , )[0] __a : List[Any] = image[0, -3:, -3:, -1] __a : int = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __a : Union[str, Any] = np.array( [0.6_19_97_78, 0.63_98_44_06, 0.46_14_57_85, 0.62_94_49_84, 0.5_62_22_15, 0.47_30_61_32, 0.47_44_14_56, 0.4_60_76_06, 0.48_71_92_63] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}' @slow @require_torch_gpu class snake_case_ ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ (self: Dict ) -> Tuple: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ (self: List[Any] ) -> Optional[Any]: '''simple docstring''' __a : Tuple = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_img2img_frog.npy" ) __a : Optional[Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) __a : str = "A red cartoon frog, 4k" __a : List[Any] = KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa ) pipe_prior.to(lowercase__ ) __a : str = KandinskyVaaImgaImgPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa ) __a : Union[str, Any] = pipeline.to(lowercase__ ) pipeline.set_progress_bar_config(disable=lowercase__ ) __a : Tuple = torch.Generator(device="cpu" ).manual_seed(0 ) __a , __a : Union[str, Any] = pipe_prior( lowercase__ , generator=lowercase__ , num_inference_steps=5 , negative_prompt="" , ).to_tuple() __a : Any = pipeline( image=lowercase__ , image_embeds=lowercase__ , negative_image_embeds=lowercase__ , generator=lowercase__ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , ) __a : Dict = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowercase__ , lowercase__ )
351
'''simple docstring''' import os import tempfile import unittest from transformers import NezhaConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, ) from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST class lowercase_ : """simple docstring""" def __init__( self : Dict ,lowercase__ : Dict ,lowercase__ : int=1_3 ,lowercase__ : List[str]=7 ,lowercase__ : int=True ,lowercase__ : int=True ,lowercase__ : Union[str, Any]=True ,lowercase__ : List[Any]=True ,lowercase__ : str=9_9 ,lowercase__ : Optional[Any]=3_2 ,lowercase__ : Union[str, Any]=5 ,lowercase__ : List[Any]=4 ,lowercase__ : str=3_7 ,lowercase__ : Tuple="gelu" ,lowercase__ : List[Any]=0.1 ,lowercase__ : Dict=0.1 ,lowercase__ : int=1_2_8 ,lowercase__ : Dict=3_2 ,lowercase__ : Dict=1_6 ,lowercase__ : Any=2 ,lowercase__ : int=0.0_2 ,lowercase__ : List[str]=3 ,lowercase__ : Dict=4 ,lowercase__ : Optional[int]=None ,): __lowercase = parent __lowercase = batch_size __lowercase = seq_length __lowercase = is_training __lowercase = use_input_mask __lowercase = use_token_type_ids __lowercase = use_labels __lowercase = vocab_size __lowercase = hidden_size __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = intermediate_size __lowercase = hidden_act __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = max_position_embeddings __lowercase = type_vocab_size __lowercase = type_sequence_label_size __lowercase = initializer_range __lowercase = num_labels __lowercase = num_choices __lowercase = scope def SCREAMING_SNAKE_CASE ( self : Optional[int] ): __lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) __lowercase = None if self.use_input_mask: __lowercase = random_attention_mask([self.batch_size, self.seq_length] ) __lowercase = None if self.use_token_type_ids: __lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) __lowercase = None __lowercase = None __lowercase = None if self.use_labels: __lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) __lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) __lowercase = ids_tensor([self.batch_size] ,self.num_choices ) __lowercase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): return NezhaConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=lowercase__ ,initializer_range=self.initializer_range ,) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) = self.prepare_config_and_inputs() __lowercase = True __lowercase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __lowercase = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Union[str, Any] ,lowercase__ : List[str] ,lowercase__ : List[str] ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : Tuple ,lowercase__ : str ): __lowercase = NezhaModel(config=lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ) __lowercase = model(lowercase__ ,token_type_ids=lowercase__ ) __lowercase = model(lowercase__ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Dict ,lowercase__ : str ,lowercase__ : Optional[Any] ,lowercase__ : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : Tuple ,lowercase__ : Optional[int] ,lowercase__ : List[Any] ,): __lowercase = True __lowercase = NezhaModel(lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = model( lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,encoder_hidden_states=lowercase__ ,encoder_attention_mask=lowercase__ ,) __lowercase = model( lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,encoder_hidden_states=lowercase__ ,) __lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Optional[Any] ): __lowercase = NezhaForMaskedLM(config=lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Any ,lowercase__ : int ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[int] ,lowercase__ : Any ): __lowercase = NezhaForNextSentencePrediction(config=lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = model( lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) ) def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : str ,lowercase__ : Dict ,lowercase__ : Tuple ,lowercase__ : Dict ,lowercase__ : Tuple ,lowercase__ : int ,lowercase__ : int ): __lowercase = NezhaForPreTraining(config=lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = model( lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ,next_sentence_label=lowercase__ ,) self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[Any] ,lowercase__ : Tuple ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Optional[int] ,lowercase__ : Union[str, Any] ): __lowercase = NezhaForQuestionAnswering(config=lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = model( lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,start_positions=lowercase__ ,end_positions=lowercase__ ,) self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Tuple ,lowercase__ : str ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Any ,lowercase__ : Optional[int] ,lowercase__ : int ): __lowercase = self.num_labels __lowercase = NezhaForSequenceClassification(lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : Union[str, Any] ,lowercase__ : List[str] ,lowercase__ : int ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Any ,lowercase__ : Optional[Any] ): __lowercase = self.num_labels __lowercase = NezhaForTokenClassification(config=lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : List[Any] ,lowercase__ : str ): __lowercase = self.num_choices __lowercase = NezhaForMultipleChoice(config=lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() __lowercase = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() __lowercase = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() __lowercase = model( lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): __lowercase = self.prepare_config_and_inputs() ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) = config_and_inputs __lowercase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class lowercase_ (lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = ( ( NezhaModel, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE : Tuple = ( { 'feature-extraction': NezhaModel, 'fill-mask': NezhaForMaskedLM, 'question-answering': NezhaForQuestionAnswering, 'text-classification': NezhaForSequenceClassification, 'token-classification': NezhaForTokenClassification, 'zero-shot': NezhaForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE : List[str] = True def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[str] ,lowercase__ : str ,lowercase__ : Any=False ): __lowercase = super()._prepare_for_class(lowercase__ ,lowercase__ ,return_labels=lowercase__ ) if return_labels: if model_class in get_values(lowercase__ ): __lowercase = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=lowercase__ ) __lowercase = torch.zeros( self.model_tester.batch_size ,dtype=torch.long ,device=lowercase__ ) return inputs_dict def SCREAMING_SNAKE_CASE ( self : Tuple ): __lowercase = NezhaModelTester(self ) __lowercase = ConfigTester(self ,config_class=lowercase__ ,hidden_size=3_7 ) def SCREAMING_SNAKE_CASE ( self : int ): self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : int ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Any ): __lowercase = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Any ): # This regression test was failing with PyTorch < 1.3 ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() __lowercase = None self.model_tester.create_and_check_model_as_decoder( lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,) def SCREAMING_SNAKE_CASE ( self : int ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*lowercase__ ) def SCREAMING_SNAKE_CASE ( self : int ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_next_sequence_prediction(*lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*lowercase__ ) def SCREAMING_SNAKE_CASE ( self : str ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowercase__ ) def SCREAMING_SNAKE_CASE ( self : str ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowercase__ ) def SCREAMING_SNAKE_CASE ( self : int ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowercase__ ) @slow def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase = NezhaModel.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) @slow @require_torch_gpu def SCREAMING_SNAKE_CASE ( self : Optional[int] ): __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # NezhaForMultipleChoice behaves incorrectly in JIT environments. if model_class == NezhaForMultipleChoice: return __lowercase = True __lowercase = model_class(config=lowercase__ ) __lowercase = self._prepare_for_class(lowercase__ ,lowercase__ ) __lowercase = torch.jit.trace( lowercase__ ,(inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(lowercase__ ,os.path.join(lowercase__ ,'''bert.pt''' ) ) __lowercase = torch.jit.load(os.path.join(lowercase__ ,'''bert.pt''' ) ,map_location=lowercase__ ) loaded(inputs_dict['''input_ids'''].to(lowercase__ ) ,inputs_dict['''attention_mask'''].to(lowercase__ ) ) @require_torch class lowercase_ (unittest.TestCase ): """simple docstring""" @slow def SCREAMING_SNAKE_CASE ( self : int ): __lowercase = NezhaModel.from_pretrained('''sijunhe/nezha-cn-base''' ) __lowercase = torch.tensor([[0, 1, 2, 3, 4, 5]] ) __lowercase = torch.tensor([[0, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __lowercase = model(lowercase__ ,attention_mask=lowercase__ )[0] __lowercase = torch.Size((1, 6, 7_6_8) ) self.assertEqual(output.shape ,lowercase__ ) __lowercase = torch.tensor([[[0.0_6_8_5, 0.2_4_4_1, 0.1_1_0_2], [0.0_6_0_0, 0.1_9_0_6, 0.1_3_4_9], [0.0_2_2_1, 0.0_8_1_9, 0.0_5_8_6]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,lowercase__ ,atol=1e-4 ) ) @slow def SCREAMING_SNAKE_CASE ( self : Dict ): __lowercase = NezhaForMaskedLM.from_pretrained('''sijunhe/nezha-cn-base''' ) __lowercase = torch.tensor([[0, 1, 2, 3, 4, 5]] ) __lowercase = torch.tensor([[1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __lowercase = model(lowercase__ ,attention_mask=lowercase__ )[0] __lowercase = torch.Size((1, 6, 2_1_1_2_8) ) self.assertEqual(output.shape ,lowercase__ ) __lowercase = torch.tensor( [[-2.7_9_3_9, -1.7_9_0_2, -2.2_1_8_9], [-2.8_5_8_5, -1.8_9_0_8, -2.3_7_2_3], [-2.6_4_9_9, -1.7_7_5_0, -2.2_5_5_8]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,lowercase__ ,atol=1e-4 ) )
41
0
"""simple docstring""" import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SegformerConfig, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() A_ : Optional[Any] = logging.get_logger(__name__) def __snake_case ( __A : Dict , __A : Dict=False ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE : Any = OrderedDict() for key, value in state_dict.items(): if encoder_only and not key.startswith('head' ): SCREAMING_SNAKE_CASE : Optional[int] = 'segformer.encoder.' + key if key.startswith('backbone' ): SCREAMING_SNAKE_CASE : Optional[Any] = key.replace('backbone' , 'segformer.encoder' ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 SCREAMING_SNAKE_CASE : Optional[int] = key[key.find('patch_embed' ) + len('patch_embed' )] SCREAMING_SNAKE_CASE : Tuple = key.replace(F"""patch_embed{idx}""" , F"""patch_embeddings.{int(A__ )-1}""" ) if "norm" in key: SCREAMING_SNAKE_CASE : Optional[int] = key.replace('norm' , 'layer_norm' ) if "segformer.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 SCREAMING_SNAKE_CASE : Optional[int] = key[key.find('segformer.encoder.layer_norm' ) + len('segformer.encoder.layer_norm' )] SCREAMING_SNAKE_CASE : List[str] = key.replace(F"""layer_norm{idx}""" , F"""layer_norm.{int(A__ )-1}""" ) if "layer_norm1" in key: SCREAMING_SNAKE_CASE : Any = key.replace('layer_norm1' , 'layer_norm_1' ) if "layer_norm2" in key: SCREAMING_SNAKE_CASE : Optional[Any] = key.replace('layer_norm2' , 'layer_norm_2' ) if "block" in key: # replace for example block1 by block.0 SCREAMING_SNAKE_CASE : Optional[Any] = key[key.find('block' ) + len('block' )] SCREAMING_SNAKE_CASE : str = key.replace(F"""block{idx}""" , F"""block.{int(A__ )-1}""" ) if "attn.q" in key: SCREAMING_SNAKE_CASE : Dict = key.replace('attn.q' , 'attention.self.query' ) if "attn.proj" in key: SCREAMING_SNAKE_CASE : Tuple = key.replace('attn.proj' , 'attention.output.dense' ) if "attn" in key: SCREAMING_SNAKE_CASE : Union[str, Any] = key.replace('attn' , 'attention.self' ) if "fc1" in key: SCREAMING_SNAKE_CASE : List[Any] = key.replace('fc1' , 'dense1' ) if "fc2" in key: SCREAMING_SNAKE_CASE : List[str] = key.replace('fc2' , 'dense2' ) if "linear_pred" in key: SCREAMING_SNAKE_CASE : Dict = key.replace('linear_pred' , 'classifier' ) if "linear_fuse" in key: SCREAMING_SNAKE_CASE : Any = key.replace('linear_fuse.conv' , 'linear_fuse' ) SCREAMING_SNAKE_CASE : Optional[Any] = key.replace('linear_fuse.bn' , 'batch_norm' ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 SCREAMING_SNAKE_CASE : str = key[key.find('linear_c' ) + len('linear_c' )] SCREAMING_SNAKE_CASE : Union[str, Any] = key.replace(F"""linear_c{idx}""" , F"""linear_c.{int(A__ )-1}""" ) if key.startswith('head' ): SCREAMING_SNAKE_CASE : List[str] = key.replace('head' , 'classifier' ) SCREAMING_SNAKE_CASE : List[Any] = value return new_state_dict def __snake_case ( __A : List[Any] , __A : Tuple ) -> Optional[Any]: '''simple docstring''' for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) SCREAMING_SNAKE_CASE : Any = state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.weight""" ) SCREAMING_SNAKE_CASE : int = state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.bias""" ) # next, add keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE : Union[str, Any] = kv_weight[ : config.hidden_sizes[i], : ] SCREAMING_SNAKE_CASE : str = kv_bias[: config.hidden_sizes[i]] SCREAMING_SNAKE_CASE : List[str] = kv_weight[ config.hidden_sizes[i] :, : ] SCREAMING_SNAKE_CASE : List[str] = kv_bias[ config.hidden_sizes[i] : ] def __snake_case ( ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg' SCREAMING_SNAKE_CASE : List[str] = Image.open(requests.get(A__ , stream=A__ ).raw ) return image @torch.no_grad() def __snake_case ( __A : Dict , __A : List[str] , __A : int ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = SegformerConfig() SCREAMING_SNAKE_CASE : Optional[int] = False # set attributes based on model_name SCREAMING_SNAKE_CASE : Dict = 'huggingface/label-files' if "segformer" in model_name: SCREAMING_SNAKE_CASE : Tuple = model_name[len('segformer.' ) : len('segformer.' ) + 2] if "ade" in model_name: SCREAMING_SNAKE_CASE : Union[str, Any] = 150 SCREAMING_SNAKE_CASE : str = 'ade20k-id2label.json' SCREAMING_SNAKE_CASE : int = (1, 150, 128, 128) elif "city" in model_name: SCREAMING_SNAKE_CASE : Dict = 19 SCREAMING_SNAKE_CASE : Optional[int] = 'cityscapes-id2label.json' SCREAMING_SNAKE_CASE : List[Any] = (1, 19, 128, 128) else: raise ValueError(F"""Model {model_name} not supported""" ) elif "mit" in model_name: SCREAMING_SNAKE_CASE : str = True SCREAMING_SNAKE_CASE : List[Any] = model_name[4:6] SCREAMING_SNAKE_CASE : List[Any] = 1000 SCREAMING_SNAKE_CASE : List[str] = 'imagenet-1k-id2label.json' SCREAMING_SNAKE_CASE : Optional[int] = (1, 1000) else: raise ValueError(F"""Model {model_name} not supported""" ) # set config attributes SCREAMING_SNAKE_CASE : Any = json.load(open(hf_hub_download(A__ , A__ , repo_type='dataset' ) , 'r' ) ) SCREAMING_SNAKE_CASE : Optional[int] = {int(A__ ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : Optional[Any] = idalabel SCREAMING_SNAKE_CASE : Tuple = {v: k for k, v in idalabel.items()} if size == "b0": pass elif size == "b1": SCREAMING_SNAKE_CASE : Any = [64, 128, 320, 512] SCREAMING_SNAKE_CASE : Dict = 256 elif size == "b2": SCREAMING_SNAKE_CASE : int = [64, 128, 320, 512] SCREAMING_SNAKE_CASE : Dict = 768 SCREAMING_SNAKE_CASE : Dict = [3, 4, 6, 3] elif size == "b3": SCREAMING_SNAKE_CASE : List[str] = [64, 128, 320, 512] SCREAMING_SNAKE_CASE : Optional[Any] = 768 SCREAMING_SNAKE_CASE : Union[str, Any] = [3, 4, 18, 3] elif size == "b4": SCREAMING_SNAKE_CASE : Optional[int] = [64, 128, 320, 512] SCREAMING_SNAKE_CASE : Any = 768 SCREAMING_SNAKE_CASE : Union[str, Any] = [3, 8, 27, 3] elif size == "b5": SCREAMING_SNAKE_CASE : Tuple = [64, 128, 320, 512] SCREAMING_SNAKE_CASE : List[str] = 768 SCREAMING_SNAKE_CASE : Optional[int] = [3, 6, 40, 3] else: raise ValueError(F"""Size {size} not supported""" ) # load image processor (only resize + normalize) SCREAMING_SNAKE_CASE : Any = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=A__ , align=A__ , do_random_crop=A__ ) # prepare image SCREAMING_SNAKE_CASE : str = prepare_img() SCREAMING_SNAKE_CASE : Any = image_processor(images=A__ , return_tensors='pt' ).pixel_values logger.info(F"""Converting model {model_name}...""" ) # load original state dict if encoder_only: SCREAMING_SNAKE_CASE : Optional[int] = torch.load(A__ , map_location=torch.device('cpu' ) ) else: SCREAMING_SNAKE_CASE : List[Any] = torch.load(A__ , map_location=torch.device('cpu' ) )['state_dict'] # rename keys SCREAMING_SNAKE_CASE : Dict = rename_keys(A__ , encoder_only=A__ ) if not encoder_only: del state_dict["decode_head.conv_seg.weight"] del state_dict["decode_head.conv_seg.bias"] # key and value matrices need special treatment read_in_k_v(A__ , A__ ) # create HuggingFace model and load state dict if encoder_only: SCREAMING_SNAKE_CASE : int = False SCREAMING_SNAKE_CASE : str = SegformerForImageClassification(A__ ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = SegformerForSemanticSegmentation(A__ ) model.load_state_dict(A__ ) model.eval() # forward pass SCREAMING_SNAKE_CASE : Optional[int] = model(A__ ) SCREAMING_SNAKE_CASE : str = outputs.logits # set expected_slice based on model name # ADE20k checkpoints if model_name == "segformer.b0.512x512.ade.160k": SCREAMING_SNAKE_CASE : List[str] = torch.tensor( [ [[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]], [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]], [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]], ] ) elif model_name == "segformer.b1.512x512.ade.160k": SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor( [ [[-7.5_8_2_0, -8.7_2_3_1, -8.3_2_1_5], [-8.0_6_0_0, -10.3529, -10.0304], [-7.5_2_0_8, -9.4_1_0_3, -9.6_2_3_9]], [[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]], [[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]], ] ) elif model_name == "segformer.b2.512x512.ade.160k": SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor( [ [[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]], [[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]], [[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]], ] ) elif model_name == "segformer.b3.512x512.ade.160k": SCREAMING_SNAKE_CASE : str = torch.tensor( [ [[-9.0_8_7_8, -10.2081, -10.1891], [-9.3_1_4_4, -10.7941, -10.9843], [-9.2_2_9_4, -10.3855, -10.5704]], [[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]], [[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]], ] ) elif model_name == "segformer.b4.512x512.ade.160k": SCREAMING_SNAKE_CASE : Dict = torch.tensor( [ [[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]], [[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]], [[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]], ] ) elif model_name == "segformer.b5.640x640.ade.160k": SCREAMING_SNAKE_CASE : int = torch.tensor( [ [[-9.5_5_2_4, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5_8_4_2, -12.8851, -13.9414]], [[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]], [[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]], ] ) # Cityscapes checkpoints elif model_name == "segformer.b0.1024x1024.city.160k": SCREAMING_SNAKE_CASE : List[str] = torch.tensor( [ [[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]], [[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]], [[0.5_3_7_4, 0.1_0_6_7, -0.4_7_4_2], [0.1_1_4_1, -0.2_2_5_5, -0.7_0_9_9], [-0.3_0_0_0, -0.5_9_2_4, -1.3_1_0_5]], ] ) elif model_name == "segformer.b0.512x1024.city.160k": SCREAMING_SNAKE_CASE : str = torch.tensor( [ [[-7.8_2_1_7, -9.8_7_6_7, -10.1717], [-9.4_4_3_8, -10.9058, -11.4047], [-9.7_9_3_9, -12.3495, -12.1079]], [[-7.1_5_1_4, -9.5_3_3_6, -10.0860], [-9.7_7_7_6, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]], [[0.3_0_2_1, 0.0_8_0_5, -0.2_3_1_0], [-0.0_3_2_8, -0.1_6_0_5, -0.2_7_1_4], [-0.1_4_0_8, -0.5_4_7_7, -0.6_9_7_6]], ] ) elif model_name == "segformer.b0.640x1280.city.160k": SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor( [ [ [-1.1372E01, -1.2787E01, -1.3477E01], [-1.2536E01, -1.4194E01, -1.4409E01], [-1.3217E01, -1.4888E01, -1.5327E01], ], [ [-1.4791E01, -1.7122E01, -1.8277E01], [-1.7163E01, -1.9192E01, -1.9533E01], [-1.7897E01, -1.9991E01, -2.0315E01], ], [ [7.6723E-01, 4.1921E-01, -7.7878E-02], [4.7772E-01, 9.5557E-03, -2.8082E-01], [3.6032E-01, -2.4826E-01, -5.1168E-01], ], ] ) elif model_name == "segformer.b0.768x768.city.160k": SCREAMING_SNAKE_CASE : str = torch.tensor( [ [[-9.4_9_5_9, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]], [[-9.8_9_0_5, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]], [[0.2_2_1_3, 0.0_1_9_2, -0.2_4_6_6], [-0.1_7_3_1, -0.4_2_1_3, -0.4_8_7_4], [-0.3_1_2_6, -0.6_5_4_1, -1.1_3_8_9]], ] ) elif model_name == "segformer.b1.1024x1024.city.160k": SCREAMING_SNAKE_CASE : Dict = torch.tensor( [ [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]], [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]], [[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]], ] ) elif model_name == "segformer.b2.1024x1024.city.160k": SCREAMING_SNAKE_CASE : List[str] = torch.tensor( [ [[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]], [[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]], [[-4.5_1_7_8, -5.5_0_3_7, -6.5_1_0_9], [-5.0_8_8_4, -7.2_1_7_4, -8.0_3_3_4], [-4.4_1_5_6, -5.8_1_1_7, -7.2_9_7_0]], ] ) elif model_name == "segformer.b3.1024x1024.city.160k": SCREAMING_SNAKE_CASE : List[str] = torch.tensor( [ [[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]], [[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]], [[-4.7_3_4_9, -4.9_5_8_8, -5.0_9_6_6], [-4.3_2_1_0, -6.9_3_2_5, -7.2_5_9_1], [-3.4_3_1_2, -4.7_4_8_4, -7.1_9_1_7]], ] ) elif model_name == "segformer.b4.1024x1024.city.160k": SCREAMING_SNAKE_CASE : int = torch.tensor( [ [[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]], [[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]], [[1.0_4_9_1, 0.8_2_8_9, 1.0_3_1_0], [1.1_0_4_4, 0.5_2_1_9, 0.8_0_5_5], [1.0_8_9_9, 0.6_9_2_6, 0.5_5_9_0]], ] ) elif model_name == "segformer.b5.1024x1024.city.160k": SCREAMING_SNAKE_CASE : Any = torch.tensor( [ [[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]], [[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]], [[-1.7_9_9_0, -2.0_9_5_1, -1.7_7_8_4], [-2.6_3_9_7, -3.8_2_4_5, -3.9_6_8_6], [-1.5_2_6_4, -2.8_1_2_6, -2.9_3_1_6]], ] ) else: SCREAMING_SNAKE_CASE : Any = logits.argmax(-1 ).item() print('Predicted class:' , model.config.idalabel[predicted_class_idx] ) # verify logits if not encoder_only: assert logits.shape == expected_shape assert torch.allclose(logits[0, :3, :3, :3] , A__ , atol=1E-2 ) # finally, save model and image processor logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(A__ ).mkdir(exist_ok=A__ ) model.save_pretrained(A__ ) image_processor.save_pretrained(A__ ) if __name__ == "__main__": A_ : Dict = argparse.ArgumentParser() parser.add_argument( '--model_name', default='segformer.b0.512x512.ade.160k', type=str, help='Name of the model you\'d like to convert.', ) parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) A_ : List[Any] = parser.parse_args() convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
265
'''simple docstring''' from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar lowerCAmelCase__ = TypeVar('''KEY''') lowerCAmelCase__ = TypeVar('''VAL''') @dataclass(frozen=lowerCamelCase__ , slots=lowerCamelCase__ ) class lowercase_ (Generic[KEY, VAL] ): """simple docstring""" SCREAMING_SNAKE_CASE : KEY SCREAMING_SNAKE_CASE : VAL class lowercase_ (_Item ): """simple docstring""" def __init__( self : Optional[int] ): super().__init__(lowercase__ ,lowercase__ ) def __bool__( self : List[str] ): return False lowerCAmelCase__ = _DeletedItem() class lowercase_ (MutableMapping[KEY, VAL] ): """simple docstring""" def __init__( self : Dict ,lowercase__ : int = 8 ,lowercase__ : float = 0.7_5 ): __lowercase = initial_block_size __lowercase = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 __lowercase = capacity_factor __lowercase = 0 def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : KEY ): return hash(lowercase__ ) % len(self._buckets ) def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : int ): return (ind + 1) % len(self._buckets ) def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : int ,lowercase__ : KEY ,lowercase__ : VAL ): __lowercase = self._buckets[ind] if not stored: __lowercase = _Item(lowercase__ ,lowercase__ ) self._len += 1 return True elif stored.key == key: __lowercase = _Item(lowercase__ ,lowercase__ ) return True else: return False def SCREAMING_SNAKE_CASE ( self : Dict ): __lowercase = len(self._buckets ) * self._capacity_factor return len(self ) >= int(lowercase__ ) def SCREAMING_SNAKE_CASE ( self : int ): if len(self._buckets ) <= self._initial_block_size: return False __lowercase = len(self._buckets ) * self._capacity_factor / 2 return len(self ) < limit def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : int ): __lowercase = self._buckets __lowercase = [None] * new_size __lowercase = 0 for item in old_buckets: if item: self._add_item(item.key ,item.val ) def SCREAMING_SNAKE_CASE ( self : str ): self._resize(len(self._buckets ) * 2 ) def SCREAMING_SNAKE_CASE ( self : Tuple ): self._resize(len(self._buckets ) // 2 ) def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : KEY ): __lowercase = self._get_bucket_index(lowercase__ ) for _ in range(len(self._buckets ) ): yield ind __lowercase = self._get_next_ind(lowercase__ ) def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : KEY ,lowercase__ : VAL ): for ind in self._iterate_buckets(lowercase__ ): if self._try_set(lowercase__ ,lowercase__ ,lowercase__ ): break def __setitem__( self : str ,lowercase__ : KEY ,lowercase__ : VAL ): if self._is_full(): self._size_up() self._add_item(lowercase__ ,lowercase__ ) def __delitem__( self : Tuple ,lowercase__ : KEY ): for ind in self._iterate_buckets(lowercase__ ): __lowercase = self._buckets[ind] if item is None: raise KeyError(lowercase__ ) if item is _deleted: continue if item.key == key: __lowercase = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self : Tuple ,lowercase__ : KEY ): for ind in self._iterate_buckets(lowercase__ ): __lowercase = self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(lowercase__ ) def __len__( self : Optional[int] ): return self._len def __iter__( self : str ): yield from (item.key for item in self._buckets if item) def __repr__( self : Optional[Any] ): __lowercase = ''' ,'''.join( F"{item.key}: {item.val}" for item in self._buckets if item ) return F"HashMap({val_string})"
41
0
from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .attention_processor import AttentionProcessor, AttnProcessor from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder @dataclass class __A ( lowerCamelCase__ ): UpperCamelCase = 42 class __A ( lowerCamelCase__ , lowerCamelCase__ ): UpperCamelCase = True @register_to_config def __init__( self :Optional[int] , __snake_case :int = 3 , __snake_case :int = 3 , __snake_case :Tuple[str] = ("DownEncoderBlock2D",) , __snake_case :Tuple[str] = ("UpDecoderBlock2D",) , __snake_case :Tuple[int] = (64,) , __snake_case :int = 1 , __snake_case :str = "silu" , __snake_case :int = 4 , __snake_case :int = 32 , __snake_case :int = 32 , __snake_case :float = 0.18215 , ): '''simple docstring''' super().__init__() # pass init params to Encoder __magic_name__ : Any =Encoder( in_channels=lowercase__ , out_channels=lowercase__ , down_block_types=lowercase__ , block_out_channels=lowercase__ , layers_per_block=lowercase__ , act_fn=lowercase__ , norm_num_groups=lowercase__ , double_z=lowercase__ , ) # pass init params to Decoder __magic_name__ : Any =Decoder( in_channels=lowercase__ , out_channels=lowercase__ , up_block_types=lowercase__ , block_out_channels=lowercase__ , layers_per_block=lowercase__ , norm_num_groups=lowercase__ , act_fn=lowercase__ , ) __magic_name__ : Union[str, Any] =nn.Convad(2 * latent_channels , 2 * latent_channels , 1 ) __magic_name__ : int =nn.Convad(lowercase__ , lowercase__ , 1 ) __magic_name__ : Tuple =False __magic_name__ : Union[str, Any] =False # only relevant if vae tiling is enabled __magic_name__ : List[str] =self.config.sample_size __magic_name__ : Tuple =( self.config.sample_size[0] if isinstance(self.config.sample_size , (list, tuple) ) else self.config.sample_size ) __magic_name__ : List[Any] =int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) ) __magic_name__ : str =0.25 def A__ ( self :Optional[int] , __snake_case :str , __snake_case :str=False ): '''simple docstring''' if isinstance(lowercase__ , (Encoder, Decoder) ): __magic_name__ : Dict =value def A__ ( self :Any , __snake_case :bool = True ): '''simple docstring''' __magic_name__ : Tuple =use_tiling def A__ ( self :int ): '''simple docstring''' self.enable_tiling(lowercase__ ) def A__ ( self :Optional[int] ): '''simple docstring''' __magic_name__ : str =True def A__ ( self :Optional[Any] ): '''simple docstring''' __magic_name__ : Optional[Any] =False @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def A__ ( self :Optional[Any] ): '''simple docstring''' __magic_name__ : Any ={} def fn_recursive_add_processors(__snake_case :str , __snake_case :torch.nn.Module , __snake_case :Dict[str, AttentionProcessor] ): if hasattr(lowercase__ , """set_processor""" ): __magic_name__ : Optional[Any] =module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}" , lowercase__ , lowercase__ ) return processors for name, module in self.named_children(): fn_recursive_add_processors(lowercase__ , lowercase__ , lowercase__ ) return processors def A__ ( self :Union[str, Any] , __snake_case :Union[AttentionProcessor, Dict[str, AttentionProcessor]] ): '''simple docstring''' __magic_name__ : List[Any] =len(self.attn_processors.keys() ) if isinstance(lowercase__ , lowercase__ ) and len(lowercase__ ) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(lowercase__ )} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(__snake_case :str , __snake_case :torch.nn.Module , __snake_case :Tuple ): if hasattr(lowercase__ , """set_processor""" ): if not isinstance(lowercase__ , lowercase__ ): module.set_processor(lowercase__ ) else: module.set_processor(processor.pop(f"{name}.processor" ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}" , lowercase__ , lowercase__ ) for name, module in self.named_children(): fn_recursive_attn_processor(lowercase__ , lowercase__ , lowercase__ ) def A__ ( self :Dict ): '''simple docstring''' self.set_attn_processor(AttnProcessor() ) @apply_forward_hook def A__ ( self :Optional[int] , __snake_case :torch.FloatTensor , __snake_case :bool = True ): '''simple docstring''' if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size): return self.tiled_encode(lowercase__ , return_dict=lowercase__ ) if self.use_slicing and x.shape[0] > 1: __magic_name__ : str =[self.encoder(lowercase__ ) for x_slice in x.split(1 )] __magic_name__ : Any =torch.cat(lowercase__ ) else: __magic_name__ : Tuple =self.encoder(lowercase__ ) __magic_name__ : Optional[Any] =self.quant_conv(lowercase__ ) __magic_name__ : Dict =DiagonalGaussianDistribution(lowercase__ ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=lowercase__ ) def A__ ( self :List[str] , __snake_case :torch.FloatTensor , __snake_case :bool = True ): '''simple docstring''' if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size): return self.tiled_decode(lowercase__ , return_dict=lowercase__ ) __magic_name__ : Optional[Any] =self.post_quant_conv(lowercase__ ) __magic_name__ : List[Any] =self.decoder(lowercase__ ) if not return_dict: return (dec,) return DecoderOutput(sample=lowercase__ ) @apply_forward_hook def A__ ( self :int , __snake_case :torch.FloatTensor , __snake_case :bool = True ): '''simple docstring''' if self.use_slicing and z.shape[0] > 1: __magic_name__ : str =[self._decode(lowercase__ ).sample for z_slice in z.split(1 )] __magic_name__ : Tuple =torch.cat(lowercase__ ) else: __magic_name__ : Union[str, Any] =self._decode(lowercase__ ).sample if not return_dict: return (decoded,) return DecoderOutput(sample=lowercase__ ) def A__ ( self :Any , __snake_case :Tuple , __snake_case :Optional[int] , __snake_case :Tuple ): '''simple docstring''' __magic_name__ : List[str] =min(a.shape[2] , b.shape[2] , lowercase__ ) for y in range(lowercase__ ): __magic_name__ : List[Any] =a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent) return b def A__ ( self :Optional[int] , __snake_case :Any , __snake_case :Union[str, Any] , __snake_case :Any ): '''simple docstring''' __magic_name__ : Dict =min(a.shape[3] , b.shape[3] , lowercase__ ) for x in range(lowercase__ ): __magic_name__ : List[Any] =a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent) return b def A__ ( self :int , __snake_case :torch.FloatTensor , __snake_case :bool = True ): '''simple docstring''' __magic_name__ : List[Any] =int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) ) __magic_name__ : Dict =int(self.tile_latent_min_size * self.tile_overlap_factor ) __magic_name__ : Any =self.tile_latent_min_size - blend_extent # Split the image into 512x512 tiles and encode them separately. __magic_name__ : Union[str, Any] =[] for i in range(0 , x.shape[2] , lowercase__ ): __magic_name__ : Optional[Any] =[] for j in range(0 , x.shape[3] , lowercase__ ): __magic_name__ : int =x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size] __magic_name__ : List[str] =self.encoder(lowercase__ ) __magic_name__ : Any =self.quant_conv(lowercase__ ) row.append(lowercase__ ) rows.append(lowercase__ ) __magic_name__ : List[Any] =[] for i, row in enumerate(lowercase__ ): __magic_name__ : Tuple =[] for j, tile in enumerate(lowercase__ ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: __magic_name__ : Optional[int] =self.blend_v(rows[i - 1][j] , lowercase__ , lowercase__ ) if j > 0: __magic_name__ : Union[str, Any] =self.blend_h(row[j - 1] , lowercase__ , lowercase__ ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(lowercase__ , dim=3 ) ) __magic_name__ : Optional[int] =torch.cat(lowercase__ , dim=2 ) __magic_name__ : Union[str, Any] =DiagonalGaussianDistribution(lowercase__ ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=lowercase__ ) def A__ ( self :List[str] , __snake_case :torch.FloatTensor , __snake_case :bool = True ): '''simple docstring''' __magic_name__ : Any =int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) ) __magic_name__ : int =int(self.tile_sample_min_size * self.tile_overlap_factor ) __magic_name__ : Optional[Any] =self.tile_sample_min_size - blend_extent # Split z into overlapping 64x64 tiles and decode them separately. # The tiles have an overlap to avoid seams between tiles. __magic_name__ : List[str] =[] for i in range(0 , z.shape[2] , lowercase__ ): __magic_name__ : Any =[] for j in range(0 , z.shape[3] , lowercase__ ): __magic_name__ : Optional[int] =z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size] __magic_name__ : Optional[Any] =self.post_quant_conv(lowercase__ ) __magic_name__ : Optional[int] =self.decoder(lowercase__ ) row.append(lowercase__ ) rows.append(lowercase__ ) __magic_name__ : Optional[Any] =[] for i, row in enumerate(lowercase__ ): __magic_name__ : Tuple =[] for j, tile in enumerate(lowercase__ ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: __magic_name__ : Dict =self.blend_v(rows[i - 1][j] , lowercase__ , lowercase__ ) if j > 0: __magic_name__ : Tuple =self.blend_h(row[j - 1] , lowercase__ , lowercase__ ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(lowercase__ , dim=3 ) ) __magic_name__ : List[Any] =torch.cat(lowercase__ , dim=2 ) if not return_dict: return (dec,) return DecoderOutput(sample=lowercase__ ) def A__ ( self :Tuple , __snake_case :torch.FloatTensor , __snake_case :bool = False , __snake_case :bool = True , __snake_case :Optional[torch.Generator] = None , ): '''simple docstring''' __magic_name__ : Any =sample __magic_name__ : List[Any] =self.encode(lowercase__ ).latent_dist if sample_posterior: __magic_name__ : Optional[int] =posterior.sample(generator=lowercase__ ) else: __magic_name__ : List[Any] =posterior.mode() __magic_name__ : Union[str, Any] =self.decode(lowercase__ ).sample if not return_dict: return (dec,) return DecoderOutput(sample=lowercase__ )
21
'''simple docstring''' from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from transformers.modeling_outputs import BaseModelOutput from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING lowerCAmelCase__ = logging.get_logger(__name__) @add_end_docstrings(lowerCamelCase__ ) class lowercase_ (lowerCamelCase__ ): """simple docstring""" def __init__( self : List[str] ,**lowercase__ : Tuple ): super().__init__(**lowercase__ ) if self.framework == "tf": raise ValueError(F"The {self.__class__} is only available in PyTorch." ) requires_backends(self ,'''vision''' ) self.check_model_type(lowercase__ ) def __call__( self : List[str] ,lowercase__ : Union[str, "Image.Image", List[Dict[str, Any]]] ,lowercase__ : Union[str, List[str]] = None ,**lowercase__ : str ,): if "text_queries" in kwargs: __lowercase = kwargs.pop('''text_queries''' ) if isinstance(lowercase__ ,(str, Image.Image) ): __lowercase = {'''image''': image, '''candidate_labels''': candidate_labels} else: __lowercase = image __lowercase = super().__call__(lowercase__ ,**lowercase__ ) return results def SCREAMING_SNAKE_CASE ( self : int ,**lowercase__ : List[Any] ): __lowercase = {} if "threshold" in kwargs: __lowercase = kwargs['''threshold'''] if "top_k" in kwargs: __lowercase = kwargs['''top_k'''] return {}, {}, postprocess_params def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : Optional[Any] ): __lowercase = load_image(inputs['''image'''] ) __lowercase = inputs['''candidate_labels'''] if isinstance(lowercase__ ,lowercase__ ): __lowercase = candidate_labels.split(''',''' ) __lowercase = torch.tensor([[image.height, image.width]] ,dtype=torch.intaa ) for i, candidate_label in enumerate(lowercase__ ): __lowercase = self.tokenizer(lowercase__ ,return_tensors=self.framework ) __lowercase = self.image_processor(lowercase__ ,return_tensors=self.framework ) yield { "is_last": i == len(lowercase__ ) - 1, "target_size": target_size, "candidate_label": candidate_label, **text_inputs, **image_features, } def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : List[str] ): __lowercase = model_inputs.pop('''target_size''' ) __lowercase = model_inputs.pop('''candidate_label''' ) __lowercase = model_inputs.pop('''is_last''' ) __lowercase = self.model(**lowercase__ ) __lowercase = {'''target_size''': target_size, '''candidate_label''': candidate_label, '''is_last''': is_last, **outputs} return model_outputs def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : int ,lowercase__ : List[Any]=0.1 ,lowercase__ : List[str]=None ): __lowercase = [] for model_output in model_outputs: __lowercase = model_output['''candidate_label'''] __lowercase = BaseModelOutput(lowercase__ ) __lowercase = self.image_processor.post_process_object_detection( outputs=lowercase__ ,threshold=lowercase__ ,target_sizes=model_output['''target_size'''] )[0] for index in outputs["scores"].nonzero(): __lowercase = outputs['''scores'''][index].item() __lowercase = self._get_bounding_box(outputs['''boxes'''][index][0] ) __lowercase = {'''score''': score, '''label''': label, '''box''': box} results.append(lowercase__ ) __lowercase = sorted(lowercase__ ,key=lambda lowercase__ : x["score"] ,reverse=lowercase__ ) if top_k: __lowercase = results[:top_k] return results def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : "torch.Tensor" ): if self.framework != "pt": raise ValueError('''The ZeroShotObjectDetectionPipeline is only available in PyTorch.''' ) __lowercase , __lowercase , __lowercase , __lowercase = box.int().tolist() __lowercase = { '''xmin''': xmin, '''ymin''': ymin, '''xmax''': xmax, '''ymax''': ymax, } return bbox
41
0
"""simple docstring""" import argparse import struct import unittest class lowerCamelCase__ : '''simple docstring''' def __init__( self ,lowerCamelCase_ ) -> Optional[Any]: A = data # Initialize hash values A = [ 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A, 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19, ] # Initialize round constants A = [ 0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5, 0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5, 0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3, 0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174, 0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC, 0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA, 0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7, 0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967, 0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13, 0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85, 0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3, 0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070, 0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5, 0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3, 0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208, 0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2, ] A = self.preprocessing(self.data ) self.final_hash() @staticmethod def UpperCamelCase__ ( lowerCamelCase_ ) -> Union[str, Any]: A = b"""\x80""" + (b"""\x00""" * (6_3 - (len(lowercase__ ) + 8) % 6_4)) A = struct.pack(""">Q""" ,(len(lowercase__ ) * 8) ) return data + padding + big_endian_integer def UpperCamelCase__ ( self ) -> List[str]: # Convert into blocks of 64 bytes A = [ self.preprocessed_data[x : x + 6_4] for x in range(0 ,len(self.preprocessed_data ) ,6_4 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers A = list(struct.unpack(""">16L""" ,lowercase__ ) ) # add 48 0-ed integers words += [0] * 4_8 A , A , A , A , A , A , A , A = self.hashes for index in range(0 ,6_4 ): if index > 1_5: # modify the zero-ed indexes at the end of the array A = ( self.ror(words[index - 1_5] ,7 ) ^ self.ror(words[index - 1_5] ,1_8 ) ^ (words[index - 1_5] >> 3) ) A = ( self.ror(words[index - 2] ,1_7 ) ^ self.ror(words[index - 2] ,1_9 ) ^ (words[index - 2] >> 1_0) ) A = ( words[index - 1_6] + sa + words[index - 7] + sa ) % 0x100000000 # Compression A = self.ror(lowercase__ ,6 ) ^ self.ror(lowercase__ ,1_1 ) ^ self.ror(lowercase__ ,2_5 ) A = (e & f) ^ ((~e & 0xFFFFFFFF) & g) A = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0x100000000 A = self.ror(lowercase__ ,2 ) ^ self.ror(lowercase__ ,1_3 ) ^ self.ror(lowercase__ ,2_2 ) A = (a & b) ^ (a & c) ^ (b & c) A = (sa + maj) % 0x100000000 A , A , A , A , A , A , A , A = ( g, f, e, ((d + tempa) % 0x100000000), c, b, a, ((tempa + tempa) % 0x100000000), ) A = [a, b, c, d, e, f, g, h] # Modify final values A = [ ((element + mutated_hash_values[index]) % 0x100000000) for index, element in enumerate(self.hashes ) ] A = """""".join([hex(lowercase__ )[2:].zfill(8 ) for value in self.hashes] ) def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ) -> Any: return 0xFFFFFFFF & (value << (3_2 - rotations)) | (value >> rotations) class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ ( self ) -> str: import hashlib A = bytes("""Test String""" ,"""utf-8""" ) self.assertEqual(SHAaaa(lowercase__ ).hash ,hashlib.shaaaa(lowercase__ ).hexdigest() ) def _A ( ): """simple docstring""" import doctest doctest.testmod() A = argparse.ArgumentParser() parser.add_argument( """-s""" , """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , ) parser.add_argument( """-f""" , """--file""" , dest="""input_file""" , help="""Hash contents of a file""" ) A = parser.parse_args() A = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , """rb""" ) as f: A = f.read() else: A = bytes(A__ , """utf-8""" ) print(SHAaaa(A__ ).hash ) if __name__ == "__main__": main()
617
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer from .base import PipelineTool class lowercase_ (lowerCamelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = 'facebook/bart-large-mnli' SCREAMING_SNAKE_CASE : Optional[Any] = ( 'This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which ' 'should be the text to classify, and `labels`, which should be the list of labels to use for classification. ' 'It returns the most likely label in the list of provided `labels` for the input text.' ) SCREAMING_SNAKE_CASE : Any = 'text_classifier' SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForSequenceClassification SCREAMING_SNAKE_CASE : Tuple = ['text', ['text']] SCREAMING_SNAKE_CASE : List[str] = ['text'] def SCREAMING_SNAKE_CASE ( self : List[Any] ): super().setup() __lowercase = self.model.config __lowercase = -1 for idx, label in config.idalabel.items(): if label.lower().startswith('''entail''' ): __lowercase = int(lowercase__ ) if self.entailment_id == -1: raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''' ) def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Dict ,lowercase__ : List[Any] ): __lowercase = labels return self.pre_processor( [text] * len(lowercase__ ) ,[F"This example is {label}" for label in labels] ,return_tensors='''pt''' ,padding='''max_length''' ,) def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int ): __lowercase = outputs.logits __lowercase = torch.argmax(logits[:, 2] ).item() return self._labels[label_id]
41
0
"""simple docstring""" import argparse import datetime import json import time import warnings from logging import getLogger from pathlib import Path from typing import Dict, List import torch from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params lowerCAmelCase_ : Any = getLogger(__name__) lowerCAmelCase_ : List[str] = '''cuda''' if torch.cuda.is_available() else '''cpu''' def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 8 , lowerCAmelCase = DEFAULT_DEVICE , lowerCAmelCase=False , lowerCAmelCase="summarization" , lowerCAmelCase=None , **lowerCAmelCase , ): '''simple docstring''' UpperCAmelCase = Path(A__ ).open("""w""" , encoding="""utf-8""" ) UpperCAmelCase = str(A__ ) UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(A__ ).to(A__ ) if fpaa: UpperCAmelCase = model.half() UpperCAmelCase = AutoTokenizer.from_pretrained(A__ ) logger.info(F'''Inferred tokenizer type: {tokenizer.__class__}''' ) # if this is wrong, check config.model_type. UpperCAmelCase = time.time() # update config with task specific params use_task_specific_params(A__ , A__ ) if prefix is None: UpperCAmelCase = prefix or getattr(model.config , """prefix""" , """""" ) or """""" for examples_chunk in tqdm(list(chunks(A__ , A__ ) ) ): UpperCAmelCase = [prefix + text for text in examples_chunk] UpperCAmelCase = tokenizer(A__ , return_tensors="""pt""" , truncation=A__ , padding="""longest""" ).to(A__ ) UpperCAmelCase = model.generate( input_ids=batch.input_ids , attention_mask=batch.attention_mask , **A__ , ) UpperCAmelCase = tokenizer.batch_decode(A__ , skip_special_tokens=A__ , clean_up_tokenization_spaces=A__ ) for hypothesis in dec: fout.write(hypothesis + """\n""" ) fout.flush() fout.close() UpperCAmelCase = int(time.time() - start_time ) # seconds UpperCAmelCase = len(A__ ) return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )} def _lowerCAmelCase ( ): '''simple docstring''' return datetime.datetime.now().strftime("""%Y-%m-%d %H:%M:%S""" ) def _lowerCAmelCase ( lowerCAmelCase=True ): '''simple docstring''' UpperCAmelCase = argparse.ArgumentParser() parser.add_argument("""model_name""" , type=A__ , help="""like facebook/bart-large-cnn,t5-base, etc.""" ) parser.add_argument("""input_path""" , type=A__ , help="""like cnn_dm/test.source""" ) parser.add_argument("""save_path""" , type=A__ , help="""where to save summaries""" ) parser.add_argument("""--reference_path""" , type=A__ , required=A__ , help="""like cnn_dm/test.target""" ) parser.add_argument("""--score_path""" , type=A__ , required=A__ , default="""metrics.json""" , help="""where to save metrics""" ) parser.add_argument("""--device""" , type=A__ , required=A__ , default=A__ , help="""cuda, cuda:1, cpu etc.""" ) parser.add_argument( """--prefix""" , type=A__ , required=A__ , default=A__ , help="""will be added to the begininng of src examples""" ) parser.add_argument("""--task""" , type=A__ , default="""summarization""" , help="""used for task_specific_params + metrics""" ) parser.add_argument("""--bs""" , type=A__ , default=8 , required=A__ , help="""batch size""" ) parser.add_argument( """--n_obs""" , type=A__ , default=-1 , required=A__ , help="""How many observations. Defaults to all.""" ) parser.add_argument("""--fp16""" , action="""store_true""" ) parser.add_argument("""--dump-args""" , action="""store_true""" , help="""print the custom hparams with the results""" ) parser.add_argument( """--info""" , nargs="""?""" , type=A__ , const=datetime_now() , help=( """use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.""" """ lang=en-ru. If no value is passed, the current datetime string will be used.""" ) , ) # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate UpperCAmelCase , UpperCAmelCase = parser.parse_known_args() UpperCAmelCase = parse_numeric_n_bool_cl_kwargs(A__ ) if parsed_args and verbose: print(F'''parsed the following generate kwargs: {parsed_args}''' ) UpperCAmelCase = [""" """ + x.rstrip() if """t5""" in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()] if args.n_obs > 0: UpperCAmelCase = examples[: args.n_obs] Path(args.save_path ).parent.mkdir(exist_ok=A__ ) if args.reference_path is None and Path(args.score_path ).exists(): warnings.warn(F'''score_path {args.score_path} will be overwritten unless you type ctrl-c.''' ) if args.device == "cpu" and args.fpaa: # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half' raise ValueError("""Can\'t mix --fp16 and --device cpu""" ) UpperCAmelCase = generate_summaries_or_translations( A__ , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **A__ , ) if args.reference_path is None: return {} # Compute scores UpperCAmelCase = calculate_bleu if """translation""" in args.task else calculate_rouge UpperCAmelCase = [x.rstrip() for x in open(args.save_path ).readlines()] UpperCAmelCase = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(A__ )] UpperCAmelCase = score_fn(A__ , A__ ) scores.update(A__ ) if args.dump_args: scores.update(A__ ) if args.info: UpperCAmelCase = args.info if verbose: print(A__ ) if args.score_path is not None: json.dump(A__ , open(args.score_path , """w""" ) ) return scores if __name__ == "__main__": # Usage for MT: # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@ run_generate(verbose=True)
673
'''simple docstring''' from collections.abc import Callable class lowercase_ : """simple docstring""" def __init__( self : Optional[int] ,lowercase__ : Callable | None = None ): # Stores actual heap items. __lowercase = [] # Stores indexes of each item for supporting updates and deletion. __lowercase = {} # Stores current size of heap. __lowercase = 0 # Stores function used to evaluate the score of an item on which basis ordering # will be done. __lowercase = key or (lambda lowercase__ : x) def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : int ): return int((i - 1) / 2 ) if i > 0 else None def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : int ): __lowercase = int(2 * i + 1 ) return left if 0 < left < self.size else None def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : int ): __lowercase = int(2 * i + 2 ) return right if 0 < right < self.size else None def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : int ,lowercase__ : int ): __lowercase , __lowercase = ( self.pos_map[self.arr[j][0]], self.pos_map[self.arr[i][0]], ) # Then swap the items in the list. __lowercase , __lowercase = self.arr[j], self.arr[i] def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : int ,lowercase__ : int ): return self.arr[i][1] < self.arr[j][1] def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int ): __lowercase = self._left(lowercase__ ) __lowercase = self._right(lowercase__ ) __lowercase = i if left is not None and not self._cmp(lowercase__ ,lowercase__ ): __lowercase = left if right is not None and not self._cmp(lowercase__ ,lowercase__ ): __lowercase = right return valid_parent def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int ): __lowercase = self._parent(lowercase__ ) while parent is not None and not self._cmp(lowercase__ ,lowercase__ ): self._swap(lowercase__ ,lowercase__ ) __lowercase , __lowercase = parent, self._parent(lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : int ): __lowercase = self._get_valid_parent(lowercase__ ) while valid_parent != index: self._swap(lowercase__ ,lowercase__ ) __lowercase , __lowercase = valid_parent, self._get_valid_parent(lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : int ,lowercase__ : int ): if item not in self.pos_map: return __lowercase = self.pos_map[item] __lowercase = [item, self.key(lowercase__ )] # Make sure heap is right in both up and down direction. # Ideally only one of them will make any change. self._heapify_up(lowercase__ ) self._heapify_down(lowercase__ ) def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int ): if item not in self.pos_map: return __lowercase = self.pos_map[item] del self.pos_map[item] __lowercase = self.arr[self.size - 1] __lowercase = index self.size -= 1 # Make sure heap is right in both up and down direction. Ideally only one # of them will make any change- so no performance loss in calling both. if self.size > index: self._heapify_up(lowercase__ ) self._heapify_down(lowercase__ ) def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : int ,lowercase__ : int ): __lowercase = len(self.arr ) if arr_len == self.size: self.arr.append([item, self.key(lowercase__ )] ) else: __lowercase = [item, self.key(lowercase__ )] __lowercase = self.size self.size += 1 self._heapify_up(self.size - 1 ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): return self.arr[0] if self.size else None def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): __lowercase = self.get_top() if top_item_tuple: self.delete_item(top_item_tuple[0] ) return top_item_tuple def _A ( ): """simple docstring""" if __name__ == "__main__": import doctest doctest.testmod()
41
0
from collections.abc import Callable class _UpperCamelCase : '''simple docstring''' def __init__( self : Optional[int] , _lowerCamelCase : Callable | None = None ): '''simple docstring''' __lowerCamelCase : Union[str, Any] = [] # Stores indexes of each item for supporting updates and deletion. __lowerCamelCase : Optional[int] = {} # Stores current size of heap. __lowerCamelCase : Any = 0 # Stores function used to evaluate the score of an item on which basis ordering # will be done. __lowerCamelCase : Union[str, Any] = key or (lambda _lowerCamelCase : x) def _snake_case ( self : Tuple , _lowerCamelCase : int ): '''simple docstring''' return int((i - 1) / 2 ) if i > 0 else None def _snake_case ( self : Optional[int] , _lowerCamelCase : int ): '''simple docstring''' __lowerCamelCase : Union[str, Any] = int(2 * i + 1 ) return left if 0 < left < self.size else None def _snake_case ( self : Optional[Any] , _lowerCamelCase : int ): '''simple docstring''' __lowerCamelCase : Tuple = int(2 * i + 2 ) return right if 0 < right < self.size else None def _snake_case ( self : List[Any] , _lowerCamelCase : int , _lowerCamelCase : int ): '''simple docstring''' __lowerCamelCase , __lowerCamelCase : Tuple = ( self.pos_map[self.arr[j][0]], self.pos_map[self.arr[i][0]], ) # Then swap the items in the list. __lowerCamelCase , __lowerCamelCase : Optional[int] = self.arr[j], self.arr[i] def _snake_case ( self : Any , _lowerCamelCase : int , _lowerCamelCase : int ): '''simple docstring''' return self.arr[i][1] < self.arr[j][1] def _snake_case ( self : int , _lowerCamelCase : int ): '''simple docstring''' __lowerCamelCase : Union[str, Any] = self._left(lowercase__ ) __lowerCamelCase : Union[str, Any] = self._right(lowercase__ ) __lowerCamelCase : Optional[int] = i if left is not None and not self._cmp(lowercase__ , lowercase__ ): __lowerCamelCase : Any = left if right is not None and not self._cmp(lowercase__ , lowercase__ ): __lowerCamelCase : List[str] = right return valid_parent def _snake_case ( self : int , _lowerCamelCase : int ): '''simple docstring''' __lowerCamelCase : Optional[int] = self._parent(lowercase__ ) while parent is not None and not self._cmp(lowercase__ , lowercase__ ): self._swap(lowercase__ , lowercase__ ) __lowerCamelCase , __lowerCamelCase : Union[str, Any] = parent, self._parent(lowercase__ ) def _snake_case ( self : Optional[int] , _lowerCamelCase : int ): '''simple docstring''' __lowerCamelCase : Any = self._get_valid_parent(lowercase__ ) while valid_parent != index: self._swap(lowercase__ , lowercase__ ) __lowerCamelCase , __lowerCamelCase : List[Any] = valid_parent, self._get_valid_parent(lowercase__ ) def _snake_case ( self : Any , _lowerCamelCase : int , _lowerCamelCase : int ): '''simple docstring''' if item not in self.pos_map: return __lowerCamelCase : List[str] = self.pos_map[item] __lowerCamelCase : Union[str, Any] = [item, self.key(lowercase__ )] # Make sure heap is right in both up and down direction. # Ideally only one of them will make any change. self._heapify_up(lowercase__ ) self._heapify_down(lowercase__ ) def _snake_case ( self : int , _lowerCamelCase : int ): '''simple docstring''' if item not in self.pos_map: return __lowerCamelCase : List[Any] = self.pos_map[item] del self.pos_map[item] __lowerCamelCase : Optional[int] = self.arr[self.size - 1] __lowerCamelCase : Tuple = index self.size -= 1 # Make sure heap is right in both up and down direction. Ideally only one # of them will make any change- so no performance loss in calling both. if self.size > index: self._heapify_up(lowercase__ ) self._heapify_down(lowercase__ ) def _snake_case ( self : List[str] , _lowerCamelCase : int , _lowerCamelCase : int ): '''simple docstring''' __lowerCamelCase : Optional[int] = len(self.arr ) if arr_len == self.size: self.arr.append([item, self.key(lowercase__ )] ) else: __lowerCamelCase : Dict = [item, self.key(lowercase__ )] __lowerCamelCase : Optional[Any] = self.size self.size += 1 self._heapify_up(self.size - 1 ) def _snake_case ( self : List[Any] ): '''simple docstring''' return self.arr[0] if self.size else None def _snake_case ( self : Union[str, Any] ): '''simple docstring''' __lowerCamelCase : int = self.get_top() if top_item_tuple: self.delete_item(top_item_tuple[0] ) return top_item_tuple def _UpperCAmelCase ( ): """simple docstring""" if __name__ == "__main__": import doctest doctest.testmod()
519
'''simple docstring''' import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class lowercase_ (lowerCamelCase__ ): """simple docstring""" def __init__( self : List[str] ): __lowercase = [] def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : str ,**lowercase__ : Any ): self.events.append('''on_init_end''' ) def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : List[str] ,lowercase__ : Optional[Any] ,lowercase__ : int ,**lowercase__ : Optional[int] ): self.events.append('''on_train_begin''' ) def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Tuple ,lowercase__ : int ,lowercase__ : int ,**lowercase__ : List[str] ): self.events.append('''on_train_end''' ) def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Any ,lowercase__ : Union[str, Any] ,lowercase__ : Any ,**lowercase__ : Optional[Any] ): self.events.append('''on_epoch_begin''' ) def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : int ,lowercase__ : Any ,**lowercase__ : Optional[int] ): self.events.append('''on_epoch_end''' ) def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : List[str] ,lowercase__ : str ,lowercase__ : List[str] ,**lowercase__ : List[str] ): self.events.append('''on_step_begin''' ) def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Union[str, Any] ,lowercase__ : int ,lowercase__ : Optional[int] ,**lowercase__ : Dict ): self.events.append('''on_step_end''' ) def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Any ,lowercase__ : Tuple ,lowercase__ : Union[str, Any] ,**lowercase__ : Any ): self.events.append('''on_evaluate''' ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : str ,lowercase__ : Union[str, Any] ,lowercase__ : int ,**lowercase__ : Optional[Any] ): self.events.append('''on_predict''' ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[Any] ,**lowercase__ : int ): self.events.append('''on_save''' ) def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : List[str] ,**lowercase__ : List[str] ): self.events.append('''on_log''' ) def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : str ,lowercase__ : int ,lowercase__ : Dict ,**lowercase__ : str ): self.events.append('''on_prediction_step''' ) @require_torch class lowercase_ (unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self : List[str] ): __lowercase = tempfile.mkdtemp() def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): shutil.rmtree(self.output_dir ) def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Optional[Any]=0 ,lowercase__ : Any=0 ,lowercase__ : Tuple=6_4 ,lowercase__ : Optional[int]=6_4 ,lowercase__ : Optional[Any]=None ,lowercase__ : str=False ,**lowercase__ : Any ): # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure # its set to False since the tests later on depend on its value. __lowercase = RegressionDataset(length=lowercase__ ) __lowercase = RegressionDataset(length=lowercase__ ) __lowercase = RegressionModelConfig(a=lowercase__ ,b=lowercase__ ) __lowercase = RegressionPreTrainedModel(lowercase__ ) __lowercase = TrainingArguments(self.output_dir ,disable_tqdm=lowercase__ ,report_to=[] ,**lowercase__ ) return Trainer( lowercase__ ,lowercase__ ,train_dataset=lowercase__ ,eval_dataset=lowercase__ ,callbacks=lowercase__ ,) def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Optional[int] ,lowercase__ : Any ): self.assertEqual(len(lowercase__ ) ,len(lowercase__ ) ) # Order doesn't matter __lowercase = sorted(lowercase__ ,key=lambda lowercase__ : cb.__name__ if isinstance(lowercase__ ,lowercase__ ) else cb.__class__.__name__ ) __lowercase = sorted(lowercase__ ,key=lambda lowercase__ : cb.__name__ if isinstance(lowercase__ ,lowercase__ ) else cb.__class__.__name__ ) for cba, cba in zip(lowercase__ ,lowercase__ ): if isinstance(lowercase__ ,lowercase__ ) and isinstance(lowercase__ ,lowercase__ ): self.assertEqual(lowercase__ ,lowercase__ ) elif isinstance(lowercase__ ,lowercase__ ) and not isinstance(lowercase__ ,lowercase__ ): self.assertEqual(lowercase__ ,cba.__class__ ) elif not isinstance(lowercase__ ,lowercase__ ) and isinstance(lowercase__ ,lowercase__ ): self.assertEqual(cba.__class__ ,lowercase__ ) else: self.assertEqual(lowercase__ ,lowercase__ ) def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Union[str, Any] ): __lowercase = ['''on_init_end''', '''on_train_begin'''] __lowercase = 0 __lowercase = len(trainer.get_eval_dataloader() ) __lowercase = ['''on_prediction_step'''] * len(trainer.get_eval_dataloader() ) + ['''on_log''', '''on_evaluate'''] for _ in range(trainer.state.num_train_epochs ): expected_events.append('''on_epoch_begin''' ) for _ in range(lowercase__ ): step += 1 expected_events += ["on_step_begin", "on_step_end"] if step % trainer.args.logging_steps == 0: expected_events.append('''on_log''' ) if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0: expected_events.append('''on_save''' ) expected_events.append('''on_epoch_end''' ) if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def SCREAMING_SNAKE_CASE ( self : str ): __lowercase = self.get_trainer() __lowercase = DEFAULT_CALLBACKS.copy() + [ProgressCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) # Callbacks passed at init are added to the default callbacks __lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ) expected_callbacks.append(lowercase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback __lowercase = self.get_trainer(disable_tqdm=lowercase__ ) __lowercase = DEFAULT_CALLBACKS.copy() + [PrinterCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): __lowercase = DEFAULT_CALLBACKS.copy() + [ProgressCallback] __lowercase = self.get_trainer() # We can add, pop, or remove by class name trainer.remove_callback(lowercase__ ) expected_callbacks.remove(lowercase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) __lowercase = self.get_trainer() __lowercase = trainer.pop_callback(lowercase__ ) self.assertEqual(cb.__class__ ,lowercase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) trainer.add_callback(lowercase__ ) expected_callbacks.insert(0 ,lowercase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) # We can also add, pop, or remove by instance __lowercase = self.get_trainer() __lowercase = trainer.callback_handler.callbacks[0] trainer.remove_callback(lowercase__ ) expected_callbacks.remove(lowercase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) __lowercase = self.get_trainer() __lowercase = trainer.callback_handler.callbacks[0] __lowercase = trainer.pop_callback(lowercase__ ) self.assertEqual(lowercase__ ,lowercase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) trainer.add_callback(lowercase__ ) expected_callbacks.insert(0 ,lowercase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Dict ): import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested warnings.simplefilter(action='''ignore''' ,category=lowercase__ ) __lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ) trainer.train() __lowercase = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) ) # Independent log/save/eval __lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,logging_steps=5 ) trainer.train() __lowercase = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) ) __lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,save_steps=5 ) trainer.train() __lowercase = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) ) __lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,eval_steps=5 ,evaluation_strategy='''steps''' ) trainer.train() __lowercase = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) ) __lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,evaluation_strategy='''epoch''' ) trainer.train() __lowercase = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) ) # A bit of everything __lowercase = self.get_trainer( callbacks=[MyTestTrainerCallback] ,logging_steps=3 ,save_steps=1_0 ,eval_steps=5 ,evaluation_strategy='''steps''' ,) trainer.train() __lowercase = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) ) # warning should be emitted for duplicated callbacks with patch('''transformers.trainer_callback.logger.warning''' ) as warn_mock: __lowercase = self.get_trainer( callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] ,) assert str(lowercase__ ) in warn_mock.call_args[0][0]
41
0
"""simple docstring""" import inspect import unittest import torch import torch.nn as nn from accelerate.hooks import ( AlignDevicesHook, ModelHook, SequentialHook, add_hook_to_module, attach_align_device_hook, remove_hook_from_module, remove_hook_from_submodules, ) from accelerate.test_utils import require_multi_gpu class UpperCamelCase ( nn.Module ): """simple docstring""" def __init__( self : Tuple ): super().__init__() A__ = nn.Linear(3 , 4 ) A__ = nn.BatchNormad(4 ) A__ = nn.Linear(4 , 5 ) def A__ ( self : List[str] , _lowerCamelCase : List[Any] ): return self.lineara(self.batchnorm(self.lineara(lowercase__ ) ) ) class UpperCamelCase ( lowerCamelCase__ ): """simple docstring""" def A__ ( self : List[Any] , _lowerCamelCase : Dict , *_lowerCamelCase : List[str] , **_lowerCamelCase : List[str] ): return (args[0] + 1,) + args[1:], kwargs class UpperCamelCase ( lowerCamelCase__ ): """simple docstring""" def A__ ( self : List[str] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any] ): return output + 1 class UpperCamelCase ( unittest.TestCase ): """simple docstring""" def A__ ( self : str ): A__ = ModelForTest() A__ = ModelHook() add_hook_to_module(lowercase__ , lowercase__ ) self.assertEqual(test_model._hf_hook , lowercase__ ) self.assertTrue(hasattr(lowercase__ , '''_old_forward''' ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , '''forward''' ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['''x'''] ) remove_hook_from_module(lowercase__ ) self.assertFalse(hasattr(lowercase__ , '''_hf_hook''' ) ) self.assertFalse(hasattr(lowercase__ , '''_old_forward''' ) ) def A__ ( self : List[str] ): A__ = ModelForTest() A__ = ModelHook() add_hook_to_module(lowercase__ , lowercase__ ) add_hook_to_module(lowercase__ , lowercase__ , append=lowercase__ ) self.assertEqual(isinstance(test_model._hf_hook , lowercase__ ) , lowercase__ ) self.assertEqual(len(test_model._hf_hook.hooks ) , 2 ) self.assertTrue(hasattr(lowercase__ , '''_old_forward''' ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , '''forward''' ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['''x'''] ) remove_hook_from_module(lowercase__ ) self.assertFalse(hasattr(lowercase__ , '''_hf_hook''' ) ) self.assertFalse(hasattr(lowercase__ , '''_old_forward''' ) ) def A__ ( self : Optional[int] ): A__ = ModelForTest() A__ = torch.randn(2 , 3 ) A__ = test_model(x + 1 ) A__ = test_model(x + 2 ) A__ = PreForwardHook() add_hook_to_module(lowercase__ , lowercase__ ) A__ = test_model(lowercase__ ) self.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1E-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain A__ = PreForwardHook() add_hook_to_module(lowercase__ , lowercase__ ) A__ = test_model(lowercase__ ) self.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1E-5 ) ) # You need to use the sequential hook to chain two or more hooks A__ = SequentialHook(PreForwardHook() , PreForwardHook() ) add_hook_to_module(lowercase__ , lowercase__ ) A__ = test_model(lowercase__ ) assert torch.allclose(lowercase__ , lowercase__ , atol=1E-5 ) def A__ ( self : Optional[int] ): A__ = ModelForTest() A__ = torch.randn(2 , 3 ) A__ = test_model(lowercase__ ) A__ = PostForwardHook() add_hook_to_module(lowercase__ , lowercase__ ) A__ = test_model(lowercase__ ) self.assertTrue(torch.allclose(lowercase__ , output + 1 , atol=1E-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain A__ = PostForwardHook() add_hook_to_module(lowercase__ , lowercase__ ) A__ = test_model(lowercase__ ) self.assertTrue(torch.allclose(lowercase__ , output + 1 , atol=1E-5 ) ) # You need to use the sequential hook to chain two or more hooks A__ = SequentialHook(PostForwardHook() , PostForwardHook() ) add_hook_to_module(lowercase__ , lowercase__ ) A__ = test_model(lowercase__ ) assert torch.allclose(lowercase__ , output + 2 , atol=1E-5 ) def A__ ( self : int ): A__ = ModelForTest() A__ = torch.randn(2 , 3 ) A__ = test_model(lowercase__ ) A__ = PostForwardHook() add_hook_to_module(lowercase__ , lowercase__ ) A__ = test_model(lowercase__ ) self.assertTrue(torch.allclose(lowercase__ , output + 1 ) ) self.assertTrue(outputa.requires_grad ) A__ = True A__ = test_model(lowercase__ ) self.assertFalse(outputa.requires_grad ) @require_multi_gpu def A__ ( self : Dict ): A__ = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) # This will move each submodule on different devices add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) ) self.assertEqual(model.lineara.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) ) self.assertEqual(model.lineara.weight.device , torch.device(1 ) ) # We can still make a forward pass. The input does not need to be on any particular device A__ = torch.randn(2 , 3 ) A__ = model(lowercase__ ) self.assertEqual(output.device , torch.device(1 ) ) # We can add a general hook to put back output on same device as input. add_hook_to_module(lowercase__ , AlignDevicesHook(io_same_device=lowercase__ ) ) A__ = torch.randn(2 , 3 ).to(0 ) A__ = model(lowercase__ ) self.assertEqual(output.device , torch.device(0 ) ) def A__ ( self : str ): A__ = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) # This will move each submodule on different devices A__ = {'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''', '''offload''': True} add_hook_to_module(model.lineara , AlignDevicesHook(**lowercase__ ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**lowercase__ ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**lowercase__ ) ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) # Buffers are not included in the offload by default, so are on the execution device A__ = torch.device(hook_kwargs['''execution_device'''] ) self.assertEqual(model.batchnorm.running_mean.device , lowercase__ ) A__ = torch.randn(2 , 3 ) A__ = model(lowercase__ ) self.assertEqual(output.device , lowercase__ ) # Removing hooks loads back the weights in the model. remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) # Now test with buffers included in the offload A__ = { '''execution_device''': 0 if torch.cuda.is_available() else '''cpu''', '''offload''': True, '''offload_buffers''': True, } add_hook_to_module(model.lineara , AlignDevicesHook(**lowercase__ ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**lowercase__ ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**lowercase__ ) ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) ) A__ = torch.randn(2 , 3 ) A__ = model(lowercase__ ) self.assertEqual(output.device , lowercase__ ) # Removing hooks loads back the weights in the model. remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) def A__ ( self : int ): A__ = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) # This will move each submodule on different devices A__ = 0 if torch.cuda.is_available() else '''cpu''' attach_align_device_hook(lowercase__ , execution_device=lowercase__ , offload=lowercase__ ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) # Buffers are not included in the offload by default, so are on the execution device A__ = torch.device(lowercase__ ) self.assertEqual(model.batchnorm.running_mean.device , lowercase__ ) A__ = torch.randn(2 , 3 ) A__ = model(lowercase__ ) self.assertEqual(output.device , lowercase__ ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(lowercase__ ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) # Now test with buffers included in the offload attach_align_device_hook(lowercase__ , execution_device=lowercase__ , offload=lowercase__ , offload_buffers=lowercase__ ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) ) A__ = torch.randn(2 , 3 ) A__ = model(lowercase__ ) self.assertEqual(output.device , lowercase__ ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(lowercase__ ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) def A__ ( self : List[Any] ): A__ = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) # This will move each submodule on different devices A__ = 0 if torch.cuda.is_available() else '''cpu''' attach_align_device_hook( lowercase__ , execution_device=lowercase__ , offload=lowercase__ , weights_map=model.state_dict() ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) # Buffers are not included in the offload by default, so are on the execution device A__ = torch.device(lowercase__ ) self.assertEqual(model.batchnorm.running_mean.device , lowercase__ ) A__ = torch.randn(2 , 3 ) A__ = model(lowercase__ ) self.assertEqual(output.device , lowercase__ ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(lowercase__ ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) # Now test with buffers included in the offload attach_align_device_hook( lowercase__ , execution_device=lowercase__ , offload=lowercase__ , weights_map=model.state_dict() , offload_buffers=lowercase__ , ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) ) A__ = torch.randn(2 , 3 ) A__ = model(lowercase__ ) self.assertEqual(output.device , lowercase__ ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(lowercase__ ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
571
'''simple docstring''' from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, ) @flax.struct.dataclass class lowercase_ (lowerCamelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE : jnp.ndarray SCREAMING_SNAKE_CASE : jnp.ndarray class lowercase_ (nn.Module ): """simple docstring""" SCREAMING_SNAKE_CASE : int SCREAMING_SNAKE_CASE : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6) SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa def SCREAMING_SNAKE_CASE ( self : Dict ): __lowercase = nn.Conv( self.block_out_channels[0] ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) __lowercase = [] for i in range(len(self.block_out_channels ) - 1 ): __lowercase = self.block_out_channels[i] __lowercase = self.block_out_channels[i + 1] __lowercase = nn.Conv( lowercase__ ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) blocks.append(lowercase__ ) __lowercase = nn.Conv( lowercase__ ,kernel_size=(3, 3) ,strides=(2, 2) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) blocks.append(lowercase__ ) __lowercase = blocks __lowercase = nn.Conv( self.conditioning_embedding_channels ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,) def __call__( self : List[str] ,lowercase__ : Optional[int] ): __lowercase = self.conv_in(lowercase__ ) __lowercase = nn.silu(lowercase__ ) for block in self.blocks: __lowercase = block(lowercase__ ) __lowercase = nn.silu(lowercase__ ) __lowercase = self.conv_out(lowercase__ ) return embedding @flax_register_to_config class lowercase_ (nn.Module , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE : int = 3_2 SCREAMING_SNAKE_CASE : int = 4 SCREAMING_SNAKE_CASE : Tuple[str] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) SCREAMING_SNAKE_CASE : Union[bool, Tuple[bool]] = False SCREAMING_SNAKE_CASE : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0) SCREAMING_SNAKE_CASE : int = 2 SCREAMING_SNAKE_CASE : Union[int, Tuple[int]] = 8 SCREAMING_SNAKE_CASE : Optional[Union[int, Tuple[int]]] = None SCREAMING_SNAKE_CASE : int = 1_2_8_0 SCREAMING_SNAKE_CASE : float = 0.0 SCREAMING_SNAKE_CASE : bool = False SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa SCREAMING_SNAKE_CASE : bool = True SCREAMING_SNAKE_CASE : int = 0 SCREAMING_SNAKE_CASE : str = "rgb" SCREAMING_SNAKE_CASE : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6) def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : jax.random.KeyArray ): # init input tensors __lowercase = (1, self.in_channels, self.sample_size, self.sample_size) __lowercase = jnp.zeros(lowercase__ ,dtype=jnp.floataa ) __lowercase = jnp.ones((1,) ,dtype=jnp.intaa ) __lowercase = jnp.zeros((1, 1, self.cross_attention_dim) ,dtype=jnp.floataa ) __lowercase = (1, 3, self.sample_size * 8, self.sample_size * 8) __lowercase = jnp.zeros(lowercase__ ,dtype=jnp.floataa ) __lowercase , __lowercase = jax.random.split(lowercase__ ) __lowercase = {'''params''': params_rng, '''dropout''': dropout_rng} return self.init(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )["params"] def SCREAMING_SNAKE_CASE ( self : Any ): __lowercase = self.block_out_channels __lowercase = block_out_channels[0] * 4 # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. __lowercase = self.num_attention_heads or self.attention_head_dim # input __lowercase = nn.Conv( block_out_channels[0] ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) # time __lowercase = FlaxTimesteps( block_out_channels[0] ,flip_sin_to_cos=self.flip_sin_to_cos ,freq_shift=self.config.freq_shift ) __lowercase = FlaxTimestepEmbedding(lowercase__ ,dtype=self.dtype ) __lowercase = FlaxControlNetConditioningEmbedding( conditioning_embedding_channels=block_out_channels[0] ,block_out_channels=self.conditioning_embedding_out_channels ,) __lowercase = self.only_cross_attention if isinstance(lowercase__ ,lowercase__ ): __lowercase = (only_cross_attention,) * len(self.down_block_types ) if isinstance(lowercase__ ,lowercase__ ): __lowercase = (num_attention_heads,) * len(self.down_block_types ) # down __lowercase = [] __lowercase = [] __lowercase = block_out_channels[0] __lowercase = nn.Conv( lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,) controlnet_down_blocks.append(lowercase__ ) for i, down_block_type in enumerate(self.down_block_types ): __lowercase = output_channel __lowercase = block_out_channels[i] __lowercase = i == len(lowercase__ ) - 1 if down_block_type == "CrossAttnDownBlock2D": __lowercase = FlaxCrossAttnDownBlockaD( in_channels=lowercase__ ,out_channels=lowercase__ ,dropout=self.dropout ,num_layers=self.layers_per_block ,num_attention_heads=num_attention_heads[i] ,add_downsample=not is_final_block ,use_linear_projection=self.use_linear_projection ,only_cross_attention=only_cross_attention[i] ,dtype=self.dtype ,) else: __lowercase = FlaxDownBlockaD( in_channels=lowercase__ ,out_channels=lowercase__ ,dropout=self.dropout ,num_layers=self.layers_per_block ,add_downsample=not is_final_block ,dtype=self.dtype ,) down_blocks.append(lowercase__ ) for _ in range(self.layers_per_block ): __lowercase = nn.Conv( lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,) controlnet_down_blocks.append(lowercase__ ) if not is_final_block: __lowercase = nn.Conv( lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,) controlnet_down_blocks.append(lowercase__ ) __lowercase = down_blocks __lowercase = controlnet_down_blocks # mid __lowercase = block_out_channels[-1] __lowercase = FlaxUNetMidBlockaDCrossAttn( in_channels=lowercase__ ,dropout=self.dropout ,num_attention_heads=num_attention_heads[-1] ,use_linear_projection=self.use_linear_projection ,dtype=self.dtype ,) __lowercase = nn.Conv( lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,) def __call__( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Any ,lowercase__ : List[Any] ,lowercase__ : str ,lowercase__ : float = 1.0 ,lowercase__ : bool = True ,lowercase__ : bool = False ,): __lowercase = self.controlnet_conditioning_channel_order if channel_order == "bgr": __lowercase = jnp.flip(lowercase__ ,axis=1 ) # 1. time if not isinstance(lowercase__ ,jnp.ndarray ): __lowercase = jnp.array([timesteps] ,dtype=jnp.intaa ) elif isinstance(lowercase__ ,jnp.ndarray ) and len(timesteps.shape ) == 0: __lowercase = timesteps.astype(dtype=jnp.floataa ) __lowercase = jnp.expand_dims(lowercase__ ,0 ) __lowercase = self.time_proj(lowercase__ ) __lowercase = self.time_embedding(lowercase__ ) # 2. pre-process __lowercase = jnp.transpose(lowercase__ ,(0, 2, 3, 1) ) __lowercase = self.conv_in(lowercase__ ) __lowercase = jnp.transpose(lowercase__ ,(0, 2, 3, 1) ) __lowercase = self.controlnet_cond_embedding(lowercase__ ) sample += controlnet_cond # 3. down __lowercase = (sample,) for down_block in self.down_blocks: if isinstance(lowercase__ ,lowercase__ ): __lowercase , __lowercase = down_block(lowercase__ ,lowercase__ ,lowercase__ ,deterministic=not train ) else: __lowercase , __lowercase = down_block(lowercase__ ,lowercase__ ,deterministic=not train ) down_block_res_samples += res_samples # 4. mid __lowercase = self.mid_block(lowercase__ ,lowercase__ ,lowercase__ ,deterministic=not train ) # 5. contronet blocks __lowercase = () for down_block_res_sample, controlnet_block in zip(lowercase__ ,self.controlnet_down_blocks ): __lowercase = controlnet_block(lowercase__ ) controlnet_down_block_res_samples += (down_block_res_sample,) __lowercase = controlnet_down_block_res_samples __lowercase = self.controlnet_mid_block(lowercase__ ) # 6. scaling __lowercase = [sample * conditioning_scale for sample in down_block_res_samples] mid_block_res_sample *= conditioning_scale if not return_dict: return (down_block_res_samples, mid_block_res_sample) return FlaxControlNetOutput( down_block_res_samples=lowercase__ ,mid_block_res_sample=lowercase__ )
41
0
import argparse import collections import json import os import re import string import sys import numpy as np __SCREAMING_SNAKE_CASE =re.compile(r"""\b(a|an|the)\b""", re.UNICODE) __SCREAMING_SNAKE_CASE =None def a (): SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser('''Official evaluation script for SQuAD version 2.0.''' ) parser.add_argument('''data_file''' , metavar='''data.json''' , help='''Input data JSON file.''' ) parser.add_argument('''pred_file''' , metavar='''pred.json''' , help='''Model predictions.''' ) parser.add_argument( '''--out-file''' , '''-o''' , metavar='''eval.json''' , help='''Write accuracy metrics to file (default is stdout).''' ) parser.add_argument( '''--na-prob-file''' , '''-n''' , metavar='''na_prob.json''' , help='''Model estimates of probability of no answer.''' ) parser.add_argument( '''--na-prob-thresh''' , '''-t''' , type=A__ , default=1.0 , help='''Predict "" if no-answer probability exceeds this (default = 1.0).''' , ) parser.add_argument( '''--out-image-dir''' , '''-p''' , metavar='''out_images''' , default=A__ , help='''Save precision-recall curves to directory.''' ) parser.add_argument('''--verbose''' , '''-v''' , action='''store_true''' ) if len(sys.argv ) == 1: parser.print_help() sys.exit(1 ) return parser.parse_args() def a (_lowerCAmelCase ): SCREAMING_SNAKE_CASE_ = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: SCREAMING_SNAKE_CASE_ = bool(qa['''answers''']['''text'''] ) return qid_to_has_ans def a (_lowerCAmelCase ): def remove_articles(_lowerCAmelCase ): return ARTICLES_REGEX.sub(''' ''' , A__ ) def white_space_fix(_lowerCAmelCase ): return " ".join(text.split() ) def remove_punc(_lowerCAmelCase ): SCREAMING_SNAKE_CASE_ = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(_lowerCAmelCase ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(A__ ) ) ) ) def a (_lowerCAmelCase ): if not s: return [] return normalize_answer(A__ ).split() def a (_lowerCAmelCase , _lowerCAmelCase ): return int(normalize_answer(A__ ) == normalize_answer(A__ ) ) def a (_lowerCAmelCase , _lowerCAmelCase ): SCREAMING_SNAKE_CASE_ = get_tokens(A__ ) SCREAMING_SNAKE_CASE_ = get_tokens(A__ ) SCREAMING_SNAKE_CASE_ = collections.Counter(A__ ) & collections.Counter(A__ ) SCREAMING_SNAKE_CASE_ = sum(common.values() ) if len(A__ ) == 0 or len(A__ ) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks ) if num_same == 0: return 0 SCREAMING_SNAKE_CASE_ = 1.0 * num_same / len(A__ ) SCREAMING_SNAKE_CASE_ = 1.0 * num_same / len(A__ ) SCREAMING_SNAKE_CASE_ = (2 * precision * recall) / (precision + recall) return fa def a (_lowerCAmelCase , _lowerCAmelCase ): SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: SCREAMING_SNAKE_CASE_ = qa['''id'''] SCREAMING_SNAKE_CASE_ = [t for t in qa['''answers''']['''text'''] if normalize_answer(A__ )] if not gold_answers: # For unanswerable questions, only correct answer is empty string SCREAMING_SNAKE_CASE_ = [''''''] if qid not in preds: print(F"Missing prediction for {qid}" ) continue SCREAMING_SNAKE_CASE_ = preds[qid] # Take max over all gold answers SCREAMING_SNAKE_CASE_ = max(compute_exact(A__ , A__ ) for a in gold_answers ) SCREAMING_SNAKE_CASE_ = max(compute_fa(A__ , A__ ) for a in gold_answers ) return exact_scores, fa_scores def a (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): SCREAMING_SNAKE_CASE_ = {} for qid, s in scores.items(): SCREAMING_SNAKE_CASE_ = na_probs[qid] > na_prob_thresh if pred_na: SCREAMING_SNAKE_CASE_ = float(not qid_to_has_ans[qid] ) else: SCREAMING_SNAKE_CASE_ = s return new_scores def a (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None ): if not qid_list: SCREAMING_SNAKE_CASE_ = len(A__ ) return collections.OrderedDict( [ ('''exact''', 1_0_0.0 * sum(exact_scores.values() ) / total), ('''f1''', 1_0_0.0 * sum(fa_scores.values() ) / total), ('''total''', total), ] ) else: SCREAMING_SNAKE_CASE_ = len(A__ ) return collections.OrderedDict( [ ('''exact''', 1_0_0.0 * sum(exact_scores[k] for k in qid_list ) / total), ('''f1''', 1_0_0.0 * sum(fa_scores[k] for k in qid_list ) / total), ('''total''', total), ] ) def a (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): for k in new_eval: SCREAMING_SNAKE_CASE_ = new_eval[k] def a (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): plt.step(A__ , A__ , color='''b''' , alpha=0.2 , where='''post''' ) plt.fill_between(A__ , A__ , step='''post''' , alpha=0.2 , color='''b''' ) plt.xlabel('''Recall''' ) plt.ylabel('''Precision''' ) plt.xlim([0.0, 1.05] ) plt.ylim([0.0, 1.05] ) plt.title(A__ ) plt.savefig(A__ ) plt.clf() def a (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None ): SCREAMING_SNAKE_CASE_ = sorted(A__ , key=lambda _lowerCAmelCase : na_probs[k] ) SCREAMING_SNAKE_CASE_ = 0.0 SCREAMING_SNAKE_CASE_ = 1.0 SCREAMING_SNAKE_CASE_ = 0.0 SCREAMING_SNAKE_CASE_ = [1.0] SCREAMING_SNAKE_CASE_ = [0.0] SCREAMING_SNAKE_CASE_ = 0.0 for i, qid in enumerate(A__ ): if qid_to_has_ans[qid]: true_pos += scores[qid] SCREAMING_SNAKE_CASE_ = true_pos / float(i + 1 ) SCREAMING_SNAKE_CASE_ = true_pos / float(A__ ) if i == len(A__ ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]: # i.e., if we can put a threshold after this point avg_prec += cur_p * (cur_r - recalls[-1]) precisions.append(A__ ) recalls.append(A__ ) if out_image: plot_pr_curve(A__ , A__ , A__ , A__ ) return {"ap": 1_0_0.0 * avg_prec} def a (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): if out_image_dir and not os.path.exists(A__ ): os.makedirs(A__ ) SCREAMING_SNAKE_CASE_ = sum(1 for v in qid_to_has_ans.values() if v ) if num_true_pos == 0: return SCREAMING_SNAKE_CASE_ = make_precision_recall_eval( A__ , A__ , A__ , A__ , out_image=os.path.join(A__ , '''pr_exact.png''' ) , title='''Precision-Recall curve for Exact Match score''' , ) SCREAMING_SNAKE_CASE_ = make_precision_recall_eval( A__ , A__ , A__ , A__ , out_image=os.path.join(A__ , '''pr_f1.png''' ) , title='''Precision-Recall curve for F1 score''' , ) SCREAMING_SNAKE_CASE_ = {k: float(A__ ) for k, v in qid_to_has_ans.items()} SCREAMING_SNAKE_CASE_ = make_precision_recall_eval( A__ , A__ , A__ , A__ , out_image=os.path.join(A__ , '''pr_oracle.png''' ) , title='''Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)''' , ) merge_eval(A__ , A__ , '''pr_exact''' ) merge_eval(A__ , A__ , '''pr_f1''' ) merge_eval(A__ , A__ , '''pr_oracle''' ) def a (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): if not qid_list: return SCREAMING_SNAKE_CASE_ = [na_probs[k] for k in qid_list] SCREAMING_SNAKE_CASE_ = np.ones_like(A__ ) / float(len(A__ ) ) plt.hist(A__ , weights=A__ , bins=2_0 , range=(0.0, 1.0) ) plt.xlabel('''Model probability of no-answer''' ) plt.ylabel('''Proportion of dataset''' ) plt.title(F"Histogram of no-answer probability: {name}" ) plt.savefig(os.path.join(A__ , F"na_prob_hist_{name}.png" ) ) plt.clf() def a (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): SCREAMING_SNAKE_CASE_ = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] ) SCREAMING_SNAKE_CASE_ = num_no_ans SCREAMING_SNAKE_CASE_ = cur_score SCREAMING_SNAKE_CASE_ = 0.0 SCREAMING_SNAKE_CASE_ = sorted(A__ , key=lambda _lowerCAmelCase : na_probs[k] ) for i, qid in enumerate(A__ ): if qid not in scores: continue if qid_to_has_ans[qid]: SCREAMING_SNAKE_CASE_ = scores[qid] else: if preds[qid]: SCREAMING_SNAKE_CASE_ = -1 else: SCREAMING_SNAKE_CASE_ = 0 cur_score += diff if cur_score > best_score: SCREAMING_SNAKE_CASE_ = cur_score SCREAMING_SNAKE_CASE_ = na_probs[qid] return 1_0_0.0 * best_score / len(A__ ), best_thresh def a (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = find_best_thresh(A__ , A__ , A__ , A__ ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = find_best_thresh(A__ , A__ , A__ , A__ ) SCREAMING_SNAKE_CASE_ = best_exact SCREAMING_SNAKE_CASE_ = exact_thresh SCREAMING_SNAKE_CASE_ = best_fa SCREAMING_SNAKE_CASE_ = fa_thresh def a (): with open(OPTS.data_file ) as f: SCREAMING_SNAKE_CASE_ = json.load(A__ ) SCREAMING_SNAKE_CASE_ = dataset_json['''data'''] with open(OPTS.pred_file ) as f: SCREAMING_SNAKE_CASE_ = json.load(A__ ) if OPTS.na_prob_file: with open(OPTS.na_prob_file ) as f: SCREAMING_SNAKE_CASE_ = json.load(A__ ) else: SCREAMING_SNAKE_CASE_ = {k: 0.0 for k in preds} SCREAMING_SNAKE_CASE_ = make_qid_to_has_ans(A__ ) # maps qid to True/False SCREAMING_SNAKE_CASE_ = [k for k, v in qid_to_has_ans.items() if v] SCREAMING_SNAKE_CASE_ = [k for k, v in qid_to_has_ans.items() if not v] SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = get_raw_scores(A__ , A__ ) SCREAMING_SNAKE_CASE_ = apply_no_ans_threshold(A__ , A__ , A__ , OPTS.na_prob_thresh ) SCREAMING_SNAKE_CASE_ = apply_no_ans_threshold(A__ , A__ , A__ , OPTS.na_prob_thresh ) SCREAMING_SNAKE_CASE_ = make_eval_dict(A__ , A__ ) if has_ans_qids: SCREAMING_SNAKE_CASE_ = make_eval_dict(A__ , A__ , qid_list=A__ ) merge_eval(A__ , A__ , '''HasAns''' ) if no_ans_qids: SCREAMING_SNAKE_CASE_ = make_eval_dict(A__ , A__ , qid_list=A__ ) merge_eval(A__ , A__ , '''NoAns''' ) if OPTS.na_prob_file: find_all_best_thresh(A__ , A__ , A__ , A__ , A__ , A__ ) if OPTS.na_prob_file and OPTS.out_image_dir: run_precision_recall_analysis(A__ , A__ , A__ , A__ , A__ , OPTS.out_image_dir ) histogram_na_prob(A__ , A__ , OPTS.out_image_dir , '''hasAns''' ) histogram_na_prob(A__ , A__ , OPTS.out_image_dir , '''noAns''' ) if OPTS.out_file: with open(OPTS.out_file , '''w''' ) as f: json.dump(A__ , A__ ) else: print(json.dumps(A__ , indent=2 ) ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =parse_args() if OPTS.out_image_dir: import matplotlib matplotlib.use("""Agg""") import matplotlib.pyplot as plt main()
234
'''simple docstring''' import io import math from typing import Dict, Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, get_image_size, infer_channel_dimension_format, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_vision_available, logging from ...utils.import_utils import requires_backends if is_vision_available(): import textwrap from PIL import Image, ImageDraw, ImageFont if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: lowerCAmelCase__ = False lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = '''ybelkada/fonts''' def _A ( ): """simple docstring""" if is_torch_available() and not is_torch_greater_or_equal_than_1_11: raise ImportError( F"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use " '''Pix2StructImageProcessor. Please upgrade torch.''' ) def _A ( A__ , A__ , A__ ): """simple docstring""" requires_backends(A__ , ['''torch'''] ) _check_torch_version() __lowercase = image_tensor.unsqueeze(0 ) __lowercase = torch.nn.functional.unfold(A__ , (patch_height, patch_width) , stride=(patch_height, patch_width) ) __lowercase = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , A__ , A__ , -1 ) __lowercase = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape( image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , ) return patches.unsqueeze(0 ) def _A ( A__ , A__ = 36 , A__ = "black" , A__ = "white" , A__ = 5 , A__ = 5 , A__ = 5 , A__ = 5 , A__ = None , A__ = None , ): """simple docstring""" requires_backends(A__ , '''vision''' ) # Add new lines so that each line is no more than 80 characters. __lowercase = textwrap.TextWrapper(width=80 ) __lowercase = wrapper.wrap(text=A__ ) __lowercase = '''\n'''.join(A__ ) if font_bytes is not None and font_path is None: __lowercase = io.BytesIO(A__ ) elif font_path is not None: __lowercase = font_path else: __lowercase = hf_hub_download(A__ , '''Arial.TTF''' ) __lowercase = ImageFont.truetype(A__ , encoding='''UTF-8''' , size=A__ ) # Use a temporary canvas to determine the width and height in pixels when # rendering the text. __lowercase = ImageDraw.Draw(Image.new('''RGB''' , (1, 1) , A__ ) ) __lowercase , __lowercase , __lowercase , __lowercase = temp_draw.textbbox((0, 0) , A__ , A__ ) # Create the actual image with a bit of padding around the text. __lowercase = text_width + left_padding + right_padding __lowercase = text_height + top_padding + bottom_padding __lowercase = Image.new('''RGB''' , (image_width, image_height) , A__ ) __lowercase = ImageDraw.Draw(A__ ) draw.text(xy=(left_padding, top_padding) , text=A__ , fill=A__ , font=A__ ) return image def _A ( A__ , A__ , **A__ ): """simple docstring""" requires_backends(A__ , '''vision''' ) # Convert to PIL image if necessary __lowercase = to_pil_image(A__ ) __lowercase = render_text(A__ , **A__ ) __lowercase = max(header_image.width , image.width ) __lowercase = int(image.height * (new_width / image.width) ) __lowercase = int(header_image.height * (new_width / header_image.width) ) __lowercase = Image.new('''RGB''' , (new_width, new_height + new_header_height) , '''white''' ) new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) ) new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) ) # Convert back to the original framework if necessary __lowercase = to_numpy_array(A__ ) if infer_channel_dimension_format(A__ ) == ChannelDimension.LAST: __lowercase = to_channel_dimension_format(A__ , ChannelDimension.LAST ) return new_image class lowercase_ (lowerCamelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = ['flattened_patches'] def __init__( self : Any ,lowercase__ : bool = True ,lowercase__ : bool = True ,lowercase__ : Dict[str, int] = None ,lowercase__ : int = 2_0_4_8 ,lowercase__ : bool = False ,**lowercase__ : List[str] ,): super().__init__(**lowercase__ ) __lowercase = patch_size if patch_size is not None else {'''height''': 1_6, '''width''': 1_6} __lowercase = do_normalize __lowercase = do_convert_rgb __lowercase = max_patches __lowercase = is_vqa def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : np.ndarray ,lowercase__ : int ,lowercase__ : dict ,**lowercase__ : Tuple ): requires_backends(self.extract_flattened_patches ,'''torch''' ) _check_torch_version() # convert to torch __lowercase = to_channel_dimension_format(lowercase__ ,ChannelDimension.FIRST ) __lowercase = torch.from_numpy(lowercase__ ) __lowercase , __lowercase = patch_size['''height'''], patch_size['''width'''] __lowercase , __lowercase = get_image_size(lowercase__ ) # maximize scale s.t. __lowercase = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) ) __lowercase = max(min(math.floor(scale * image_height / patch_height ) ,lowercase__ ) ,1 ) __lowercase = max(min(math.floor(scale * image_width / patch_width ) ,lowercase__ ) ,1 ) __lowercase = max(num_feasible_rows * patch_height ,1 ) __lowercase = max(num_feasible_cols * patch_width ,1 ) __lowercase = torch.nn.functional.interpolate( image.unsqueeze(0 ) ,size=(resized_height, resized_width) ,mode='''bilinear''' ,align_corners=lowercase__ ,antialias=lowercase__ ,).squeeze(0 ) # [1, rows, columns, patch_height * patch_width * image_channels] __lowercase = torch_extract_patches(lowercase__ ,lowercase__ ,lowercase__ ) __lowercase = patches.shape __lowercase = patches_shape[1] __lowercase = patches_shape[2] __lowercase = patches_shape[3] # [rows * columns, patch_height * patch_width * image_channels] __lowercase = patches.reshape([rows * columns, depth] ) # [rows * columns, 1] __lowercase = torch.arange(lowercase__ ).reshape([rows, 1] ).repeat(1 ,lowercase__ ).reshape([rows * columns, 1] ) __lowercase = torch.arange(lowercase__ ).reshape([1, columns] ).repeat(lowercase__ ,1 ).reshape([rows * columns, 1] ) # Offset by 1 so the ids do not contain zeros, which represent padding. row_ids += 1 col_ids += 1 # Prepare additional patch features. # [rows * columns, 1] __lowercase = row_ids.to(torch.floataa ) __lowercase = col_ids.to(torch.floataa ) # [rows * columns, 2 + patch_height * patch_width * image_channels] __lowercase = torch.cat([row_ids, col_ids, patches] ,-1 ) # [max_patches, 2 + patch_height * patch_width * image_channels] __lowercase = torch.nn.functional.pad(lowercase__ ,[0, 0, 0, max_patches - (rows * columns)] ).float() __lowercase = to_numpy_array(lowercase__ ) return result def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : np.ndarray ,lowercase__ : Optional[Union[str, ChannelDimension]] = None ,**lowercase__ : List[Any] ): if image.dtype == np.uinta: __lowercase = image.astype(np.floataa ) # take mean across the whole `image` __lowercase = np.mean(lowercase__ ) __lowercase = np.std(lowercase__ ) __lowercase = max(lowercase__ ,1.0 / math.sqrt(np.prod(image.shape ) ) ) return normalize(lowercase__ ,mean=lowercase__ ,std=lowercase__ ,**lowercase__ ) def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : ImageInput ,lowercase__ : Optional[str] = None ,lowercase__ : bool = None ,lowercase__ : Optional[bool] = None ,lowercase__ : Optional[int] = None ,lowercase__ : Optional[Dict[str, int]] = None ,lowercase__ : Optional[Union[str, TensorType]] = None ,lowercase__ : ChannelDimension = ChannelDimension.FIRST ,**lowercase__ : List[Any] ,): __lowercase = do_normalize if do_normalize is not None else self.do_normalize __lowercase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __lowercase = patch_size if patch_size is not None else self.patch_size __lowercase = max_patches if max_patches is not None else self.max_patches __lowercase = self.is_vqa if kwargs.get('''data_format''' ,lowercase__ ) is not None: raise ValueError('''data_format is not an accepted input as the outputs are ''' ) __lowercase = make_list_of_images(lowercase__ ) if not valid_images(lowercase__ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: __lowercase = [convert_to_rgb(lowercase__ ) for image in images] # All transformations expect numpy arrays. __lowercase = [to_numpy_array(lowercase__ ) for image in images] if is_vqa: if header_text is None: raise ValueError('''A header text must be provided for VQA models.''' ) __lowercase = kwargs.pop('''font_bytes''' ,lowercase__ ) __lowercase = kwargs.pop('''font_path''' ,lowercase__ ) if isinstance(lowercase__ ,lowercase__ ): __lowercase = [header_text] * len(lowercase__ ) __lowercase = [ render_header(lowercase__ ,header_text[i] ,font_bytes=lowercase__ ,font_path=lowercase__ ) for i, image in enumerate(lowercase__ ) ] if do_normalize: __lowercase = [self.normalize(image=lowercase__ ) for image in images] # convert to torch tensor and permute __lowercase = [ self.extract_flattened_patches(image=lowercase__ ,max_patches=lowercase__ ,patch_size=lowercase__ ) for image in images ] # create attention mask in numpy __lowercase = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images] __lowercase = BatchFeature( data={'''flattened_patches''': images, '''attention_mask''': attention_masks} ,tensor_type=lowercase__ ) return encoded_outputs
41
0
'''simple docstring''' def __UpperCamelCase ( UpperCAmelCase ): if not grid or not grid[0]: raise TypeError('''The grid does not contain the appropriate information''' ) for cell_n in range(1 , len(grid[0] ) ): grid[0][cell_n] += grid[0][cell_n - 1] lowercase__ : Optional[Any] = grid[0] for row_n in range(1 , len(A__ ) ): lowercase__ : Optional[int] = grid[row_n] lowercase__ : List[str] = fill_row(A__ , A__ ) lowercase__ : Dict = grid[row_n] return grid[-1][-1] def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ): current_row[0] += row_above[0] for cell_n in range(1 , len(A__ ) ): current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] ) return current_row if __name__ == "__main__": import doctest doctest.testmod()
152
'''simple docstring''' import doctest from collections import deque import numpy as np class lowercase_ : """simple docstring""" def __init__( self : Optional[Any] ): __lowercase = [2, 1, 2, -1] __lowercase = [1, 2, 3, 4] def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): __lowercase = len(self.first_signal ) __lowercase = len(self.second_signal ) __lowercase = max(lowercase__ ,lowercase__ ) # create a zero matrix of max_length x max_length __lowercase = [[0] * max_length for i in range(lowercase__ )] # fills the smaller signal with zeros to make both signals of same length if length_first_signal < length_second_signal: self.first_signal += [0] * (max_length - length_first_signal) elif length_first_signal > length_second_signal: self.second_signal += [0] * (max_length - length_second_signal) for i in range(lowercase__ ): __lowercase = deque(self.second_signal ) rotated_signal.rotate(lowercase__ ) for j, item in enumerate(lowercase__ ): matrix[i][j] += item # multiply the matrix with the first signal __lowercase = np.matmul(np.transpose(lowercase__ ) ,np.transpose(self.first_signal ) ) # rounding-off to two decimal places return [round(lowercase__ ,2 ) for i in final_signal] if __name__ == "__main__": doctest.testmod()
41
0
"""simple docstring""" from __future__ import annotations from pprint import pformat from typing import Generic, TypeVar __A = TypeVar("T") class UpperCAmelCase (Generic[T] ): """simple docstring""" def __init__( self , _UpperCAmelCase = True ): lowercase__: int = {} # dictionary of lists lowercase__: int = directed def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase ): if not self.directed: # For undirected graphs # if both source vertex and destination vertex are both present in the # adjacency list, add destination vertex to source vertex list of adjacent # vertices and add source vertex to destination vertex list of adjacent # vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(lowercase__ ) self.adj_list[destination_vertex].append(lowercase__ ) # if only source vertex is present in adjacency list, add destination vertex # to source vertex list of adjacent vertices, then create a new vertex with # destination vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(lowercase__ ) lowercase__: List[str] = [source_vertex] # if only destination vertex is present in adjacency list, add source vertex # to destination vertex list of adjacent vertices, then create a new vertex # with source vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. elif destination_vertex in self.adj_list: self.adj_list[destination_vertex].append(lowercase__ ) lowercase__: Optional[Any] = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and assign a list # containing the destination vertex as it's first adjacent vertex also # create a new vertex with destination vertex as key and assign a list # containing the source vertex as it's first adjacent vertex. else: lowercase__: Optional[Any] = [destination_vertex] lowercase__: List[Any] = [source_vertex] else: # For directed graphs # if both source vertex and destination vertex are present in adjacency # list, add destination vertex to source vertex list of adjacent vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(lowercase__ ) # if only source vertex is present in adjacency list, add destination # vertex to source vertex list of adjacent vertices and create a new vertex # with destination vertex as key, which has no adjacent vertex elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(lowercase__ ) lowercase__: Union[str, Any] = [] # if only destination vertex is present in adjacency list, create a new # vertex with source vertex as key and assign a list containing destination # vertex as first adjacent vertex elif destination_vertex in self.adj_list: lowercase__: List[Any] = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and a list containing # destination vertex as it's first adjacent vertex. Then create a new vertex # with destination vertex as key, which has no adjacent vertex else: lowercase__: int = [destination_vertex] lowercase__: List[Any] = [] return self def __repr__( self ): return pformat(self.adj_list )
586
'''simple docstring''' from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import torch from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available @dataclass class lowercase_ (lowerCamelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Union[List[np.ndarray], torch.FloatTensor] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_text_to_video_synth import TextToVideoSDPipeline from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401 from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
41
0
"""simple docstring""" from datasets.utils.patching import _PatchedModuleObj, patch_submodule from . import _test_patching def lowerCamelCase_()-> Dict: import os as original_os from os import path as original_path from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join _SCREAMING_SNAKE_CASE : Optional[int] = """__test_patch_submodule_mock__""" with patch_submodule(_test_patching , """os.path.join""" , A__ ): # Every way to access os.path.join must be patched, and the rest must stay untouched # check os.path.join assert isinstance(_test_patching.os , _PatchedModuleObj ) assert isinstance(_test_patching.os.path , _PatchedModuleObj ) assert _test_patching.os.path.join is mock # check path.join assert isinstance(_test_patching.path , _PatchedModuleObj ) assert _test_patching.path.join is mock # check join assert _test_patching.join is mock # check that the other attributes are untouched assert _test_patching.os.rename is original_rename assert _test_patching.path.dirname is original_dirname assert _test_patching.os.path.dirname is original_dirname # Even renamed modules or objects must be patched # check renamed_os.path.join assert isinstance(_test_patching.renamed_os , _PatchedModuleObj ) assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj ) assert _test_patching.renamed_os.path.join is mock # check renamed_path.join assert isinstance(_test_patching.renamed_path , _PatchedModuleObj ) assert _test_patching.renamed_path.join is mock # check renamed_join assert _test_patching.renamed_join is mock # check that the other attributes are untouched assert _test_patching.renamed_os.rename is original_rename assert _test_patching.renamed_path.dirname is original_dirname assert _test_patching.renamed_os.path.dirname is original_dirname # check that everthing is back to normal when the patch is over assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join def lowerCamelCase_()-> Dict: assert _test_patching.open is open _SCREAMING_SNAKE_CASE : Optional[int] = """__test_patch_submodule_builtin_mock__""" # _test_patching has "open" in its globals assert _test_patching.open is open with patch_submodule(_test_patching , """open""" , A__ ): assert _test_patching.open is mock # check that everthing is back to normal when the patch is over assert _test_patching.open is open def lowerCamelCase_()-> Any: _SCREAMING_SNAKE_CASE : int = """__test_patch_submodule_missing_mock__""" with patch_submodule(_test_patching , """pandas.read_csv""" , A__ ): pass def lowerCamelCase_()-> Union[str, Any]: _SCREAMING_SNAKE_CASE : Any = """__test_patch_submodule_missing_builtin_mock__""" # _test_patching doesn't have "len" in its globals assert getattr(_test_patching , """len""" , A__ ) is None with patch_submodule(_test_patching , """len""" , A__ ): assert _test_patching.len is mock assert _test_patching.len is len def lowerCamelCase_()-> Any: _SCREAMING_SNAKE_CASE : Dict = """__test_patch_submodule_start_and_stop_mock__""" _SCREAMING_SNAKE_CASE : List[str] = patch_submodule(_test_patching , """open""" , A__ ) assert _test_patching.open is open patch.start() assert _test_patching.open is mock patch.stop() assert _test_patching.open is open def lowerCamelCase_()-> Optional[int]: from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join _SCREAMING_SNAKE_CASE : Any = """__test_patch_submodule_successive_join__""" _SCREAMING_SNAKE_CASE : Union[str, Any] = """__test_patch_submodule_successive_dirname__""" _SCREAMING_SNAKE_CASE : int = """__test_patch_submodule_successive_rename__""" assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename with patch_submodule(_test_patching , """os.path.join""" , A__ ): with patch_submodule(_test_patching , """os.rename""" , A__ ): with patch_submodule(_test_patching , """os.path.dirname""" , A__ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename # try another order with patch_submodule(_test_patching , """os.rename""" , A__ ): with patch_submodule(_test_patching , """os.path.join""" , A__ ): with patch_submodule(_test_patching , """os.path.dirname""" , A__ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename def lowerCamelCase_()-> Dict: _SCREAMING_SNAKE_CASE : List[Any] = """__test_patch_submodule_doesnt_exist_mock__""" with patch_submodule(_test_patching , """__module_that_doesn_exist__.__attribute_that_doesn_exist__""" , A__ ): pass with patch_submodule(_test_patching , """os.__attribute_that_doesn_exist__""" , A__ ): pass
338
'''simple docstring''' import argparse import datetime import json import time import warnings from logging import getLogger from pathlib import Path from typing import Dict, List import torch from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params lowerCAmelCase__ = getLogger(__name__) lowerCAmelCase__ = '''cuda''' if torch.cuda.is_available() else '''cpu''' def _A ( A__ , A__ , A__ , A__ = 8 , A__ = DEFAULT_DEVICE , A__=False , A__="summarization" , A__=None , **A__ , ): """simple docstring""" __lowercase = Path(A__ ).open('''w''' , encoding='''utf-8''' ) __lowercase = str(A__ ) __lowercase = AutoModelForSeqaSeqLM.from_pretrained(A__ ).to(A__ ) if fpaa: __lowercase = model.half() __lowercase = AutoTokenizer.from_pretrained(A__ ) logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type. __lowercase = time.time() # update config with task specific params use_task_specific_params(A__ , A__ ) if prefix is None: __lowercase = prefix or getattr(model.config , '''prefix''' , '''''' ) or '''''' for examples_chunk in tqdm(list(chunks(A__ , A__ ) ) ): __lowercase = [prefix + text for text in examples_chunk] __lowercase = tokenizer(A__ , return_tensors='''pt''' , truncation=A__ , padding='''longest''' ).to(A__ ) __lowercase = model.generate( input_ids=batch.input_ids , attention_mask=batch.attention_mask , **A__ , ) __lowercase = tokenizer.batch_decode(A__ , skip_special_tokens=A__ , clean_up_tokenization_spaces=A__ ) for hypothesis in dec: fout.write(hypothesis + '''\n''' ) fout.flush() fout.close() __lowercase = int(time.time() - start_time ) # seconds __lowercase = len(A__ ) return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )} def _A ( ): """simple docstring""" return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' ) def _A ( A__=True ): """simple docstring""" __lowercase = argparse.ArgumentParser() parser.add_argument('''model_name''' , type=A__ , help='''like facebook/bart-large-cnn,t5-base, etc.''' ) parser.add_argument('''input_path''' , type=A__ , help='''like cnn_dm/test.source''' ) parser.add_argument('''save_path''' , type=A__ , help='''where to save summaries''' ) parser.add_argument('''--reference_path''' , type=A__ , required=A__ , help='''like cnn_dm/test.target''' ) parser.add_argument('''--score_path''' , type=A__ , required=A__ , default='''metrics.json''' , help='''where to save metrics''' ) parser.add_argument('''--device''' , type=A__ , required=A__ , default=A__ , help='''cuda, cuda:1, cpu etc.''' ) parser.add_argument( '''--prefix''' , type=A__ , required=A__ , default=A__ , help='''will be added to the begininng of src examples''' ) parser.add_argument('''--task''' , type=A__ , default='''summarization''' , help='''used for task_specific_params + metrics''' ) parser.add_argument('''--bs''' , type=A__ , default=8 , required=A__ , help='''batch size''' ) parser.add_argument( '''--n_obs''' , type=A__ , default=-1 , required=A__ , help='''How many observations. Defaults to all.''' ) parser.add_argument('''--fp16''' , action='''store_true''' ) parser.add_argument('''--dump-args''' , action='''store_true''' , help='''print the custom hparams with the results''' ) parser.add_argument( '''--info''' , nargs='''?''' , type=A__ , const=datetime_now() , help=( '''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.''' ''' lang=en-ru. If no value is passed, the current datetime string will be used.''' ) , ) # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate __lowercase , __lowercase = parser.parse_known_args() __lowercase = parse_numeric_n_bool_cl_kwargs(A__ ) if parsed_args and verbose: print(F"parsed the following generate kwargs: {parsed_args}" ) __lowercase = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()] if args.n_obs > 0: __lowercase = examples[: args.n_obs] Path(args.save_path ).parent.mkdir(exist_ok=A__ ) if args.reference_path is None and Path(args.score_path ).exists(): warnings.warn(F"score_path {args.score_path} will be overwritten unless you type ctrl-c." ) if args.device == "cpu" and args.fpaa: # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half' raise ValueError('''Can\'t mix --fp16 and --device cpu''' ) __lowercase = generate_summaries_or_translations( A__ , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **A__ , ) if args.reference_path is None: return {} # Compute scores __lowercase = calculate_bleu if '''translation''' in args.task else calculate_rouge __lowercase = [x.rstrip() for x in open(args.save_path ).readlines()] __lowercase = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(A__ )] __lowercase = score_fn(A__ , A__ ) scores.update(A__ ) if args.dump_args: scores.update(A__ ) if args.info: __lowercase = args.info if verbose: print(A__ ) if args.score_path is not None: json.dump(A__ , open(args.score_path , '''w''' ) ) return scores if __name__ == "__main__": # Usage for MT: # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@ run_generate(verbose=True)
41
0
import sys import turtle def a_ (__A , __A ) -> Union[str, Any]: """simple docstring""" return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2 def a_ (__A , __A , __A , __A , ) -> str: """simple docstring""" my_pen.up() my_pen.goto(vertexa[0] , vertexa[1] ) my_pen.down() my_pen.goto(vertexa[0] , vertexa[1] ) my_pen.goto(vertexa[0] , vertexa[1] ) my_pen.goto(vertexa[0] , vertexa[1] ) if depth == 0: return triangle(A__ , get_mid(A__ , A__ ) , get_mid(A__ , A__ ) , depth - 1 ) triangle(A__ , get_mid(A__ , A__ ) , get_mid(A__ , A__ ) , depth - 1 ) triangle(A__ , get_mid(A__ , A__ ) , get_mid(A__ , A__ ) , depth - 1 ) if __name__ == "__main__": if len(sys.argv) != 2: raise ValueError( '''Correct format for using this script: ''' '''python fractals.py <int:depth_for_fractal>''' ) UpperCAmelCase__ = turtle.Turtle() my_pen.ht() my_pen.speed(5) my_pen.pencolor('''red''') UpperCAmelCase__ = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
351
'''simple docstring''' from __future__ import annotations def _A ( A__ , A__ ): """simple docstring""" print(F"Vertex\tShortest Distance from vertex {src}" ) for i, d in enumerate(A__ ): print(F"{i}\t\t{d}" ) def _A ( A__ , A__ , A__ ): """simple docstring""" for j in range(A__ ): __lowercase , __lowercase , __lowercase = (graph[j][k] for k in ['''src''', '''dst''', '''weight''']) if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]: return True return False def _A ( A__ , A__ , A__ , A__ ): """simple docstring""" __lowercase = [float('''inf''' )] * vertex_count __lowercase = 0.0 for _ in range(vertex_count - 1 ): for j in range(A__ ): __lowercase , __lowercase , __lowercase = (graph[j][k] for k in ['''src''', '''dst''', '''weight''']) if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]: __lowercase = distance[u] + w __lowercase = check_negative_cycle(A__ , A__ , A__ ) if negative_cycle_exists: raise Exception('''Negative cycle found''' ) return distance if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase__ = int(input('''Enter number of vertices: ''').strip()) lowerCAmelCase__ = int(input('''Enter number of edges: ''').strip()) lowerCAmelCase__ = [{} for _ in range(E)] for i in range(E): print('''Edge ''', i + 1) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = ( int(x) for x in input('''Enter source, destination, weight: ''').strip().split(''' ''') ) lowerCAmelCase__ = {'''src''': src, '''dst''': dest, '''weight''': weight} lowerCAmelCase__ = int(input('''\nEnter shortest path source:''').strip()) lowerCAmelCase__ = bellman_ford(graph, V, E, source) print_distance(shortest_distance, 0)
41
0
"""simple docstring""" def __snake_case ( __A : int = 1000 ) -> Optional[Any]: '''simple docstring''' return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) ) if __name__ == "__main__": print(solution())
265
'''simple docstring''' import warnings from ...utils import logging from .image_processing_yolos import YolosImageProcessor lowerCAmelCase__ = logging.get_logger(__name__) class lowercase_ (lowerCamelCase__ ): """simple docstring""" def __init__( self : List[Any] ,*lowercase__ : Optional[Any] ,**lowercase__ : int ): warnings.warn( '''The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use YolosImageProcessor instead.''' ,lowercase__ ,) super().__init__(*lowercase__ ,**lowercase__ )
41
0
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import PoolFormerImageProcessor class __A ( unittest.TestCase ): def __init__( self :Tuple , __snake_case :Dict , __snake_case :Union[str, Any]=7 , __snake_case :List[Any]=3 , __snake_case :List[Any]=30 , __snake_case :List[Any]=4_00 , __snake_case :List[str]=True , __snake_case :Tuple=None , __snake_case :Optional[Any]=0.9 , __snake_case :List[str]=None , __snake_case :str=True , __snake_case :Dict=[0.5, 0.5, 0.5] , __snake_case :Dict=[0.5, 0.5, 0.5] , ): '''simple docstring''' __magic_name__ : Union[str, Any] =size if size is not None else {"""shortest_edge""": 30} __magic_name__ : str =crop_size if crop_size is not None else {"""height""": 30, """width""": 30} __magic_name__ : Dict =parent __magic_name__ : Optional[Any] =batch_size __magic_name__ : List[str] =num_channels __magic_name__ : int =min_resolution __magic_name__ : Optional[Any] =max_resolution __magic_name__ : Any =do_resize_and_center_crop __magic_name__ : Optional[int] =size __magic_name__ : str =crop_pct __magic_name__ : Optional[int] =crop_size __magic_name__ : Any =do_normalize __magic_name__ : Optional[int] =image_mean __magic_name__ : Optional[int] =image_std def A__ ( self :str ): '''simple docstring''' return { "size": self.size, "do_resize_and_center_crop": self.do_resize_and_center_crop, "crop_pct": self.crop_pct, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class __A ( lowerCamelCase__ , unittest.TestCase ): UpperCamelCase = PoolFormerImageProcessor if is_vision_available() else None def A__ ( self :int ): '''simple docstring''' __magic_name__ : List[str] =PoolFormerImageProcessingTester(self ) @property def A__ ( self :Dict ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def A__ ( self :Any ): '''simple docstring''' __magic_name__ : List[Any] =self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowercase__ , """do_resize_and_center_crop""" ) ) self.assertTrue(hasattr(lowercase__ , """size""" ) ) self.assertTrue(hasattr(lowercase__ , """crop_pct""" ) ) self.assertTrue(hasattr(lowercase__ , """do_normalize""" ) ) self.assertTrue(hasattr(lowercase__ , """image_mean""" ) ) self.assertTrue(hasattr(lowercase__ , """image_std""" ) ) def A__ ( self :Optional[int] ): '''simple docstring''' __magic_name__ : Optional[int] =self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 30} ) self.assertEqual(image_processor.crop_size , {"""height""": 30, """width""": 30} ) __magic_name__ : Optional[Any] =self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} ) def A__ ( self :int ): '''simple docstring''' pass def A__ ( self :Optional[Any] ): '''simple docstring''' __magic_name__ : Any =self.image_processing_class(**self.image_processor_dict ) # create random PIL images __magic_name__ : str =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase__ ) for image in image_inputs: self.assertIsInstance(lowercase__ , Image.Image ) # Test not batched input __magic_name__ : str =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched __magic_name__ : Optional[Any] =image_processing(lowercase__ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def A__ ( self :int ): '''simple docstring''' __magic_name__ : Union[str, Any] =self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __magic_name__ : Union[str, Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase__ , numpify=lowercase__ ) for image in image_inputs: self.assertIsInstance(lowercase__ , np.ndarray ) # Test not batched input __magic_name__ : int =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched __magic_name__ : int =image_processing(lowercase__ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def A__ ( self :Optional[Any] ): '''simple docstring''' __magic_name__ : int =self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __magic_name__ : Union[str, Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase__ , torchify=lowercase__ ) for image in image_inputs: self.assertIsInstance(lowercase__ , torch.Tensor ) # Test not batched input __magic_name__ : Optional[Any] =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched __magic_name__ : List[Any] =image_processing(lowercase__ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
21
'''simple docstring''' import os import time import pytest from datasets.utils.filelock import FileLock, Timeout def _A ( A__ ): """simple docstring""" __lowercase = FileLock(str(tmpdir / '''foo.lock''' ) ) __lowercase = FileLock(str(tmpdir / '''foo.lock''' ) ) __lowercase = 0.0_1 with locka.acquire(): with pytest.raises(A__ ): __lowercase = time.time() locka.acquire(A__ ) assert time.time() - _start > timeout def _A ( A__ ): """simple docstring""" __lowercase = '''a''' * 1000 + '''.lock''' __lowercase = FileLock(str(tmpdir / filename ) ) assert locka._lock_file.endswith('''.lock''' ) assert not locka._lock_file.endswith(A__ ) assert len(os.path.basename(locka._lock_file ) ) <= 255 __lowercase = FileLock(tmpdir / filename ) with locka.acquire(): with pytest.raises(A__ ): locka.acquire(0 )
41
0
"""simple docstring""" import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class lowerCamelCase__ ( lowerCamelCase__ ): '''simple docstring''' def __init__( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = False ,**lowerCamelCase_ ,) -> int: super().__init__(features=lowercase__ ,cache_dir=lowercase__ ,keep_in_memory=lowercase__ ,**lowercase__ ) A = Sql( cache_dir=lowercase__ ,features=lowercase__ ,sql=lowercase__ ,con=lowercase__ ,**lowercase__ ,) def UpperCamelCase__ ( self ) -> Optional[int]: A = None A = None A = None A = None self.builder.download_and_prepare( download_config=lowercase__ ,download_mode=lowercase__ ,verification_mode=lowercase__ ,base_path=lowercase__ ,) # Build dataset for splits A = self.builder.as_dataset( split="""train""" ,verification_mode=lowercase__ ,in_memory=self.keep_in_memory ) return dataset class lowerCamelCase__ : '''simple docstring''' def __init__( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,**lowerCamelCase_ ,) -> Tuple: if num_proc is not None and num_proc <= 0: raise ValueError(f'num_proc {num_proc} must be an integer > 0.' ) A = dataset A = name A = con A = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE A = num_proc A = to_sql_kwargs def UpperCamelCase__ ( self ) -> Optional[Any]: A = self.to_sql_kwargs.pop("""sql""" ,lowercase__ ) A = self.to_sql_kwargs.pop("""con""" ,lowercase__ ) A = self.to_sql_kwargs.pop("""index""" ,lowercase__ ) A = self._write(index=lowercase__ ,**self.to_sql_kwargs ) return written def UpperCamelCase__ ( self ,lowerCamelCase_ ) -> Optional[Any]: A , A , A = args A = {**to_sql_kwargs, """if_exists""": """append"""} if offset > 0 else to_sql_kwargs A = query_table( table=self.dataset.data ,key=slice(lowercase__ ,offset + self.batch_size ) ,indices=self.dataset._indices ,) A = batch.to_pandas() A = df.to_sql(self.name ,self.con ,index=lowercase__ ,**lowercase__ ) return num_rows or len(lowercase__ ) def UpperCamelCase__ ( self ,lowerCamelCase_ ,**lowerCamelCase_ ) -> str: A = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 ,len(self.dataset ) ,self.batch_size ) ,unit="""ba""" ,disable=not logging.is_progress_bar_enabled() ,desc="""Creating SQL from Arrow format""" ,): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: A , A = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql ,[(offset, index, to_sql_kwargs) for offset in range(0 ,lowercase__ ,lowercase__ )] ,) ,total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size ,unit="""ba""" ,disable=not logging.is_progress_bar_enabled() ,desc="""Creating SQL from Arrow format""" ,): written += num_rows return written
617
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) lowerCAmelCase__ = { '''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GPTBigCodeForSequenceClassification''', '''GPTBigCodeForTokenClassification''', '''GPTBigCodeForCausalLM''', '''GPTBigCodeModel''', '''GPTBigCodePreTrainedModel''', ] if TYPE_CHECKING: from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_bigcode import ( GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTBigCodeForCausalLM, GPTBigCodeForSequenceClassification, GPTBigCodeForTokenClassification, GPTBigCodeModel, GPTBigCodePreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
41
0
"""simple docstring""" from sklearn.metrics import fa_score, matthews_corrcoef import datasets from .record_evaluation import evaluate as evaluate_record lowerCAmelCase_ : str = '''\ @article{wang2019superglue, title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems}, author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R}, journal={arXiv preprint arXiv:1905.00537}, year={2019} } ''' lowerCAmelCase_ : Union[str, Any] = '''\ SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after GLUE with a new set of more difficult language understanding tasks, improved resources, and a new public leaderboard. ''' lowerCAmelCase_ : List[Any] = ''' Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset. Args: predictions: list of predictions to score. Depending on the SuperGlUE subset: - for \'record\': list of question-answer dictionaries with the following keys: - \'idx\': index of the question as specified by the dataset - \'prediction_text\': the predicted answer text - for \'multirc\': list of question-answer dictionaries with the following keys: - \'idx\': index of the question-answer pair as specified by the dataset - \'prediction\': the predicted answer label - otherwise: list of predicted labels references: list of reference labels. Depending on the SuperGLUE subset: - for \'record\': list of question-answers dictionaries with the following keys: - \'idx\': index of the question as specified by the dataset - \'answers\': list of possible answers - otherwise: list of reference labels Returns: depending on the SuperGLUE subset: - for \'record\': - \'exact_match\': Exact match between answer and gold answer - \'f1\': F1 score - for \'multirc\': - \'exact_match\': Exact match between answer and gold answer - \'f1_m\': Per-question macro-F1 score - \'f1_a\': Average F1 score over all answers - for \'axb\': \'matthews_correlation\': Matthew Correlation - for \'cb\': - \'accuracy\': Accuracy - \'f1\': F1 score - for all others: - \'accuracy\': Accuracy Examples: >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"] >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\') >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0, \'f1\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\') >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}] >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'exact_match\': 1.0, \'f1\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\') >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\') >>> references = [0, 1] >>> predictions = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'matthews_correlation\': 1.0} ''' def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase ): '''simple docstring''' return float((preds == labels).mean() ) def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase="binary" ): '''simple docstring''' UpperCAmelCase = simple_accuracy(A__ , A__ ) UpperCAmelCase = float(fa_score(y_true=A__ , y_pred=A__ , average=A__ ) ) return { "accuracy": acc, "f1": fa, } def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase ): '''simple docstring''' UpperCAmelCase = {} for id_pred, label in zip(A__ , A__ ): UpperCAmelCase = F'''{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}''' UpperCAmelCase = id_pred["""prediction"""] if question_id in question_map: question_map[question_id].append((pred, label) ) else: UpperCAmelCase = [(pred, label)] UpperCAmelCase , UpperCAmelCase = [], [] for question, preds_labels in question_map.items(): UpperCAmelCase , UpperCAmelCase = zip(*A__ ) UpperCAmelCase = fa_score(y_true=A__ , y_pred=A__ , average="""macro""" ) fas.append(A__ ) UpperCAmelCase = int(sum(pred == label for pred, label in preds_labels ) == len(A__ ) ) ems.append(A__ ) UpperCAmelCase = float(sum(A__ ) / len(A__ ) ) UpperCAmelCase = sum(A__ ) / len(A__ ) UpperCAmelCase = float(fa_score(y_true=A__ , y_pred=[id_pred["""prediction"""] for id_pred in ids_preds] ) ) return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a} @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase_ ( datasets.Metric ): def UpperCamelCase_ ( self ) -> Optional[int]: """simple docstring""" if self.config_name not in [ "boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg", ]: raise KeyError( """You should supply a configuration name selected in """ """[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , ) def UpperCamelCase_ ( self ) -> int: """simple docstring""" if self.config_name == "record": return { "predictions": { "idx": { "passage": datasets.Value("""int64""" ), "query": datasets.Value("""int64""" ), }, "prediction_text": datasets.Value("""string""" ), }, "references": { "idx": { "passage": datasets.Value("""int64""" ), "query": datasets.Value("""int64""" ), }, "answers": datasets.Sequence(datasets.Value("""string""" ) ), }, } elif self.config_name == "multirc": return { "predictions": { "idx": { "answer": datasets.Value("""int64""" ), "paragraph": datasets.Value("""int64""" ), "question": datasets.Value("""int64""" ), }, "prediction": datasets.Value("""int64""" ), }, "references": datasets.Value("""int64""" ), } else: return { "predictions": datasets.Value("""int64""" ), "references": datasets.Value("""int64""" ), } def UpperCamelCase_ ( self , snake_case__ , snake_case__ ) -> int: """simple docstring""" if self.config_name == "axb": return {"matthews_correlation": matthews_corrcoef(lowercase__ , lowercase__ )} elif self.config_name == "cb": return acc_and_fa(lowercase__ , lowercase__ , fa_avg="""macro""" ) elif self.config_name == "record": UpperCAmelCase = [ { """qas""": [ {"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]} for ref in references ] } ] UpperCAmelCase = {pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions} return evaluate_record(lowercase__ , lowercase__ )[0] elif self.config_name == "multirc": return evaluate_multirc(lowercase__ , lowercase__ ) elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]: return {"accuracy": simple_accuracy(lowercase__ , lowercase__ )} else: raise KeyError( """You should supply a configuration name selected in """ """[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
673
'''simple docstring''' import argparse import os import re lowerCAmelCase__ = '''src/diffusers''' # Pattern that looks at the indentation in a line. lowerCAmelCase__ = re.compile(R'''^(\s*)\S''') # Pattern that matches `"key":" and puts `key` in group 0. lowerCAmelCase__ = re.compile(R'''^\s*"([^"]+)":''') # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. lowerCAmelCase__ = re.compile(R'''^\s*_import_structure\["([^"]+)"\]''') # Pattern that matches `"key",` and puts `key` in group 0. lowerCAmelCase__ = re.compile(R'''^\s*"([^"]+)",\s*$''') # Pattern that matches any `[stuff]` and puts `stuff` in group 0. lowerCAmelCase__ = re.compile(R'''\[([^\]]+)\]''') def _A ( A__ ): """simple docstring""" __lowercase = _re_indent.search(A__ ) return "" if search is None else search.groups()[0] def _A ( A__ , A__="" , A__=None , A__=None ): """simple docstring""" __lowercase = 0 __lowercase = code.split('''\n''' ) if start_prompt is not None: while not lines[index].startswith(A__ ): index += 1 __lowercase = ['''\n'''.join(lines[:index] )] else: __lowercase = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). __lowercase = [lines[index]] index += 1 while index < len(A__ ) and (end_prompt is None or not lines[index].startswith(A__ )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(A__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ): current_block.append(lines[index] ) blocks.append('''\n'''.join(A__ ) ) if index < len(A__ ) - 1: __lowercase = [lines[index + 1]] index += 1 else: __lowercase = [] else: blocks.append('''\n'''.join(A__ ) ) __lowercase = [lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(A__ ) > 0: blocks.append('''\n'''.join(A__ ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(A__ ): blocks.append('''\n'''.join(lines[index:] ) ) return blocks def _A ( A__ ): """simple docstring""" def _inner(A__ ): return key(A__ ).lower().replace('''_''' , '''''' ) return _inner def _A ( A__ , A__=None ): """simple docstring""" def noop(A__ ): return x if key is None: __lowercase = noop # Constants are all uppercase, they go first. __lowercase = [obj for obj in objects if key(A__ ).isupper()] # Classes are not all uppercase but start with a capital, they go second. __lowercase = [obj for obj in objects if key(A__ )[0].isupper() and not key(A__ ).isupper()] # Functions begin with a lowercase, they go last. __lowercase = [obj for obj in objects if not key(A__ )[0].isupper()] __lowercase = ignore_underscore(A__ ) return sorted(A__ , key=A__ ) + sorted(A__ , key=A__ ) + sorted(A__ , key=A__ ) def _A ( A__ ): """simple docstring""" def _replace(A__ ): __lowercase = match.groups()[0] if "," not in imports: return F"[{imports}]" __lowercase = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: __lowercase = keys[:-1] return "[" + ", ".join([F"\"{k}\"" for k in sort_objects(A__ )] ) + "]" __lowercase = import_statement.split('''\n''' ) if len(A__ ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. __lowercase = 2 if lines[1].strip() == '''[''' else 1 __lowercase = [(i, _re_strip_line.search(A__ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] __lowercase = sort_objects(A__ , key=lambda A__ : x[1] ) __lowercase = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(A__ ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: __lowercase = _re_bracket_content.sub(_replace , lines[1] ) else: __lowercase = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: __lowercase = keys[:-1] __lowercase = get_indent(lines[1] ) + ''', '''.join([F"\"{k}\"" for k in sort_objects(A__ )] ) return "\n".join(A__ ) else: # Finally we have to deal with imports fitting on one line __lowercase = _re_bracket_content.sub(_replace , A__ ) return import_statement def _A ( A__ , A__=True ): """simple docstring""" with open(A__ , '''r''' ) as f: __lowercase = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 __lowercase = split_code_in_indented_blocks( A__ , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' ) # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(A__ ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. __lowercase = main_blocks[block_idx] __lowercase = block.split('''\n''' ) # Get to the start of the imports. __lowercase = 0 while line_idx < len(A__ ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: __lowercase = len(A__ ) else: line_idx += 1 if line_idx >= len(A__ ): continue # Ignore beginning and last line: they don't contain anything. __lowercase = '''\n'''.join(block_lines[line_idx:-1] ) __lowercase = get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. __lowercase = split_code_in_indented_blocks(A__ , indent_level=A__ ) # We have two categories of import key: list or _import_structure[key].append/extend __lowercase = _re_direct_key if '''_import_structure''' in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. __lowercase = [(pattern.search(A__ ).groups()[0] if pattern.search(A__ ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. __lowercase = [(i, key) for i, key in enumerate(A__ ) if key is not None] __lowercase = [x[0] for x in sorted(A__ , key=lambda A__ : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. __lowercase = 0 __lowercase = [] for i in range(len(A__ ) ): if keys[i] is None: reordered_blocks.append(internal_blocks[i] ) else: __lowercase = sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reordered_blocks.append(A__ ) count += 1 # And we put our main block back together with its first and last line. __lowercase = '''\n'''.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] ) if code != "\n".join(A__ ): if check_only: return True else: print(F"Overwriting {file}." ) with open(A__ , '''w''' ) as f: f.write('''\n'''.join(A__ ) ) def _A ( A__=True ): """simple docstring""" __lowercase = [] for root, _, files in os.walk(A__ ): if "__init__.py" in files: __lowercase = sort_imports(os.path.join(A__ , '''__init__.py''' ) , check_only=A__ ) if result: __lowercase = [os.path.join(A__ , '''__init__.py''' )] if len(A__ ) > 0: raise ValueError(F"Would overwrite {len(A__ )} files, run `make style`." ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''') lowerCAmelCase__ = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
41
0
import math import numpy as np import qiskit from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute def _UpperCAmelCase ( UpperCAmelCase : Optional[int] = 3 ): """simple docstring""" if isinstance(A__ , A__ ): raise TypeError("""number of qubits must be a integer.""" ) if number_of_qubits <= 0: raise ValueError("""number of qubits must be > 0.""" ) if math.floor(A__ ) != number_of_qubits: raise ValueError("""number of qubits must be exact integer.""" ) if number_of_qubits > 10: raise ValueError("""number of qubits too large to simulate(>10).""" ) __lowerCamelCase : List[Any] = QuantumRegister(A__ , """qr""" ) __lowerCamelCase : List[str] = ClassicalRegister(A__ , """cr""" ) __lowerCamelCase : int = QuantumCircuit(A__ , A__ ) __lowerCamelCase : int = number_of_qubits for i in range(A__ ): quantum_circuit.h(number_of_qubits - i - 1 ) counter -= 1 for j in range(A__ ): quantum_circuit.cp(np.pi / 2 ** (counter - j) , A__ , A__ ) for k in range(number_of_qubits // 2 ): quantum_circuit.swap(A__ , number_of_qubits - k - 1 ) # measure all the qubits quantum_circuit.measure(A__ , A__ ) # simulate with 10000 shots __lowerCamelCase : List[str] = Aer.get_backend("""qasm_simulator""" ) __lowerCamelCase : Optional[Any] = execute(A__ , A__ , shots=10_000 ) return job.result().get_counts(A__ ) if __name__ == "__main__": print( F'''Total count for quantum fourier transform state is: \ {quantum_fourier_transform(3)}''' )
519
'''simple docstring''' import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class lowercase_ (lowerCamelCase__ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = TextToVideoSDPipeline SCREAMING_SNAKE_CASE : List[str] = TEXT_TO_IMAGE_PARAMS SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. SCREAMING_SNAKE_CASE : Optional[int] = frozenset( [ 'num_inference_steps', 'generator', 'latents', 'return_dict', 'callback', 'callback_steps', ] ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): torch.manual_seed(0 ) __lowercase = UNetaDConditionModel( block_out_channels=(3_2, 6_4, 6_4, 6_4) ,layers_per_block=2 ,sample_size=3_2 ,in_channels=4 ,out_channels=4 ,down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') ,up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') ,cross_attention_dim=3_2 ,attention_head_dim=4 ,) __lowercase = DDIMScheduler( beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule='''scaled_linear''' ,clip_sample=lowercase__ ,set_alpha_to_one=lowercase__ ,) torch.manual_seed(0 ) __lowercase = AutoencoderKL( block_out_channels=[3_2, 6_4] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=4 ,sample_size=1_2_8 ,) torch.manual_seed(0 ) __lowercase = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=3_2 ,intermediate_size=3_7 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,hidden_act='''gelu''' ,projection_dim=5_1_2 ,) __lowercase = CLIPTextModel(lowercase__ ) __lowercase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) __lowercase = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, } return components def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : int ,lowercase__ : List[str]=0 ): if str(lowercase__ ).startswith('''mps''' ): __lowercase = torch.manual_seed(lowercase__ ) else: __lowercase = torch.Generator(device=lowercase__ ).manual_seed(lowercase__ ) __lowercase = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''pt''', } return inputs def SCREAMING_SNAKE_CASE ( self : Optional[int] ): __lowercase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __lowercase = self.get_dummy_components() __lowercase = TextToVideoSDPipeline(**lowercase__ ) __lowercase = sd_pipe.to(lowercase__ ) sd_pipe.set_progress_bar_config(disable=lowercase__ ) __lowercase = self.get_dummy_inputs(lowercase__ ) __lowercase = '''np''' __lowercase = sd_pipe(**lowercase__ ).frames __lowercase = frames[0][-3:, -3:, -1] assert frames[0].shape == (6_4, 6_4, 3) __lowercase = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowercase__ ,expected_max_diff=3e-3 ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,) def SCREAMING_SNAKE_CASE ( self : Any ): self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowercase__ ,expected_max_diff=1e-2 ) @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' ) def SCREAMING_SNAKE_CASE ( self : List[str] ): pass @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' ) def SCREAMING_SNAKE_CASE ( self : Tuple ): pass @unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' ) def SCREAMING_SNAKE_CASE ( self : Tuple ): pass def SCREAMING_SNAKE_CASE ( self : List[str] ): return super().test_progress_bar() @slow @skip_mps class lowercase_ (unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self : int ): __lowercase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy''' ) __lowercase = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' ) __lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) __lowercase = pipe.to('''cuda''' ) __lowercase = '''Spiderman is surfing''' __lowercase = torch.Generator(device='''cpu''' ).manual_seed(0 ) __lowercase = pipe(lowercase__ ,generator=lowercase__ ,num_inference_steps=2_5 ,output_type='''pt''' ).frames __lowercase = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2 def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): __lowercase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy''' ) __lowercase = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' ) __lowercase = pipe.to('''cuda''' ) __lowercase = '''Spiderman is surfing''' __lowercase = torch.Generator(device='''cpu''' ).manual_seed(0 ) __lowercase = pipe(lowercase__ ,generator=lowercase__ ,num_inference_steps=2 ,output_type='''pt''' ).frames __lowercase = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2
41
0
"""simple docstring""" import sacrebleu as scb from packaging import version from sacrebleu import CHRF import datasets __snake_case : List[Any] = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n' __snake_case : int = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n' __snake_case : Any = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase ( datasets.Metric ): """simple docstring""" def A__ ( self : List[Any] ): if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ): raise ImportWarning( '''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n''' '''You can install it with `pip install "sacrebleu>=1.4.12"`.''' ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/mjpost/sacreBLEU#chrf--chrf''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ), } ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#chrf--chrf'''] , reference_urls=[ '''https://github.com/m-popovic/chrF''', ] , ) def A__ ( self : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : List[Any] , _lowerCamelCase : int = CHRF.CHAR_ORDER , _lowerCamelCase : int = CHRF.WORD_ORDER , _lowerCamelCase : int = CHRF.BETA , _lowerCamelCase : bool = False , _lowerCamelCase : bool = False , _lowerCamelCase : bool = False , ): A__ = len(references[0] ) if any(len(lowercase__ ) != references_per_prediction for refs in references ): raise ValueError('''Sacrebleu requires the same number of references for each prediction''' ) A__ = [[refs[i] for refs in references] for i in range(lowercase__ )] A__ = CHRF(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) A__ = sb_chrf.corpus_score(lowercase__ , lowercase__ ) return { "score": output.score, "char_order": output.char_order, "word_order": output.word_order, "beta": output.beta, }
571
'''simple docstring''' import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def _A ( A__ ): """simple docstring""" __lowercase = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''_float_tensor''', '''decoder.output_projection.weight''', ] for k in ignore_keys: state_dict.pop(A__ , A__ ) def _A ( A__ ): """simple docstring""" __lowercase , __lowercase = emb.weight.shape __lowercase = nn.Linear(A__ , A__ , bias=A__ ) __lowercase = emb.weight.data return lin_layer def _A ( A__ , A__="facebook/mbart-large-en-ro" , A__=False , A__=False ): """simple docstring""" __lowercase = torch.load(A__ , map_location='''cpu''' )['''model'''] remove_ignore_keys_(A__ ) __lowercase = state_dict['''encoder.embed_tokens.weight'''].shape[0] __lowercase = MBartConfig.from_pretrained(A__ , vocab_size=A__ ) if mbart_aa and finetuned: __lowercase = '''relu''' __lowercase = state_dict['''decoder.embed_tokens.weight'''] __lowercase = MBartForConditionalGeneration(A__ ) model.model.load_state_dict(A__ ) if finetuned: __lowercase = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default='''facebook/mbart-large-cc25''', type=str, help='''Which huggingface architecture to use: mbart-large''', ) parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''') parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''') lowerCAmelCase__ = parser.parse_args() lowerCAmelCase__ = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
41
0
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={ """microsoft/deberta-v2-xlarge""": """https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json""", """microsoft/deberta-v2-xxlarge""": """https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json""", """microsoft/deberta-v2-xlarge-mnli""": ( """https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json""" ), """microsoft/deberta-v2-xxlarge-mnli""": ( """https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json""" ), } class __magic_name__ ( lowerCamelCase__): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = 'deberta-v2' def __init__( self: int , _lowerCamelCase: Optional[Any]=12_81_00 , _lowerCamelCase: str=15_36 , _lowerCamelCase: List[Any]=24 , _lowerCamelCase: Union[str, Any]=24 , _lowerCamelCase: Optional[Any]=61_44 , _lowerCamelCase: Union[str, Any]="gelu" , _lowerCamelCase: Union[str, Any]=0.1 , _lowerCamelCase: Tuple=0.1 , _lowerCamelCase: Dict=5_12 , _lowerCamelCase: List[str]=0 , _lowerCamelCase: int=0.02 , _lowerCamelCase: str=1E-7 , _lowerCamelCase: Any=False , _lowerCamelCase: Union[str, Any]=-1 , _lowerCamelCase: Optional[int]=0 , _lowerCamelCase: List[str]=True , _lowerCamelCase: List[str]=None , _lowerCamelCase: Optional[int]=0 , _lowerCamelCase: str="gelu" , **_lowerCamelCase: Optional[Any] , ): super().__init__(**lowercase__ ) SCREAMING_SNAKE_CASE_ = hidden_size SCREAMING_SNAKE_CASE_ = num_hidden_layers SCREAMING_SNAKE_CASE_ = num_attention_heads SCREAMING_SNAKE_CASE_ = intermediate_size SCREAMING_SNAKE_CASE_ = hidden_act SCREAMING_SNAKE_CASE_ = hidden_dropout_prob SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ = max_position_embeddings SCREAMING_SNAKE_CASE_ = type_vocab_size SCREAMING_SNAKE_CASE_ = initializer_range SCREAMING_SNAKE_CASE_ = relative_attention SCREAMING_SNAKE_CASE_ = max_relative_positions SCREAMING_SNAKE_CASE_ = pad_token_id SCREAMING_SNAKE_CASE_ = position_biased_input # Backwards compatibility if type(lowercase__ ) == str: SCREAMING_SNAKE_CASE_ = [x.strip() for x in pos_att_type.lower().split('''|''' )] SCREAMING_SNAKE_CASE_ = pos_att_type SCREAMING_SNAKE_CASE_ = vocab_size SCREAMING_SNAKE_CASE_ = layer_norm_eps SCREAMING_SNAKE_CASE_ = kwargs.get('''pooler_hidden_size''' , lowercase__ ) SCREAMING_SNAKE_CASE_ = pooler_dropout SCREAMING_SNAKE_CASE_ = pooler_hidden_act class __magic_name__ ( lowerCamelCase__): '''simple docstring''' @property def _A ( self: Dict ): if self.task == "multiple-choice": SCREAMING_SNAKE_CASE_ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: SCREAMING_SNAKE_CASE_ = {0: '''batch''', 1: '''sequence'''} if self._config.type_vocab_size > 0: return OrderedDict( [('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis)] ) else: return OrderedDict([('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis)] ) @property def _A ( self: str ): return 12 def _A ( self: List[Any] , _lowerCamelCase: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , _lowerCamelCase: int = -1 , _lowerCamelCase: int = -1 , _lowerCamelCase: int = -1 , _lowerCamelCase: bool = False , _lowerCamelCase: Optional["TensorType"] = None , _lowerCamelCase: int = 3 , _lowerCamelCase: int = 40 , _lowerCamelCase: int = 40 , _lowerCamelCase: "PreTrainedTokenizerBase" = None , ): SCREAMING_SNAKE_CASE_ = super().generate_dummy_inputs(preprocessor=lowercase__ , framework=lowercase__ ) if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs: del dummy_inputs["token_type_ids"] return dummy_inputs
234
'''simple docstring''' import os from math import logaa def _A ( A__ = "base_exp.txt" ): """simple docstring""" __lowercase = 0 __lowercase = 0 for i, line in enumerate(open(os.path.join(os.path.dirname(A__ ) , A__ ) ) ): __lowercase , __lowercase = list(map(A__ , line.split(''',''' ) ) ) if x * logaa(A__ ) > largest: __lowercase = x * logaa(A__ ) __lowercase = i + 1 return result if __name__ == "__main__": print(solution())
41
0
'''simple docstring''' from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging __a: Any = logging.get_logger(__name__) __a: str = { """huggingface/time-series-transformer-tourism-monthly""": ( """https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json""" ), # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer } class UpperCAmelCase ( lowerCamelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE = 'time_series_transformer' SCREAMING_SNAKE_CASE = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', 'num_hidden_layers': 'encoder_layers', } def __init__( self , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = "student_t" , __lowerCAmelCase = "nll" , __lowerCAmelCase = 1 , __lowerCAmelCase = [1, 2, 3, 4, 5, 6, 7] , __lowerCAmelCase = "mean" , __lowerCAmelCase = 0 , __lowerCAmelCase = 0 , __lowerCAmelCase = 0 , __lowerCAmelCase = 0 , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = 32 , __lowerCAmelCase = 32 , __lowerCAmelCase = 2 , __lowerCAmelCase = 2 , __lowerCAmelCase = 2 , __lowerCAmelCase = 2 , __lowerCAmelCase = True , __lowerCAmelCase = "gelu" , __lowerCAmelCase = 64 , __lowerCAmelCase = 0.1 , __lowerCAmelCase = 0.1 , __lowerCAmelCase = 0.1 , __lowerCAmelCase = 0.1 , __lowerCAmelCase = 0.1 , __lowerCAmelCase = 100 , __lowerCAmelCase = 0.0_2 , __lowerCAmelCase=True , **__lowerCAmelCase , ) -> List[str]: # time series specific configuration lowercase__ : Tuple = prediction_length lowercase__ : Any = context_length or prediction_length lowercase__ : Union[str, Any] = distribution_output lowercase__ : str = loss lowercase__ : int = input_size lowercase__ : Optional[Any] = num_time_features lowercase__ : Optional[Any] = lags_sequence lowercase__ : List[Any] = scaling lowercase__ : int = num_dynamic_real_features lowercase__ : Optional[int] = num_static_real_features lowercase__ : str = num_static_categorical_features if cardinality and num_static_categorical_features > 0: if len(lowercase__ ) != num_static_categorical_features: raise ValueError( '''The cardinality should be a list of the same length as `num_static_categorical_features`''' ) lowercase__ : Optional[int] = cardinality else: lowercase__ : int = [0] if embedding_dimension and num_static_categorical_features > 0: if len(lowercase__ ) != num_static_categorical_features: raise ValueError( '''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' ) lowercase__ : Optional[Any] = embedding_dimension else: lowercase__ : Union[str, Any] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality] lowercase__ : List[Any] = num_parallel_samples # Transformer architecture configuration lowercase__ : Dict = input_size * len(lowercase__ ) + self._number_of_features lowercase__ : str = d_model lowercase__ : Optional[Any] = encoder_attention_heads lowercase__ : Any = decoder_attention_heads lowercase__ : Tuple = encoder_ffn_dim lowercase__ : Optional[Any] = decoder_ffn_dim lowercase__ : Optional[int] = encoder_layers lowercase__ : Optional[int] = decoder_layers lowercase__ : List[str] = dropout lowercase__ : List[Any] = attention_dropout lowercase__ : Optional[Any] = activation_dropout lowercase__ : List[Any] = encoder_layerdrop lowercase__ : Dict = decoder_layerdrop lowercase__ : Optional[int] = activation_function lowercase__ : List[str] = init_std lowercase__ : List[Any] = use_cache super().__init__(is_encoder_decoder=lowercase__ , **lowercase__ ) @property def _lowerCAmelCase( self ) -> List[Any]: return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
152
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''', # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class lowercase_ (lowerCamelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = 'blenderbot-small' SCREAMING_SNAKE_CASE : int = ['past_key_values'] SCREAMING_SNAKE_CASE : List[str] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self : Optional[int] ,lowercase__ : List[str]=5_0_2_6_5 ,lowercase__ : Optional[Any]=5_1_2 ,lowercase__ : Optional[int]=8 ,lowercase__ : List[Any]=2_0_4_8 ,lowercase__ : List[str]=1_6 ,lowercase__ : str=8 ,lowercase__ : Any=2_0_4_8 ,lowercase__ : Tuple=1_6 ,lowercase__ : Tuple=0.0 ,lowercase__ : List[str]=0.0 ,lowercase__ : Any=True ,lowercase__ : str=True ,lowercase__ : int="gelu" ,lowercase__ : Tuple=5_1_2 ,lowercase__ : List[Any]=0.1 ,lowercase__ : Tuple=0.0 ,lowercase__ : str=0.0 ,lowercase__ : Any=0.0_2 ,lowercase__ : Union[str, Any]=1 ,lowercase__ : List[Any]=False ,lowercase__ : Optional[int]=0 ,lowercase__ : Optional[int]=1 ,lowercase__ : str=2 ,lowercase__ : int=2 ,**lowercase__ : List[str] ,): __lowercase = vocab_size __lowercase = max_position_embeddings __lowercase = d_model __lowercase = encoder_ffn_dim __lowercase = encoder_layers __lowercase = encoder_attention_heads __lowercase = decoder_ffn_dim __lowercase = decoder_layers __lowercase = decoder_attention_heads __lowercase = dropout __lowercase = attention_dropout __lowercase = activation_dropout __lowercase = activation_function __lowercase = init_std __lowercase = encoder_layerdrop __lowercase = decoder_layerdrop __lowercase = use_cache __lowercase = encoder_layers __lowercase = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=lowercase__ ,bos_token_id=lowercase__ ,eos_token_id=lowercase__ ,is_encoder_decoder=lowercase__ ,decoder_start_token_id=lowercase__ ,forced_eos_token_id=lowercase__ ,**lowercase__ ,) class lowercase_ (lowerCamelCase__ ): """simple docstring""" @property def SCREAMING_SNAKE_CASE ( self : Dict ): if self.task in ["default", "seq2seq-lm"]: __lowercase = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: __lowercase = {0: '''batch'''} __lowercase = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: __lowercase = {0: '''batch''', 1: '''decoder_sequence'''} __lowercase = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(lowercase__ ,direction='''inputs''' ) elif self.task == "causal-lm": # TODO: figure this case out. __lowercase = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: __lowercase , __lowercase = self.num_layers for i in range(lowercase__ ): __lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''} __lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''} else: __lowercase = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}), ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}), ] ) return common_inputs @property def SCREAMING_SNAKE_CASE ( self : List[Any] ): if self.task in ["default", "seq2seq-lm"]: __lowercase = super().outputs else: __lowercase = super(lowercase__ ,self ).outputs if self.use_past: __lowercase , __lowercase = self.num_layers for i in range(lowercase__ ): __lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''} __lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''} return common_outputs def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,): __lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) # Generate decoder inputs __lowercase = seq_length if not self.use_past else 1 __lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) __lowercase = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} __lowercase = dict(**lowercase__ ,**lowercase__ ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch __lowercase , __lowercase = common_inputs['''input_ids'''].shape __lowercase = common_inputs['''decoder_input_ids'''].shape[1] __lowercase , __lowercase = self.num_attention_heads __lowercase = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) __lowercase = decoder_seq_length + 3 __lowercase = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) __lowercase = torch.cat( [common_inputs['''decoder_attention_mask'''], torch.ones(lowercase__ ,lowercase__ )] ,dim=1 ) __lowercase = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered __lowercase , __lowercase = self.num_layers __lowercase = min(lowercase__ ,lowercase__ ) __lowercase = max(lowercase__ ,lowercase__ ) - min_num_layers __lowercase = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder''' for _ in range(lowercase__ ): common_inputs["past_key_values"].append( ( torch.zeros(lowercase__ ), torch.zeros(lowercase__ ), torch.zeros(lowercase__ ), torch.zeros(lowercase__ ), ) ) # TODO: test this. __lowercase = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape for _ in range(lowercase__ ,lowercase__ ): common_inputs["past_key_values"].append((torch.zeros(lowercase__ ), torch.zeros(lowercase__ )) ) return common_inputs def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,): __lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch __lowercase , __lowercase = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values __lowercase = seqlen + 2 __lowercase , __lowercase = self.num_layers __lowercase , __lowercase = self.num_attention_heads __lowercase = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) __lowercase = common_inputs['''attention_mask'''].dtype __lowercase = torch.cat( [common_inputs['''attention_mask'''], torch.ones(lowercase__ ,lowercase__ ,dtype=lowercase__ )] ,dim=1 ) __lowercase = [ (torch.zeros(lowercase__ ), torch.zeros(lowercase__ )) for _ in range(lowercase__ ) ] return common_inputs def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,): # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX __lowercase = compute_effective_axis_dimension( lowercase__ ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX __lowercase = tokenizer.num_special_tokens_to_add(lowercase__ ) __lowercase = compute_effective_axis_dimension( lowercase__ ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=lowercase__ ) # Generate dummy inputs according to compute batch and sequence __lowercase = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size __lowercase = dict(tokenizer(lowercase__ ,return_tensors=lowercase__ ) ) return common_inputs def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,): if self.task in ["default", "seq2seq-lm"]: __lowercase = self._generate_dummy_inputs_for_default_and_seqaseq_lm( lowercase__ ,batch_size=lowercase__ ,seq_length=lowercase__ ,is_pair=lowercase__ ,framework=lowercase__ ) elif self.task == "causal-lm": __lowercase = self._generate_dummy_inputs_for_causal_lm( lowercase__ ,batch_size=lowercase__ ,seq_length=lowercase__ ,is_pair=lowercase__ ,framework=lowercase__ ) else: __lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowercase__ ,batch_size=lowercase__ ,seq_length=lowercase__ ,is_pair=lowercase__ ,framework=lowercase__ ) return common_inputs def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : List[Any] ,lowercase__ : Tuple ,lowercase__ : List[Any] ,lowercase__ : Optional[Any] ): if self.task in ["default", "seq2seq-lm"]: __lowercase = super()._flatten_past_key_values_(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) else: __lowercase = super(lowercase__ ,self )._flatten_past_key_values_( lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
41
0
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging __A = logging.get_logger(__name__) if is_vision_available(): import PIL class UpperCAmelCase (lowerCamelCase__ ): """simple docstring""" _UpperCAmelCase :str = ['pixel_values'] def __init__( self , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = PILImageResampling.BICUBIC , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = True , _UpperCAmelCase = 1 / 255 , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = True , **_UpperCAmelCase , ): super().__init__(**lowercase__ ) lowercase__: Optional[int] = size if size is not None else {'''shortest_edge''': 224} lowercase__: Dict = get_size_dict(lowercase__ , default_to_square=lowercase__ ) lowercase__: Optional[int] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} lowercase__: Optional[int] = get_size_dict(lowercase__ , default_to_square=lowercase__ , param_name='''crop_size''' ) lowercase__: Any = do_resize lowercase__: int = size lowercase__: Optional[Any] = resample lowercase__: str = do_center_crop lowercase__: Tuple = crop_size lowercase__: int = do_rescale lowercase__: Optional[int] = rescale_factor lowercase__: str = do_normalize lowercase__: Union[str, Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN lowercase__: Dict = image_std if image_std is not None else OPENAI_CLIP_STD lowercase__: Dict = do_convert_rgb def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = PILImageResampling.BICUBIC , _UpperCAmelCase = None , **_UpperCAmelCase , ): lowercase__: Optional[Any] = get_size_dict(lowercase__ , default_to_square=lowercase__ ) if "shortest_edge" not in size: raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) lowercase__: List[str] = get_resize_output_image_size(lowercase__ , size=size['''shortest_edge'''] , default_to_square=lowercase__ ) return resize(lowercase__ , size=lowercase__ , resample=lowercase__ , data_format=lowercase__ , **lowercase__ ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ): lowercase__: str = get_size_dict(lowercase__ ) if "height" not in size or "width" not in size: raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" ) return center_crop(lowercase__ , size=(size['''height'''], size['''width''']) , data_format=lowercase__ , **lowercase__ ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ): return rescale(lowercase__ , scale=lowercase__ , data_format=lowercase__ , **lowercase__ ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ): return normalize(lowercase__ , mean=lowercase__ , std=lowercase__ , data_format=lowercase__ , **lowercase__ ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = ChannelDimension.FIRST , **_UpperCAmelCase , ): lowercase__: Dict = do_resize if do_resize is not None else self.do_resize lowercase__: Union[str, Any] = size if size is not None else self.size lowercase__: List[Any] = get_size_dict(lowercase__ , param_name='''size''' , default_to_square=lowercase__ ) lowercase__: Any = resample if resample is not None else self.resample lowercase__: int = do_center_crop if do_center_crop is not None else self.do_center_crop lowercase__: Tuple = crop_size if crop_size is not None else self.crop_size lowercase__: Optional[int] = get_size_dict(lowercase__ , param_name='''crop_size''' , default_to_square=lowercase__ ) lowercase__: Any = do_rescale if do_rescale is not None else self.do_rescale lowercase__: int = rescale_factor if rescale_factor is not None else self.rescale_factor lowercase__: int = do_normalize if do_normalize is not None else self.do_normalize lowercase__: Union[str, Any] = image_mean if image_mean is not None else self.image_mean lowercase__: int = image_std if image_std is not None else self.image_std lowercase__: List[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb lowercase__: Tuple = make_list_of_images(lowercase__ ) if not valid_images(lowercase__ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: lowercase__: int = [convert_to_rgb(lowercase__ ) for image in images] # All transformations expect numpy arrays. lowercase__: Tuple = [to_numpy_array(lowercase__ ) for image in images] if do_resize: lowercase__: Union[str, Any] = [self.resize(image=lowercase__ , size=lowercase__ , resample=lowercase__ ) for image in images] if do_center_crop: lowercase__: Tuple = [self.center_crop(image=lowercase__ , size=lowercase__ ) for image in images] if do_rescale: lowercase__: Optional[Any] = [self.rescale(image=lowercase__ , scale=lowercase__ ) for image in images] if do_normalize: lowercase__: Optional[int] = [self.normalize(image=lowercase__ , mean=lowercase__ , std=lowercase__ ) for image in images] lowercase__: int = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images] lowercase__: Optional[Any] = {'''pixel_values''': images} return BatchFeature(data=lowercase__ , tensor_type=lowercase__ )
586
'''simple docstring''' from __future__ import annotations def _A ( A__ , A__ ): """simple docstring""" if b == 0: return (1, 0) ((__lowercase) , (__lowercase)) = extended_euclid(A__ , a % b ) __lowercase = a // b return (y, x - k * y) def _A ( A__ , A__ , A__ , A__ ): """simple docstring""" ((__lowercase) , (__lowercase)) = extended_euclid(A__ , A__ ) __lowercase = na * na __lowercase = ra * x * na + ra * y * na return (n % m + m) % m def _A ( A__ , A__ ): """simple docstring""" ((__lowercase) , (__lowercase)) = extended_euclid(A__ , A__ ) if b < 0: __lowercase = (b % n + n) % n return b def _A ( A__ , A__ , A__ , A__ ): """simple docstring""" __lowercase , __lowercase = invert_modulo(A__ , A__ ), invert_modulo(A__ , A__ ) __lowercase = na * na __lowercase = ra * x * na + ra * y * na return (n % m + m) % m if __name__ == "__main__": from doctest import testmod testmod(name='''chinese_remainder_theorem''', verbose=True) testmod(name='''chinese_remainder_theorem2''', verbose=True) testmod(name='''invert_modulo''', verbose=True) testmod(name='''extended_euclid''', verbose=True)
41
0
"""simple docstring""" from random import shuffle import tensorflow as tf from numpy import array def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[Any]: _SCREAMING_SNAKE_CASE : Union[str, Any] = int(A__ ) assert noofclusters < len(A__ ) # Find out the dimensionality _SCREAMING_SNAKE_CASE : Any = len(vectors[0] ) # Will help select random centroids from among the available vectors _SCREAMING_SNAKE_CASE : List[Any] = list(range(len(A__ ) ) ) shuffle(A__ ) # GRAPH OF COMPUTATION # We initialize a new graph and set it as the default during each run # of this algorithm. This ensures that as this function is called # multiple times, the default graph doesn't keep getting crowded with # unused ops and Variables from previous function calls. _SCREAMING_SNAKE_CASE : List[Any] = tf.Graph() with graph.as_default(): # SESSION OF COMPUTATION _SCREAMING_SNAKE_CASE : Dict = tf.Session() ##CONSTRUCTING THE ELEMENTS OF COMPUTATION ##First lets ensure we have a Variable vector for each centroid, ##initialized to one of the vectors from the available data points _SCREAMING_SNAKE_CASE : Tuple = [ tf.Variable(vectors[vector_indices[i]] ) for i in range(A__ ) ] ##These nodes will assign the centroid Variables the appropriate ##values _SCREAMING_SNAKE_CASE : Any = tf.placeholder("""float64""" , [dim] ) _SCREAMING_SNAKE_CASE : int = [] for centroid in centroids: cent_assigns.append(tf.assign(A__ , A__ ) ) ##Variables for cluster assignments of individual vectors(initialized ##to 0 at first) _SCREAMING_SNAKE_CASE : Union[str, Any] = [tf.Variable(0 ) for i in range(len(A__ ) )] ##These nodes will assign an assignment Variable the appropriate ##value _SCREAMING_SNAKE_CASE : Tuple = tf.placeholder("""int32""" ) _SCREAMING_SNAKE_CASE : int = [] for assignment in assignments: cluster_assigns.append(tf.assign(A__ , A__ ) ) ##Now lets construct the node that will compute the mean # The placeholder for the input _SCREAMING_SNAKE_CASE : List[str] = tf.placeholder("""float""" , [None, dim] ) # The Node/op takes the input and computes a mean along the 0th # dimension, i.e. the list of input vectors _SCREAMING_SNAKE_CASE : List[str] = tf.reduce_mean(A__ , 0 ) ##Node for computing Euclidean distances # Placeholders for input _SCREAMING_SNAKE_CASE : str = tf.placeholder("""float""" , [dim] ) _SCREAMING_SNAKE_CASE : str = tf.placeholder("""float""" , [dim] ) _SCREAMING_SNAKE_CASE : List[Any] = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(A__ , A__ ) , 2 ) ) ) ##This node will figure out which cluster to assign a vector to, ##based on Euclidean distances of the vector from the centroids. # Placeholder for input _SCREAMING_SNAKE_CASE : List[str] = tf.placeholder("""float""" , [noofclusters] ) _SCREAMING_SNAKE_CASE : List[Any] = tf.argmin(A__ , 0 ) ##INITIALIZING STATE VARIABLES ##This will help initialization of all Variables defined with respect ##to the graph. The Variable-initializer should be defined after ##all the Variables have been constructed, so that each of them ##will be included in the initialization. _SCREAMING_SNAKE_CASE : Tuple = tf.initialize_all_variables() # Initialize all variables sess.run(A__ ) ##CLUSTERING ITERATIONS # Now perform the Expectation-Maximization steps of K-Means clustering # iterations. To keep things simple, we will only do a set number of # iterations, instead of using a Stopping Criterion. _SCREAMING_SNAKE_CASE : List[str] = 100 for _ in range(A__ ): ##EXPECTATION STEP ##Based on the centroid locations till last iteration, compute ##the _expected_ centroid assignments. # Iterate over each vector for vector_n in range(len(A__ ) ): _SCREAMING_SNAKE_CASE : Any = vectors[vector_n] # Compute Euclidean distance between this vector and each # centroid. Remember that this list cannot be named #'centroid_distances', since that is the input to the # cluster assignment node. _SCREAMING_SNAKE_CASE : int = [ sess.run(A__ , feed_dict={va: vect, va: sess.run(A__ )} ) for centroid in centroids ] # Now use the cluster assignment node, with the distances # as the input _SCREAMING_SNAKE_CASE : List[str] = sess.run( A__ , feed_dict={centroid_distances: distances} ) # Now assign the value to the appropriate state variable sess.run( cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} ) ##MAXIMIZATION STEP # Based on the expected state computed from the Expectation Step, # compute the locations of the centroids so as to maximize the # overall objective of minimizing within-cluster Sum-of-Squares for cluster_n in range(A__ ): # Collect all the vectors assigned to this cluster _SCREAMING_SNAKE_CASE : Any = [ vectors[i] for i in range(len(A__ ) ) if sess.run(assignments[i] ) == cluster_n ] # Compute new centroid location _SCREAMING_SNAKE_CASE : Any = sess.run( A__ , feed_dict={mean_input: array(A__ )} ) # Assign value to appropriate variable sess.run( cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} ) # Return centroids and assignments _SCREAMING_SNAKE_CASE : Dict = sess.run(A__ ) _SCREAMING_SNAKE_CASE : Union[str, Any] = sess.run(A__ ) return centroids, assignments
338
'''simple docstring''' from datasets.utils.patching import _PatchedModuleObj, patch_submodule from . import _test_patching def _A ( ): """simple docstring""" import os as original_os from os import path as original_path from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join __lowercase = '''__test_patch_submodule_mock__''' with patch_submodule(_test_patching , '''os.path.join''' , A__ ): # Every way to access os.path.join must be patched, and the rest must stay untouched # check os.path.join assert isinstance(_test_patching.os , _PatchedModuleObj ) assert isinstance(_test_patching.os.path , _PatchedModuleObj ) assert _test_patching.os.path.join is mock # check path.join assert isinstance(_test_patching.path , _PatchedModuleObj ) assert _test_patching.path.join is mock # check join assert _test_patching.join is mock # check that the other attributes are untouched assert _test_patching.os.rename is original_rename assert _test_patching.path.dirname is original_dirname assert _test_patching.os.path.dirname is original_dirname # Even renamed modules or objects must be patched # check renamed_os.path.join assert isinstance(_test_patching.renamed_os , _PatchedModuleObj ) assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj ) assert _test_patching.renamed_os.path.join is mock # check renamed_path.join assert isinstance(_test_patching.renamed_path , _PatchedModuleObj ) assert _test_patching.renamed_path.join is mock # check renamed_join assert _test_patching.renamed_join is mock # check that the other attributes are untouched assert _test_patching.renamed_os.rename is original_rename assert _test_patching.renamed_path.dirname is original_dirname assert _test_patching.renamed_os.path.dirname is original_dirname # check that everthing is back to normal when the patch is over assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join def _A ( ): """simple docstring""" assert _test_patching.open is open __lowercase = '''__test_patch_submodule_builtin_mock__''' # _test_patching has "open" in its globals assert _test_patching.open is open with patch_submodule(_test_patching , '''open''' , A__ ): assert _test_patching.open is mock # check that everthing is back to normal when the patch is over assert _test_patching.open is open def _A ( ): """simple docstring""" __lowercase = '''__test_patch_submodule_missing_mock__''' with patch_submodule(_test_patching , '''pandas.read_csv''' , A__ ): pass def _A ( ): """simple docstring""" __lowercase = '''__test_patch_submodule_missing_builtin_mock__''' # _test_patching doesn't have "len" in its globals assert getattr(_test_patching , '''len''' , A__ ) is None with patch_submodule(_test_patching , '''len''' , A__ ): assert _test_patching.len is mock assert _test_patching.len is len def _A ( ): """simple docstring""" __lowercase = '''__test_patch_submodule_start_and_stop_mock__''' __lowercase = patch_submodule(_test_patching , '''open''' , A__ ) assert _test_patching.open is open patch.start() assert _test_patching.open is mock patch.stop() assert _test_patching.open is open def _A ( ): """simple docstring""" from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join __lowercase = '''__test_patch_submodule_successive_join__''' __lowercase = '''__test_patch_submodule_successive_dirname__''' __lowercase = '''__test_patch_submodule_successive_rename__''' assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename with patch_submodule(_test_patching , '''os.path.join''' , A__ ): with patch_submodule(_test_patching , '''os.rename''' , A__ ): with patch_submodule(_test_patching , '''os.path.dirname''' , A__ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename # try another order with patch_submodule(_test_patching , '''os.rename''' , A__ ): with patch_submodule(_test_patching , '''os.path.join''' , A__ ): with patch_submodule(_test_patching , '''os.path.dirname''' , A__ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename def _A ( ): """simple docstring""" __lowercase = '''__test_patch_submodule_doesnt_exist_mock__''' with patch_submodule(_test_patching , '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''' , A__ ): pass with patch_submodule(_test_patching , '''os.__attribute_that_doesn_exist__''' , A__ ): pass
41
0
from typing import TYPE_CHECKING from ...utils import _LazyModule UpperCAmelCase__ = {'''tokenization_byt5''': ['''ByT5Tokenizer''']} if TYPE_CHECKING: from .tokenization_byta import ByTaTokenizer else: import sys UpperCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
351
'''simple docstring''' import os import tempfile import unittest from transformers import NezhaConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, ) from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST class lowercase_ : """simple docstring""" def __init__( self : Dict ,lowercase__ : Dict ,lowercase__ : int=1_3 ,lowercase__ : List[str]=7 ,lowercase__ : int=True ,lowercase__ : int=True ,lowercase__ : Union[str, Any]=True ,lowercase__ : List[Any]=True ,lowercase__ : str=9_9 ,lowercase__ : Optional[Any]=3_2 ,lowercase__ : Union[str, Any]=5 ,lowercase__ : List[Any]=4 ,lowercase__ : str=3_7 ,lowercase__ : Tuple="gelu" ,lowercase__ : List[Any]=0.1 ,lowercase__ : Dict=0.1 ,lowercase__ : int=1_2_8 ,lowercase__ : Dict=3_2 ,lowercase__ : Dict=1_6 ,lowercase__ : Any=2 ,lowercase__ : int=0.0_2 ,lowercase__ : List[str]=3 ,lowercase__ : Dict=4 ,lowercase__ : Optional[int]=None ,): __lowercase = parent __lowercase = batch_size __lowercase = seq_length __lowercase = is_training __lowercase = use_input_mask __lowercase = use_token_type_ids __lowercase = use_labels __lowercase = vocab_size __lowercase = hidden_size __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = intermediate_size __lowercase = hidden_act __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = max_position_embeddings __lowercase = type_vocab_size __lowercase = type_sequence_label_size __lowercase = initializer_range __lowercase = num_labels __lowercase = num_choices __lowercase = scope def SCREAMING_SNAKE_CASE ( self : Optional[int] ): __lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) __lowercase = None if self.use_input_mask: __lowercase = random_attention_mask([self.batch_size, self.seq_length] ) __lowercase = None if self.use_token_type_ids: __lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) __lowercase = None __lowercase = None __lowercase = None if self.use_labels: __lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) __lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) __lowercase = ids_tensor([self.batch_size] ,self.num_choices ) __lowercase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): return NezhaConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=lowercase__ ,initializer_range=self.initializer_range ,) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) = self.prepare_config_and_inputs() __lowercase = True __lowercase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __lowercase = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Union[str, Any] ,lowercase__ : List[str] ,lowercase__ : List[str] ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : Tuple ,lowercase__ : str ): __lowercase = NezhaModel(config=lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ) __lowercase = model(lowercase__ ,token_type_ids=lowercase__ ) __lowercase = model(lowercase__ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Dict ,lowercase__ : str ,lowercase__ : Optional[Any] ,lowercase__ : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : Tuple ,lowercase__ : Optional[int] ,lowercase__ : List[Any] ,): __lowercase = True __lowercase = NezhaModel(lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = model( lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,encoder_hidden_states=lowercase__ ,encoder_attention_mask=lowercase__ ,) __lowercase = model( lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,encoder_hidden_states=lowercase__ ,) __lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Optional[Any] ): __lowercase = NezhaForMaskedLM(config=lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Any ,lowercase__ : int ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[int] ,lowercase__ : Any ): __lowercase = NezhaForNextSentencePrediction(config=lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = model( lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) ) def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : str ,lowercase__ : Dict ,lowercase__ : Tuple ,lowercase__ : Dict ,lowercase__ : Tuple ,lowercase__ : int ,lowercase__ : int ): __lowercase = NezhaForPreTraining(config=lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = model( lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ,next_sentence_label=lowercase__ ,) self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[Any] ,lowercase__ : Tuple ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Optional[int] ,lowercase__ : Union[str, Any] ): __lowercase = NezhaForQuestionAnswering(config=lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = model( lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,start_positions=lowercase__ ,end_positions=lowercase__ ,) self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Tuple ,lowercase__ : str ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Any ,lowercase__ : Optional[int] ,lowercase__ : int ): __lowercase = self.num_labels __lowercase = NezhaForSequenceClassification(lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : Union[str, Any] ,lowercase__ : List[str] ,lowercase__ : int ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Any ,lowercase__ : Optional[Any] ): __lowercase = self.num_labels __lowercase = NezhaForTokenClassification(config=lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : List[Any] ,lowercase__ : str ): __lowercase = self.num_choices __lowercase = NezhaForMultipleChoice(config=lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() __lowercase = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() __lowercase = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() __lowercase = model( lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): __lowercase = self.prepare_config_and_inputs() ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) = config_and_inputs __lowercase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class lowercase_ (lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = ( ( NezhaModel, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE : Tuple = ( { 'feature-extraction': NezhaModel, 'fill-mask': NezhaForMaskedLM, 'question-answering': NezhaForQuestionAnswering, 'text-classification': NezhaForSequenceClassification, 'token-classification': NezhaForTokenClassification, 'zero-shot': NezhaForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE : List[str] = True def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[str] ,lowercase__ : str ,lowercase__ : Any=False ): __lowercase = super()._prepare_for_class(lowercase__ ,lowercase__ ,return_labels=lowercase__ ) if return_labels: if model_class in get_values(lowercase__ ): __lowercase = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=lowercase__ ) __lowercase = torch.zeros( self.model_tester.batch_size ,dtype=torch.long ,device=lowercase__ ) return inputs_dict def SCREAMING_SNAKE_CASE ( self : Tuple ): __lowercase = NezhaModelTester(self ) __lowercase = ConfigTester(self ,config_class=lowercase__ ,hidden_size=3_7 ) def SCREAMING_SNAKE_CASE ( self : int ): self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : int ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Any ): __lowercase = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Any ): # This regression test was failing with PyTorch < 1.3 ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() __lowercase = None self.model_tester.create_and_check_model_as_decoder( lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,) def SCREAMING_SNAKE_CASE ( self : int ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*lowercase__ ) def SCREAMING_SNAKE_CASE ( self : int ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_next_sequence_prediction(*lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*lowercase__ ) def SCREAMING_SNAKE_CASE ( self : str ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowercase__ ) def SCREAMING_SNAKE_CASE ( self : str ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowercase__ ) def SCREAMING_SNAKE_CASE ( self : int ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowercase__ ) @slow def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase = NezhaModel.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) @slow @require_torch_gpu def SCREAMING_SNAKE_CASE ( self : Optional[int] ): __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # NezhaForMultipleChoice behaves incorrectly in JIT environments. if model_class == NezhaForMultipleChoice: return __lowercase = True __lowercase = model_class(config=lowercase__ ) __lowercase = self._prepare_for_class(lowercase__ ,lowercase__ ) __lowercase = torch.jit.trace( lowercase__ ,(inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(lowercase__ ,os.path.join(lowercase__ ,'''bert.pt''' ) ) __lowercase = torch.jit.load(os.path.join(lowercase__ ,'''bert.pt''' ) ,map_location=lowercase__ ) loaded(inputs_dict['''input_ids'''].to(lowercase__ ) ,inputs_dict['''attention_mask'''].to(lowercase__ ) ) @require_torch class lowercase_ (unittest.TestCase ): """simple docstring""" @slow def SCREAMING_SNAKE_CASE ( self : int ): __lowercase = NezhaModel.from_pretrained('''sijunhe/nezha-cn-base''' ) __lowercase = torch.tensor([[0, 1, 2, 3, 4, 5]] ) __lowercase = torch.tensor([[0, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __lowercase = model(lowercase__ ,attention_mask=lowercase__ )[0] __lowercase = torch.Size((1, 6, 7_6_8) ) self.assertEqual(output.shape ,lowercase__ ) __lowercase = torch.tensor([[[0.0_6_8_5, 0.2_4_4_1, 0.1_1_0_2], [0.0_6_0_0, 0.1_9_0_6, 0.1_3_4_9], [0.0_2_2_1, 0.0_8_1_9, 0.0_5_8_6]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,lowercase__ ,atol=1e-4 ) ) @slow def SCREAMING_SNAKE_CASE ( self : Dict ): __lowercase = NezhaForMaskedLM.from_pretrained('''sijunhe/nezha-cn-base''' ) __lowercase = torch.tensor([[0, 1, 2, 3, 4, 5]] ) __lowercase = torch.tensor([[1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __lowercase = model(lowercase__ ,attention_mask=lowercase__ )[0] __lowercase = torch.Size((1, 6, 2_1_1_2_8) ) self.assertEqual(output.shape ,lowercase__ ) __lowercase = torch.tensor( [[-2.7_9_3_9, -1.7_9_0_2, -2.2_1_8_9], [-2.8_5_8_5, -1.8_9_0_8, -2.3_7_2_3], [-2.6_4_9_9, -1.7_7_5_0, -2.2_5_5_8]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,lowercase__ ,atol=1e-4 ) )
41
0
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL A_ : Union[str, Any] = logging.get_logger(__name__) class lowerCAmelCase__ ( lowerCamelCase__ ): '''simple docstring''' _SCREAMING_SNAKE_CASE : Optional[Any] = ['pixel_values'] def __init__( self : Tuple , _SCREAMING_SNAKE_CASE : bool = True , _SCREAMING_SNAKE_CASE : Dict[str, int] = None , _SCREAMING_SNAKE_CASE : PILImageResampling = PIL.Image.BICUBIC , _SCREAMING_SNAKE_CASE : bool = True , _SCREAMING_SNAKE_CASE : Dict[str, int] = None , _SCREAMING_SNAKE_CASE : Union[int, float] = 1 / 255 , _SCREAMING_SNAKE_CASE : bool = True , _SCREAMING_SNAKE_CASE : bool = True , _SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , _SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , **_SCREAMING_SNAKE_CASE : Optional[Any] , ) -> Tuple: """simple docstring""" super().__init__(**lowercase__ ) SCREAMING_SNAKE_CASE : int = size if size is not None else {'height': 256, 'width': 256} SCREAMING_SNAKE_CASE : Dict = get_size_dict(lowercase__ ) SCREAMING_SNAKE_CASE : Optional[int] = crop_size if crop_size is not None else {'height': 224, 'width': 224} SCREAMING_SNAKE_CASE : Optional[int] = get_size_dict(lowercase__ , param_name='crop_size' ) SCREAMING_SNAKE_CASE : str = do_resize SCREAMING_SNAKE_CASE : List[Any] = size SCREAMING_SNAKE_CASE : List[Any] = resample SCREAMING_SNAKE_CASE : Tuple = do_center_crop SCREAMING_SNAKE_CASE : int = crop_size SCREAMING_SNAKE_CASE : Dict = do_rescale SCREAMING_SNAKE_CASE : Optional[int] = rescale_factor SCREAMING_SNAKE_CASE : int = do_normalize SCREAMING_SNAKE_CASE : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN SCREAMING_SNAKE_CASE : str = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowerCAmelCase ( self : Union[str, Any] , _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : Dict[str, int] , _SCREAMING_SNAKE_CASE : PILImageResampling = PIL.Image.BICUBIC , _SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **_SCREAMING_SNAKE_CASE : List[Any] , ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = get_size_dict(lowercase__ ) if "height" not in size or "width" not in size: raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" ) return resize( lowercase__ , size=(size['height'], size['width']) , resample=lowercase__ , data_format=lowercase__ , **lowercase__ ) def _lowerCAmelCase ( self : Tuple , _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : Dict[str, int] , _SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **_SCREAMING_SNAKE_CASE : Union[str, Any] , ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = get_size_dict(lowercase__ ) if "height" not in size or "width" not in size: raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" ) return center_crop(lowercase__ , size=(size['height'], size['width']) , data_format=lowercase__ , **lowercase__ ) def _lowerCAmelCase ( self : Optional[Any] , _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : Union[int, float] , _SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **_SCREAMING_SNAKE_CASE : Any , ) -> Optional[int]: """simple docstring""" return rescale(lowercase__ , scale=lowercase__ , data_format=lowercase__ , **lowercase__ ) def _lowerCAmelCase ( self : Tuple , _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : Union[float, List[float]] , _SCREAMING_SNAKE_CASE : Union[float, List[float]] , _SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **_SCREAMING_SNAKE_CASE : List[Any] , ) -> Tuple: """simple docstring""" return normalize(lowercase__ , mean=lowercase__ , std=lowercase__ , data_format=lowercase__ , **lowercase__ ) def _lowerCAmelCase ( self : Dict , _SCREAMING_SNAKE_CASE : ImageInput , _SCREAMING_SNAKE_CASE : bool = None , _SCREAMING_SNAKE_CASE : Dict[str, int] = None , _SCREAMING_SNAKE_CASE : str=None , _SCREAMING_SNAKE_CASE : bool = None , _SCREAMING_SNAKE_CASE : Dict[str, int] = None , _SCREAMING_SNAKE_CASE : bool = None , _SCREAMING_SNAKE_CASE : float = None , _SCREAMING_SNAKE_CASE : bool = None , _SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , _SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , _SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , _SCREAMING_SNAKE_CASE : ChannelDimension = ChannelDimension.FIRST , **_SCREAMING_SNAKE_CASE : Union[str, Any] , ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = do_resize if do_resize is not None else self.do_resize SCREAMING_SNAKE_CASE : Optional[int] = resample if resample is not None else self.resample SCREAMING_SNAKE_CASE : Any = do_center_crop if do_center_crop is not None else self.do_center_crop SCREAMING_SNAKE_CASE : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale SCREAMING_SNAKE_CASE : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor SCREAMING_SNAKE_CASE : List[str] = do_normalize if do_normalize is not None else self.do_normalize SCREAMING_SNAKE_CASE : Optional[int] = image_mean if image_mean is not None else self.image_mean SCREAMING_SNAKE_CASE : Any = image_std if image_std is not None else self.image_std SCREAMING_SNAKE_CASE : str = size if size is not None else self.size SCREAMING_SNAKE_CASE : List[str] = get_size_dict(lowercase__ ) SCREAMING_SNAKE_CASE : Union[str, Any] = crop_size if crop_size is not None else self.crop_size SCREAMING_SNAKE_CASE : int = get_size_dict(lowercase__ , param_name='crop_size' ) SCREAMING_SNAKE_CASE : Union[str, Any] = make_list_of_images(lowercase__ ) if not valid_images(lowercase__ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. SCREAMING_SNAKE_CASE : Optional[Any] = [to_numpy_array(lowercase__ ) for image in images] if do_resize: SCREAMING_SNAKE_CASE : str = [self.resize(image=lowercase__ , size=lowercase__ , resample=lowercase__ ) for image in images] if do_center_crop: SCREAMING_SNAKE_CASE : Dict = [self.center_crop(image=lowercase__ , size=lowercase__ ) for image in images] if do_rescale: SCREAMING_SNAKE_CASE : Dict = [self.rescale(image=lowercase__ , scale=lowercase__ ) for image in images] if do_normalize: SCREAMING_SNAKE_CASE : Optional[Any] = [self.normalize(image=lowercase__ , mean=lowercase__ , std=lowercase__ ) for image in images] SCREAMING_SNAKE_CASE : Dict = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images] SCREAMING_SNAKE_CASE : Dict = {'pixel_values': images} return BatchFeature(data=lowercase__ , tensor_type=lowercase__ )
265
'''simple docstring''' from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar lowerCAmelCase__ = TypeVar('''KEY''') lowerCAmelCase__ = TypeVar('''VAL''') @dataclass(frozen=lowerCamelCase__ , slots=lowerCamelCase__ ) class lowercase_ (Generic[KEY, VAL] ): """simple docstring""" SCREAMING_SNAKE_CASE : KEY SCREAMING_SNAKE_CASE : VAL class lowercase_ (_Item ): """simple docstring""" def __init__( self : Optional[int] ): super().__init__(lowercase__ ,lowercase__ ) def __bool__( self : List[str] ): return False lowerCAmelCase__ = _DeletedItem() class lowercase_ (MutableMapping[KEY, VAL] ): """simple docstring""" def __init__( self : Dict ,lowercase__ : int = 8 ,lowercase__ : float = 0.7_5 ): __lowercase = initial_block_size __lowercase = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 __lowercase = capacity_factor __lowercase = 0 def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : KEY ): return hash(lowercase__ ) % len(self._buckets ) def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : int ): return (ind + 1) % len(self._buckets ) def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : int ,lowercase__ : KEY ,lowercase__ : VAL ): __lowercase = self._buckets[ind] if not stored: __lowercase = _Item(lowercase__ ,lowercase__ ) self._len += 1 return True elif stored.key == key: __lowercase = _Item(lowercase__ ,lowercase__ ) return True else: return False def SCREAMING_SNAKE_CASE ( self : Dict ): __lowercase = len(self._buckets ) * self._capacity_factor return len(self ) >= int(lowercase__ ) def SCREAMING_SNAKE_CASE ( self : int ): if len(self._buckets ) <= self._initial_block_size: return False __lowercase = len(self._buckets ) * self._capacity_factor / 2 return len(self ) < limit def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : int ): __lowercase = self._buckets __lowercase = [None] * new_size __lowercase = 0 for item in old_buckets: if item: self._add_item(item.key ,item.val ) def SCREAMING_SNAKE_CASE ( self : str ): self._resize(len(self._buckets ) * 2 ) def SCREAMING_SNAKE_CASE ( self : Tuple ): self._resize(len(self._buckets ) // 2 ) def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : KEY ): __lowercase = self._get_bucket_index(lowercase__ ) for _ in range(len(self._buckets ) ): yield ind __lowercase = self._get_next_ind(lowercase__ ) def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : KEY ,lowercase__ : VAL ): for ind in self._iterate_buckets(lowercase__ ): if self._try_set(lowercase__ ,lowercase__ ,lowercase__ ): break def __setitem__( self : str ,lowercase__ : KEY ,lowercase__ : VAL ): if self._is_full(): self._size_up() self._add_item(lowercase__ ,lowercase__ ) def __delitem__( self : Tuple ,lowercase__ : KEY ): for ind in self._iterate_buckets(lowercase__ ): __lowercase = self._buckets[ind] if item is None: raise KeyError(lowercase__ ) if item is _deleted: continue if item.key == key: __lowercase = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self : Tuple ,lowercase__ : KEY ): for ind in self._iterate_buckets(lowercase__ ): __lowercase = self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(lowercase__ ) def __len__( self : Optional[int] ): return self._len def __iter__( self : str ): yield from (item.key for item in self._buckets if item) def __repr__( self : Optional[Any] ): __lowercase = ''' ,'''.join( F"{item.key}: {item.val}" for item in self._buckets if item ) return F"HashMap({val_string})"
41
0
import torch from transformers import CamembertForMaskedLM, CamembertTokenizer def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=5 ): assert masked_input.count("""<mask>""" ) == 1 __magic_name__ : Any =torch.tensor(tokenizer.encode(A__ , add_special_tokens=A__ ) ).unsqueeze(0 ) # Batch size 1 __magic_name__ : List[str] =model(A__ )[0] # The last hidden-state is the first element of the output tuple __magic_name__ : Union[str, Any] =(input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item() __magic_name__ : Dict =logits[0, masked_index, :] __magic_name__ : Union[str, Any] =logits.softmax(dim=0 ) __magic_name__ , __magic_name__ : Optional[Any] =prob.topk(k=A__ , dim=0 ) __magic_name__ : Optional[Any] =""" """.join( [tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(A__ ) )] ) __magic_name__ : str =tokenizer.mask_token __magic_name__ : Union[str, Any] =[] for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(""" """ ) ): __magic_name__ : List[Any] =predicted_token_bpe.replace("""\u2581""" , """ """ ) if " {0}".format(A__ ) in masked_input: topk_filled_outputs.append( ( masked_input.replace(""" {0}""".format(A__ ) , A__ ), values[index].item(), predicted_token, ) ) else: topk_filled_outputs.append( ( masked_input.replace(A__ , A__ ), values[index].item(), predicted_token, ) ) return topk_filled_outputs UpperCAmelCase_ : Optional[int] = CamembertTokenizer.from_pretrained("camembert-base") UpperCAmelCase_ : str = CamembertForMaskedLM.from_pretrained("camembert-base") model.eval() UpperCAmelCase_ : Tuple = "Le camembert est <mask> :)" print(fill_mask(masked_input, model, tokenizer, topk=3))
21
'''simple docstring''' from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from transformers.modeling_outputs import BaseModelOutput from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING lowerCAmelCase__ = logging.get_logger(__name__) @add_end_docstrings(lowerCamelCase__ ) class lowercase_ (lowerCamelCase__ ): """simple docstring""" def __init__( self : List[str] ,**lowercase__ : Tuple ): super().__init__(**lowercase__ ) if self.framework == "tf": raise ValueError(F"The {self.__class__} is only available in PyTorch." ) requires_backends(self ,'''vision''' ) self.check_model_type(lowercase__ ) def __call__( self : List[str] ,lowercase__ : Union[str, "Image.Image", List[Dict[str, Any]]] ,lowercase__ : Union[str, List[str]] = None ,**lowercase__ : str ,): if "text_queries" in kwargs: __lowercase = kwargs.pop('''text_queries''' ) if isinstance(lowercase__ ,(str, Image.Image) ): __lowercase = {'''image''': image, '''candidate_labels''': candidate_labels} else: __lowercase = image __lowercase = super().__call__(lowercase__ ,**lowercase__ ) return results def SCREAMING_SNAKE_CASE ( self : int ,**lowercase__ : List[Any] ): __lowercase = {} if "threshold" in kwargs: __lowercase = kwargs['''threshold'''] if "top_k" in kwargs: __lowercase = kwargs['''top_k'''] return {}, {}, postprocess_params def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : Optional[Any] ): __lowercase = load_image(inputs['''image'''] ) __lowercase = inputs['''candidate_labels'''] if isinstance(lowercase__ ,lowercase__ ): __lowercase = candidate_labels.split(''',''' ) __lowercase = torch.tensor([[image.height, image.width]] ,dtype=torch.intaa ) for i, candidate_label in enumerate(lowercase__ ): __lowercase = self.tokenizer(lowercase__ ,return_tensors=self.framework ) __lowercase = self.image_processor(lowercase__ ,return_tensors=self.framework ) yield { "is_last": i == len(lowercase__ ) - 1, "target_size": target_size, "candidate_label": candidate_label, **text_inputs, **image_features, } def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : List[str] ): __lowercase = model_inputs.pop('''target_size''' ) __lowercase = model_inputs.pop('''candidate_label''' ) __lowercase = model_inputs.pop('''is_last''' ) __lowercase = self.model(**lowercase__ ) __lowercase = {'''target_size''': target_size, '''candidate_label''': candidate_label, '''is_last''': is_last, **outputs} return model_outputs def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : int ,lowercase__ : List[Any]=0.1 ,lowercase__ : List[str]=None ): __lowercase = [] for model_output in model_outputs: __lowercase = model_output['''candidate_label'''] __lowercase = BaseModelOutput(lowercase__ ) __lowercase = self.image_processor.post_process_object_detection( outputs=lowercase__ ,threshold=lowercase__ ,target_sizes=model_output['''target_size'''] )[0] for index in outputs["scores"].nonzero(): __lowercase = outputs['''scores'''][index].item() __lowercase = self._get_bounding_box(outputs['''boxes'''][index][0] ) __lowercase = {'''score''': score, '''label''': label, '''box''': box} results.append(lowercase__ ) __lowercase = sorted(lowercase__ ,key=lambda lowercase__ : x["score"] ,reverse=lowercase__ ) if top_k: __lowercase = results[:top_k] return results def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : "torch.Tensor" ): if self.framework != "pt": raise ValueError('''The ZeroShotObjectDetectionPipeline is only available in PyTorch.''' ) __lowercase , __lowercase , __lowercase , __lowercase = box.int().tolist() __lowercase = { '''xmin''': xmin, '''ymin''': ymin, '''xmax''': xmax, '''ymax''': ymax, } return bbox
41
0
"""simple docstring""" import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class lowerCamelCase__ ( lowerCamelCase__ ): '''simple docstring''' def __init__( self ) -> int: A = [] def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,**lowerCamelCase_ ) -> Optional[Any]: self.events.append("""on_init_end""" ) def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,**lowerCamelCase_ ) -> Optional[int]: self.events.append("""on_train_begin""" ) def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,**lowerCamelCase_ ) -> List[Any]: self.events.append("""on_train_end""" ) def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,**lowerCamelCase_ ) -> Optional[int]: self.events.append("""on_epoch_begin""" ) def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,**lowerCamelCase_ ) -> int: self.events.append("""on_epoch_end""" ) def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,**lowerCamelCase_ ) -> Dict: self.events.append("""on_step_begin""" ) def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,**lowerCamelCase_ ) -> Any: self.events.append("""on_step_end""" ) def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,**lowerCamelCase_ ) -> str: self.events.append("""on_evaluate""" ) def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,**lowerCamelCase_ ) -> Union[str, Any]: self.events.append("""on_predict""" ) def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,**lowerCamelCase_ ) -> List[Any]: self.events.append("""on_save""" ) def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,**lowerCamelCase_ ) -> str: self.events.append("""on_log""" ) def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,**lowerCamelCase_ ) -> List[Any]: self.events.append("""on_prediction_step""" ) @require_torch class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ ( self ) -> Optional[int]: A = tempfile.mkdtemp() def UpperCamelCase__ ( self ) -> int: shutil.rmtree(self.output_dir ) def UpperCamelCase__ ( self ,lowerCamelCase_=0 ,lowerCamelCase_=0 ,lowerCamelCase_=6_4 ,lowerCamelCase_=6_4 ,lowerCamelCase_=None ,lowerCamelCase_=False ,**lowerCamelCase_ ) -> Any: # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure # its set to False since the tests later on depend on its value. A = RegressionDataset(length=lowercase__ ) A = RegressionDataset(length=lowercase__ ) A = RegressionModelConfig(a=lowercase__ ,b=lowercase__ ) A = RegressionPreTrainedModel(lowercase__ ) A = TrainingArguments(self.output_dir ,disable_tqdm=lowercase__ ,report_to=[] ,**lowercase__ ) return Trainer( lowercase__ ,lowercase__ ,train_dataset=lowercase__ ,eval_dataset=lowercase__ ,callbacks=lowercase__ ,) def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ) -> Union[str, Any]: self.assertEqual(len(lowercase__ ) ,len(lowercase__ ) ) # Order doesn't matter A = sorted(lowercase__ ,key=lambda lowerCamelCase_ : cb.__name__ if isinstance(lowercase__ ,lowercase__ ) else cb.__class__.__name__ ) A = sorted(lowercase__ ,key=lambda lowerCamelCase_ : cb.__name__ if isinstance(lowercase__ ,lowercase__ ) else cb.__class__.__name__ ) for cba, cba in zip(lowercase__ ,lowercase__ ): if isinstance(lowercase__ ,lowercase__ ) and isinstance(lowercase__ ,lowercase__ ): self.assertEqual(lowercase__ ,lowercase__ ) elif isinstance(lowercase__ ,lowercase__ ) and not isinstance(lowercase__ ,lowercase__ ): self.assertEqual(lowercase__ ,cba.__class__ ) elif not isinstance(lowercase__ ,lowercase__ ) and isinstance(lowercase__ ,lowercase__ ): self.assertEqual(cba.__class__ ,lowercase__ ) else: self.assertEqual(lowercase__ ,lowercase__ ) def UpperCamelCase__ ( self ,lowerCamelCase_ ) -> Union[str, Any]: A = ["""on_init_end""", """on_train_begin"""] A = 0 A = len(trainer.get_eval_dataloader() ) A = ["""on_prediction_step"""] * len(trainer.get_eval_dataloader() ) + ["""on_log""", """on_evaluate"""] for _ in range(trainer.state.num_train_epochs ): expected_events.append("""on_epoch_begin""" ) for _ in range(lowercase__ ): step += 1 expected_events += ["on_step_begin", "on_step_end"] if step % trainer.args.logging_steps == 0: expected_events.append("""on_log""" ) if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0: expected_events.append("""on_save""" ) expected_events.append("""on_epoch_end""" ) if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def UpperCamelCase__ ( self ) -> Optional[int]: A = self.get_trainer() A = DEFAULT_CALLBACKS.copy() + [ProgressCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) # Callbacks passed at init are added to the default callbacks A = self.get_trainer(callbacks=[MyTestTrainerCallback] ) expected_callbacks.append(lowercase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback A = self.get_trainer(disable_tqdm=lowercase__ ) A = DEFAULT_CALLBACKS.copy() + [PrinterCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) def UpperCamelCase__ ( self ) -> List[Any]: A = DEFAULT_CALLBACKS.copy() + [ProgressCallback] A = self.get_trainer() # We can add, pop, or remove by class name trainer.remove_callback(lowercase__ ) expected_callbacks.remove(lowercase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) A = self.get_trainer() A = trainer.pop_callback(lowercase__ ) self.assertEqual(cb.__class__ ,lowercase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) trainer.add_callback(lowercase__ ) expected_callbacks.insert(0 ,lowercase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) # We can also add, pop, or remove by instance A = self.get_trainer() A = trainer.callback_handler.callbacks[0] trainer.remove_callback(lowercase__ ) expected_callbacks.remove(lowercase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) A = self.get_trainer() A = trainer.callback_handler.callbacks[0] A = trainer.pop_callback(lowercase__ ) self.assertEqual(lowercase__ ,lowercase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) trainer.add_callback(lowercase__ ) expected_callbacks.insert(0 ,lowercase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) def UpperCamelCase__ ( self ) -> List[str]: import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested warnings.simplefilter(action="""ignore""" ,category=lowercase__ ) A = self.get_trainer(callbacks=[MyTestTrainerCallback] ) trainer.train() A = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) ) # Independent log/save/eval A = self.get_trainer(callbacks=[MyTestTrainerCallback] ,logging_steps=5 ) trainer.train() A = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) ) A = self.get_trainer(callbacks=[MyTestTrainerCallback] ,save_steps=5 ) trainer.train() A = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) ) A = self.get_trainer(callbacks=[MyTestTrainerCallback] ,eval_steps=5 ,evaluation_strategy="""steps""" ) trainer.train() A = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) ) A = self.get_trainer(callbacks=[MyTestTrainerCallback] ,evaluation_strategy="""epoch""" ) trainer.train() A = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) ) # A bit of everything A = self.get_trainer( callbacks=[MyTestTrainerCallback] ,logging_steps=3 ,save_steps=1_0 ,eval_steps=5 ,evaluation_strategy="""steps""" ,) trainer.train() A = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) ) # warning should be emitted for duplicated callbacks with patch("""transformers.trainer_callback.logger.warning""" ) as warn_mock: A = self.get_trainer( callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] ,) assert str(lowercase__ ) in warn_mock.call_args[0][0]
617
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer from .base import PipelineTool class lowercase_ (lowerCamelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = 'facebook/bart-large-mnli' SCREAMING_SNAKE_CASE : Optional[Any] = ( 'This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which ' 'should be the text to classify, and `labels`, which should be the list of labels to use for classification. ' 'It returns the most likely label in the list of provided `labels` for the input text.' ) SCREAMING_SNAKE_CASE : Any = 'text_classifier' SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForSequenceClassification SCREAMING_SNAKE_CASE : Tuple = ['text', ['text']] SCREAMING_SNAKE_CASE : List[str] = ['text'] def SCREAMING_SNAKE_CASE ( self : List[Any] ): super().setup() __lowercase = self.model.config __lowercase = -1 for idx, label in config.idalabel.items(): if label.lower().startswith('''entail''' ): __lowercase = int(lowercase__ ) if self.entailment_id == -1: raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''' ) def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Dict ,lowercase__ : List[Any] ): __lowercase = labels return self.pre_processor( [text] * len(lowercase__ ) ,[F"This example is {label}" for label in labels] ,return_tensors='''pt''' ,padding='''max_length''' ,) def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int ): __lowercase = outputs.logits __lowercase = torch.argmax(logits[:, 2] ).item() return self._labels[label_id]
41
0
"""simple docstring""" import math def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase = 0 , lowerCAmelCase = 0 ): '''simple docstring''' UpperCAmelCase = end or len(A__ ) for i in range(A__ , A__ ): UpperCAmelCase = i UpperCAmelCase = array[i] while temp_index != start and temp_index_value < array[temp_index - 1]: UpperCAmelCase = array[temp_index - 1] temp_index -= 1 UpperCAmelCase = temp_index_value return array def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): # Max Heap '''simple docstring''' UpperCAmelCase = index UpperCAmelCase = 2 * index + 1 # Left Node UpperCAmelCase = 2 * index + 2 # Right Node if left_index < heap_size and array[largest] < array[left_index]: UpperCAmelCase = left_index if right_index < heap_size and array[largest] < array[right_index]: UpperCAmelCase = right_index if largest != index: UpperCAmelCase , UpperCAmelCase = array[largest], array[index] heapify(A__ , A__ , A__ ) def _lowerCAmelCase ( lowerCAmelCase ): '''simple docstring''' UpperCAmelCase = len(A__ ) for i in range(n // 2 , -1 , -1 ): heapify(A__ , A__ , A__ ) for i in range(n - 1 , 0 , -1 ): UpperCAmelCase , UpperCAmelCase = array[0], array[i] heapify(A__ , 0 , A__ ) return array def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): '''simple docstring''' if (array[first_index] > array[middle_index]) != ( array[first_index] > array[last_index] ): return array[first_index] elif (array[middle_index] > array[first_index]) != ( array[middle_index] > array[last_index] ): return array[middle_index] else: return array[last_index] def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): '''simple docstring''' UpperCAmelCase = low UpperCAmelCase = high while True: while array[i] < pivot: i += 1 j -= 1 while pivot < array[j]: j -= 1 if i >= j: return i UpperCAmelCase , UpperCAmelCase = array[j], array[i] i += 1 def _lowerCAmelCase ( lowerCAmelCase ): '''simple docstring''' if len(A__ ) == 0: return array UpperCAmelCase = 2 * math.ceil(math.loga(len(A__ ) ) ) UpperCAmelCase = 16 return intro_sort(A__ , 0 , len(A__ ) , A__ , A__ ) def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): '''simple docstring''' while end - start > size_threshold: if max_depth == 0: return heap_sort(A__ ) max_depth -= 1 UpperCAmelCase = median_of_a(A__ , A__ , start + ((end - start) // 2) + 1 , end - 1 ) UpperCAmelCase = partition(A__ , A__ , A__ , A__ ) intro_sort(A__ , A__ , A__ , A__ , A__ ) UpperCAmelCase = p return insertion_sort(A__ , A__ , A__ ) if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase_ : Optional[int] = input('''Enter numbers separated by a comma : ''').strip() lowerCAmelCase_ : str = [float(item) for item in user_input.split(''',''')] print(sort(unsorted))
673
'''simple docstring''' from collections.abc import Callable class lowercase_ : """simple docstring""" def __init__( self : Optional[int] ,lowercase__ : Callable | None = None ): # Stores actual heap items. __lowercase = [] # Stores indexes of each item for supporting updates and deletion. __lowercase = {} # Stores current size of heap. __lowercase = 0 # Stores function used to evaluate the score of an item on which basis ordering # will be done. __lowercase = key or (lambda lowercase__ : x) def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : int ): return int((i - 1) / 2 ) if i > 0 else None def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : int ): __lowercase = int(2 * i + 1 ) return left if 0 < left < self.size else None def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : int ): __lowercase = int(2 * i + 2 ) return right if 0 < right < self.size else None def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : int ,lowercase__ : int ): __lowercase , __lowercase = ( self.pos_map[self.arr[j][0]], self.pos_map[self.arr[i][0]], ) # Then swap the items in the list. __lowercase , __lowercase = self.arr[j], self.arr[i] def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : int ,lowercase__ : int ): return self.arr[i][1] < self.arr[j][1] def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int ): __lowercase = self._left(lowercase__ ) __lowercase = self._right(lowercase__ ) __lowercase = i if left is not None and not self._cmp(lowercase__ ,lowercase__ ): __lowercase = left if right is not None and not self._cmp(lowercase__ ,lowercase__ ): __lowercase = right return valid_parent def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int ): __lowercase = self._parent(lowercase__ ) while parent is not None and not self._cmp(lowercase__ ,lowercase__ ): self._swap(lowercase__ ,lowercase__ ) __lowercase , __lowercase = parent, self._parent(lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : int ): __lowercase = self._get_valid_parent(lowercase__ ) while valid_parent != index: self._swap(lowercase__ ,lowercase__ ) __lowercase , __lowercase = valid_parent, self._get_valid_parent(lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : int ,lowercase__ : int ): if item not in self.pos_map: return __lowercase = self.pos_map[item] __lowercase = [item, self.key(lowercase__ )] # Make sure heap is right in both up and down direction. # Ideally only one of them will make any change. self._heapify_up(lowercase__ ) self._heapify_down(lowercase__ ) def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int ): if item not in self.pos_map: return __lowercase = self.pos_map[item] del self.pos_map[item] __lowercase = self.arr[self.size - 1] __lowercase = index self.size -= 1 # Make sure heap is right in both up and down direction. Ideally only one # of them will make any change- so no performance loss in calling both. if self.size > index: self._heapify_up(lowercase__ ) self._heapify_down(lowercase__ ) def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : int ,lowercase__ : int ): __lowercase = len(self.arr ) if arr_len == self.size: self.arr.append([item, self.key(lowercase__ )] ) else: __lowercase = [item, self.key(lowercase__ )] __lowercase = self.size self.size += 1 self._heapify_up(self.size - 1 ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): return self.arr[0] if self.size else None def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): __lowercase = self.get_top() if top_item_tuple: self.delete_item(top_item_tuple[0] ) return top_item_tuple def _A ( ): """simple docstring""" if __name__ == "__main__": import doctest doctest.testmod()
41
0
import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def _UpperCAmelCase ( ): """simple docstring""" __lowerCamelCase : Any = ArgumentParser( description=( """PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes""" ) ) # Optional arguments for the launch helper parser.add_argument("""--num_cores""" , type=A__ , default=1 , help="""Number of TPU cores to use (1 or 8).""" ) # positional parser.add_argument( """training_script""" , type=A__ , help=( """The full path to the single TPU training """ """program/script to be launched in parallel, """ """followed by all the arguments for the """ """training script""" ) , ) # rest from the training program parser.add_argument("""training_script_args""" , nargs=A__ ) return parser.parse_args() def _UpperCAmelCase ( ): """simple docstring""" __lowerCamelCase : Optional[int] = parse_args() # Import training_script as a module. __lowerCamelCase : Optional[Any] = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) __lowerCamelCase : str = script_fpath.stem __lowerCamelCase : Dict = importlib.import_module(A__ ) # Patch sys.argv __lowerCamelCase : Union[str, Any] = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )] xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores ) if __name__ == "__main__": main()
519
'''simple docstring''' import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class lowercase_ (lowerCamelCase__ ): """simple docstring""" def __init__( self : List[str] ): __lowercase = [] def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : str ,**lowercase__ : Any ): self.events.append('''on_init_end''' ) def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : List[str] ,lowercase__ : Optional[Any] ,lowercase__ : int ,**lowercase__ : Optional[int] ): self.events.append('''on_train_begin''' ) def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Tuple ,lowercase__ : int ,lowercase__ : int ,**lowercase__ : List[str] ): self.events.append('''on_train_end''' ) def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Any ,lowercase__ : Union[str, Any] ,lowercase__ : Any ,**lowercase__ : Optional[Any] ): self.events.append('''on_epoch_begin''' ) def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : int ,lowercase__ : Any ,**lowercase__ : Optional[int] ): self.events.append('''on_epoch_end''' ) def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : List[str] ,lowercase__ : str ,lowercase__ : List[str] ,**lowercase__ : List[str] ): self.events.append('''on_step_begin''' ) def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Union[str, Any] ,lowercase__ : int ,lowercase__ : Optional[int] ,**lowercase__ : Dict ): self.events.append('''on_step_end''' ) def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Any ,lowercase__ : Tuple ,lowercase__ : Union[str, Any] ,**lowercase__ : Any ): self.events.append('''on_evaluate''' ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : str ,lowercase__ : Union[str, Any] ,lowercase__ : int ,**lowercase__ : Optional[Any] ): self.events.append('''on_predict''' ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[Any] ,**lowercase__ : int ): self.events.append('''on_save''' ) def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : List[str] ,**lowercase__ : List[str] ): self.events.append('''on_log''' ) def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : str ,lowercase__ : int ,lowercase__ : Dict ,**lowercase__ : str ): self.events.append('''on_prediction_step''' ) @require_torch class lowercase_ (unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self : List[str] ): __lowercase = tempfile.mkdtemp() def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): shutil.rmtree(self.output_dir ) def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Optional[Any]=0 ,lowercase__ : Any=0 ,lowercase__ : Tuple=6_4 ,lowercase__ : Optional[int]=6_4 ,lowercase__ : Optional[Any]=None ,lowercase__ : str=False ,**lowercase__ : Any ): # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure # its set to False since the tests later on depend on its value. __lowercase = RegressionDataset(length=lowercase__ ) __lowercase = RegressionDataset(length=lowercase__ ) __lowercase = RegressionModelConfig(a=lowercase__ ,b=lowercase__ ) __lowercase = RegressionPreTrainedModel(lowercase__ ) __lowercase = TrainingArguments(self.output_dir ,disable_tqdm=lowercase__ ,report_to=[] ,**lowercase__ ) return Trainer( lowercase__ ,lowercase__ ,train_dataset=lowercase__ ,eval_dataset=lowercase__ ,callbacks=lowercase__ ,) def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Optional[int] ,lowercase__ : Any ): self.assertEqual(len(lowercase__ ) ,len(lowercase__ ) ) # Order doesn't matter __lowercase = sorted(lowercase__ ,key=lambda lowercase__ : cb.__name__ if isinstance(lowercase__ ,lowercase__ ) else cb.__class__.__name__ ) __lowercase = sorted(lowercase__ ,key=lambda lowercase__ : cb.__name__ if isinstance(lowercase__ ,lowercase__ ) else cb.__class__.__name__ ) for cba, cba in zip(lowercase__ ,lowercase__ ): if isinstance(lowercase__ ,lowercase__ ) and isinstance(lowercase__ ,lowercase__ ): self.assertEqual(lowercase__ ,lowercase__ ) elif isinstance(lowercase__ ,lowercase__ ) and not isinstance(lowercase__ ,lowercase__ ): self.assertEqual(lowercase__ ,cba.__class__ ) elif not isinstance(lowercase__ ,lowercase__ ) and isinstance(lowercase__ ,lowercase__ ): self.assertEqual(cba.__class__ ,lowercase__ ) else: self.assertEqual(lowercase__ ,lowercase__ ) def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Union[str, Any] ): __lowercase = ['''on_init_end''', '''on_train_begin'''] __lowercase = 0 __lowercase = len(trainer.get_eval_dataloader() ) __lowercase = ['''on_prediction_step'''] * len(trainer.get_eval_dataloader() ) + ['''on_log''', '''on_evaluate'''] for _ in range(trainer.state.num_train_epochs ): expected_events.append('''on_epoch_begin''' ) for _ in range(lowercase__ ): step += 1 expected_events += ["on_step_begin", "on_step_end"] if step % trainer.args.logging_steps == 0: expected_events.append('''on_log''' ) if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0: expected_events.append('''on_save''' ) expected_events.append('''on_epoch_end''' ) if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def SCREAMING_SNAKE_CASE ( self : str ): __lowercase = self.get_trainer() __lowercase = DEFAULT_CALLBACKS.copy() + [ProgressCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) # Callbacks passed at init are added to the default callbacks __lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ) expected_callbacks.append(lowercase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback __lowercase = self.get_trainer(disable_tqdm=lowercase__ ) __lowercase = DEFAULT_CALLBACKS.copy() + [PrinterCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): __lowercase = DEFAULT_CALLBACKS.copy() + [ProgressCallback] __lowercase = self.get_trainer() # We can add, pop, or remove by class name trainer.remove_callback(lowercase__ ) expected_callbacks.remove(lowercase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) __lowercase = self.get_trainer() __lowercase = trainer.pop_callback(lowercase__ ) self.assertEqual(cb.__class__ ,lowercase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) trainer.add_callback(lowercase__ ) expected_callbacks.insert(0 ,lowercase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) # We can also add, pop, or remove by instance __lowercase = self.get_trainer() __lowercase = trainer.callback_handler.callbacks[0] trainer.remove_callback(lowercase__ ) expected_callbacks.remove(lowercase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) __lowercase = self.get_trainer() __lowercase = trainer.callback_handler.callbacks[0] __lowercase = trainer.pop_callback(lowercase__ ) self.assertEqual(lowercase__ ,lowercase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) trainer.add_callback(lowercase__ ) expected_callbacks.insert(0 ,lowercase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Dict ): import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested warnings.simplefilter(action='''ignore''' ,category=lowercase__ ) __lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ) trainer.train() __lowercase = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) ) # Independent log/save/eval __lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,logging_steps=5 ) trainer.train() __lowercase = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) ) __lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,save_steps=5 ) trainer.train() __lowercase = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) ) __lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,eval_steps=5 ,evaluation_strategy='''steps''' ) trainer.train() __lowercase = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) ) __lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,evaluation_strategy='''epoch''' ) trainer.train() __lowercase = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) ) # A bit of everything __lowercase = self.get_trainer( callbacks=[MyTestTrainerCallback] ,logging_steps=3 ,save_steps=1_0 ,eval_steps=5 ,evaluation_strategy='''steps''' ,) trainer.train() __lowercase = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) ) # warning should be emitted for duplicated callbacks with patch('''transformers.trainer_callback.logger.warning''' ) as warn_mock: __lowercase = self.get_trainer( callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] ,) assert str(lowercase__ ) in warn_mock.call_args[0][0]
41
0
"""simple docstring""" def a_ ( __a , __a ): return int((input_a, input_a).count(1 ) != 0 ) def a_ ( ): assert or_gate(0 , 0 ) == 0 assert or_gate(0 , 1 ) == 1 assert or_gate(1 , 0 ) == 1 assert or_gate(1 , 1 ) == 1 if __name__ == "__main__": print(or_gate(0, 1)) print(or_gate(1, 0)) print(or_gate(0, 0)) print(or_gate(1, 1))
571
'''simple docstring''' from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, ) @flax.struct.dataclass class lowercase_ (lowerCamelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE : jnp.ndarray SCREAMING_SNAKE_CASE : jnp.ndarray class lowercase_ (nn.Module ): """simple docstring""" SCREAMING_SNAKE_CASE : int SCREAMING_SNAKE_CASE : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6) SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa def SCREAMING_SNAKE_CASE ( self : Dict ): __lowercase = nn.Conv( self.block_out_channels[0] ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) __lowercase = [] for i in range(len(self.block_out_channels ) - 1 ): __lowercase = self.block_out_channels[i] __lowercase = self.block_out_channels[i + 1] __lowercase = nn.Conv( lowercase__ ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) blocks.append(lowercase__ ) __lowercase = nn.Conv( lowercase__ ,kernel_size=(3, 3) ,strides=(2, 2) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) blocks.append(lowercase__ ) __lowercase = blocks __lowercase = nn.Conv( self.conditioning_embedding_channels ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,) def __call__( self : List[str] ,lowercase__ : Optional[int] ): __lowercase = self.conv_in(lowercase__ ) __lowercase = nn.silu(lowercase__ ) for block in self.blocks: __lowercase = block(lowercase__ ) __lowercase = nn.silu(lowercase__ ) __lowercase = self.conv_out(lowercase__ ) return embedding @flax_register_to_config class lowercase_ (nn.Module , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE : int = 3_2 SCREAMING_SNAKE_CASE : int = 4 SCREAMING_SNAKE_CASE : Tuple[str] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) SCREAMING_SNAKE_CASE : Union[bool, Tuple[bool]] = False SCREAMING_SNAKE_CASE : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0) SCREAMING_SNAKE_CASE : int = 2 SCREAMING_SNAKE_CASE : Union[int, Tuple[int]] = 8 SCREAMING_SNAKE_CASE : Optional[Union[int, Tuple[int]]] = None SCREAMING_SNAKE_CASE : int = 1_2_8_0 SCREAMING_SNAKE_CASE : float = 0.0 SCREAMING_SNAKE_CASE : bool = False SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa SCREAMING_SNAKE_CASE : bool = True SCREAMING_SNAKE_CASE : int = 0 SCREAMING_SNAKE_CASE : str = "rgb" SCREAMING_SNAKE_CASE : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6) def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : jax.random.KeyArray ): # init input tensors __lowercase = (1, self.in_channels, self.sample_size, self.sample_size) __lowercase = jnp.zeros(lowercase__ ,dtype=jnp.floataa ) __lowercase = jnp.ones((1,) ,dtype=jnp.intaa ) __lowercase = jnp.zeros((1, 1, self.cross_attention_dim) ,dtype=jnp.floataa ) __lowercase = (1, 3, self.sample_size * 8, self.sample_size * 8) __lowercase = jnp.zeros(lowercase__ ,dtype=jnp.floataa ) __lowercase , __lowercase = jax.random.split(lowercase__ ) __lowercase = {'''params''': params_rng, '''dropout''': dropout_rng} return self.init(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )["params"] def SCREAMING_SNAKE_CASE ( self : Any ): __lowercase = self.block_out_channels __lowercase = block_out_channels[0] * 4 # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. __lowercase = self.num_attention_heads or self.attention_head_dim # input __lowercase = nn.Conv( block_out_channels[0] ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) # time __lowercase = FlaxTimesteps( block_out_channels[0] ,flip_sin_to_cos=self.flip_sin_to_cos ,freq_shift=self.config.freq_shift ) __lowercase = FlaxTimestepEmbedding(lowercase__ ,dtype=self.dtype ) __lowercase = FlaxControlNetConditioningEmbedding( conditioning_embedding_channels=block_out_channels[0] ,block_out_channels=self.conditioning_embedding_out_channels ,) __lowercase = self.only_cross_attention if isinstance(lowercase__ ,lowercase__ ): __lowercase = (only_cross_attention,) * len(self.down_block_types ) if isinstance(lowercase__ ,lowercase__ ): __lowercase = (num_attention_heads,) * len(self.down_block_types ) # down __lowercase = [] __lowercase = [] __lowercase = block_out_channels[0] __lowercase = nn.Conv( lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,) controlnet_down_blocks.append(lowercase__ ) for i, down_block_type in enumerate(self.down_block_types ): __lowercase = output_channel __lowercase = block_out_channels[i] __lowercase = i == len(lowercase__ ) - 1 if down_block_type == "CrossAttnDownBlock2D": __lowercase = FlaxCrossAttnDownBlockaD( in_channels=lowercase__ ,out_channels=lowercase__ ,dropout=self.dropout ,num_layers=self.layers_per_block ,num_attention_heads=num_attention_heads[i] ,add_downsample=not is_final_block ,use_linear_projection=self.use_linear_projection ,only_cross_attention=only_cross_attention[i] ,dtype=self.dtype ,) else: __lowercase = FlaxDownBlockaD( in_channels=lowercase__ ,out_channels=lowercase__ ,dropout=self.dropout ,num_layers=self.layers_per_block ,add_downsample=not is_final_block ,dtype=self.dtype ,) down_blocks.append(lowercase__ ) for _ in range(self.layers_per_block ): __lowercase = nn.Conv( lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,) controlnet_down_blocks.append(lowercase__ ) if not is_final_block: __lowercase = nn.Conv( lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,) controlnet_down_blocks.append(lowercase__ ) __lowercase = down_blocks __lowercase = controlnet_down_blocks # mid __lowercase = block_out_channels[-1] __lowercase = FlaxUNetMidBlockaDCrossAttn( in_channels=lowercase__ ,dropout=self.dropout ,num_attention_heads=num_attention_heads[-1] ,use_linear_projection=self.use_linear_projection ,dtype=self.dtype ,) __lowercase = nn.Conv( lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,) def __call__( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Any ,lowercase__ : List[Any] ,lowercase__ : str ,lowercase__ : float = 1.0 ,lowercase__ : bool = True ,lowercase__ : bool = False ,): __lowercase = self.controlnet_conditioning_channel_order if channel_order == "bgr": __lowercase = jnp.flip(lowercase__ ,axis=1 ) # 1. time if not isinstance(lowercase__ ,jnp.ndarray ): __lowercase = jnp.array([timesteps] ,dtype=jnp.intaa ) elif isinstance(lowercase__ ,jnp.ndarray ) and len(timesteps.shape ) == 0: __lowercase = timesteps.astype(dtype=jnp.floataa ) __lowercase = jnp.expand_dims(lowercase__ ,0 ) __lowercase = self.time_proj(lowercase__ ) __lowercase = self.time_embedding(lowercase__ ) # 2. pre-process __lowercase = jnp.transpose(lowercase__ ,(0, 2, 3, 1) ) __lowercase = self.conv_in(lowercase__ ) __lowercase = jnp.transpose(lowercase__ ,(0, 2, 3, 1) ) __lowercase = self.controlnet_cond_embedding(lowercase__ ) sample += controlnet_cond # 3. down __lowercase = (sample,) for down_block in self.down_blocks: if isinstance(lowercase__ ,lowercase__ ): __lowercase , __lowercase = down_block(lowercase__ ,lowercase__ ,lowercase__ ,deterministic=not train ) else: __lowercase , __lowercase = down_block(lowercase__ ,lowercase__ ,deterministic=not train ) down_block_res_samples += res_samples # 4. mid __lowercase = self.mid_block(lowercase__ ,lowercase__ ,lowercase__ ,deterministic=not train ) # 5. contronet blocks __lowercase = () for down_block_res_sample, controlnet_block in zip(lowercase__ ,self.controlnet_down_blocks ): __lowercase = controlnet_block(lowercase__ ) controlnet_down_block_res_samples += (down_block_res_sample,) __lowercase = controlnet_down_block_res_samples __lowercase = self.controlnet_mid_block(lowercase__ ) # 6. scaling __lowercase = [sample * conditioning_scale for sample in down_block_res_samples] mid_block_res_sample *= conditioning_scale if not return_dict: return (down_block_res_samples, mid_block_res_sample) return FlaxControlNetOutput( down_block_res_samples=lowercase__ ,mid_block_res_sample=lowercase__ )
41
0
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import GLPNImageProcessor class __magic_name__ ( unittest.TestCase): '''simple docstring''' def __init__( self: Union[str, Any] , _lowerCamelCase: int , _lowerCamelCase: Any=7 , _lowerCamelCase: Optional[Any]=3 , _lowerCamelCase: Any=18 , _lowerCamelCase: Optional[Any]=30 , _lowerCamelCase: Any=4_00 , _lowerCamelCase: str=True , _lowerCamelCase: Union[str, Any]=32 , _lowerCamelCase: Tuple=True , ): SCREAMING_SNAKE_CASE_ = parent SCREAMING_SNAKE_CASE_ = batch_size SCREAMING_SNAKE_CASE_ = num_channels SCREAMING_SNAKE_CASE_ = image_size SCREAMING_SNAKE_CASE_ = min_resolution SCREAMING_SNAKE_CASE_ = max_resolution SCREAMING_SNAKE_CASE_ = do_resize SCREAMING_SNAKE_CASE_ = size_divisor SCREAMING_SNAKE_CASE_ = do_rescale def _A ( self: Optional[Any] ): return { "do_resize": self.do_resize, "size_divisor": self.size_divisor, "do_rescale": self.do_rescale, } @require_torch @require_vision class __magic_name__ ( lowerCamelCase__ , unittest.TestCase): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Tuple = GLPNImageProcessor if is_vision_available() else None def _A ( self: Union[str, Any] ): SCREAMING_SNAKE_CASE_ = GLPNImageProcessingTester(self ) @property def _A ( self: str ): return self.image_processor_tester.prepare_image_processor_dict() def _A ( self: int ): SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowercase__ , '''do_resize''' ) ) self.assertTrue(hasattr(lowercase__ , '''size_divisor''' ) ) self.assertTrue(hasattr(lowercase__ , '''resample''' ) ) self.assertTrue(hasattr(lowercase__ , '''do_rescale''' ) ) def _A ( self: Union[str, Any] ): pass def _A ( self: List[str] ): # Initialize image_processing SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase__ ) for image in image_inputs: self.assertIsInstance(lowercase__ , Image.Image ) # Test not batched input (GLPNImageProcessor doesn't support batching) SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 ) def _A ( self: Optional[Any] ): # Initialize image_processing SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase__ , numpify=lowercase__ ) for image in image_inputs: self.assertIsInstance(lowercase__ , np.ndarray ) # Test not batched input (GLPNImageProcessor doesn't support batching) SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 ) def _A ( self: Dict ): # Initialize image_processing SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase__ , torchify=lowercase__ ) for image in image_inputs: self.assertIsInstance(lowercase__ , torch.Tensor ) # Test not batched input (GLPNImageProcessor doesn't support batching) SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
234
'''simple docstring''' import io import math from typing import Dict, Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, get_image_size, infer_channel_dimension_format, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_vision_available, logging from ...utils.import_utils import requires_backends if is_vision_available(): import textwrap from PIL import Image, ImageDraw, ImageFont if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: lowerCAmelCase__ = False lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = '''ybelkada/fonts''' def _A ( ): """simple docstring""" if is_torch_available() and not is_torch_greater_or_equal_than_1_11: raise ImportError( F"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use " '''Pix2StructImageProcessor. Please upgrade torch.''' ) def _A ( A__ , A__ , A__ ): """simple docstring""" requires_backends(A__ , ['''torch'''] ) _check_torch_version() __lowercase = image_tensor.unsqueeze(0 ) __lowercase = torch.nn.functional.unfold(A__ , (patch_height, patch_width) , stride=(patch_height, patch_width) ) __lowercase = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , A__ , A__ , -1 ) __lowercase = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape( image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , ) return patches.unsqueeze(0 ) def _A ( A__ , A__ = 36 , A__ = "black" , A__ = "white" , A__ = 5 , A__ = 5 , A__ = 5 , A__ = 5 , A__ = None , A__ = None , ): """simple docstring""" requires_backends(A__ , '''vision''' ) # Add new lines so that each line is no more than 80 characters. __lowercase = textwrap.TextWrapper(width=80 ) __lowercase = wrapper.wrap(text=A__ ) __lowercase = '''\n'''.join(A__ ) if font_bytes is not None and font_path is None: __lowercase = io.BytesIO(A__ ) elif font_path is not None: __lowercase = font_path else: __lowercase = hf_hub_download(A__ , '''Arial.TTF''' ) __lowercase = ImageFont.truetype(A__ , encoding='''UTF-8''' , size=A__ ) # Use a temporary canvas to determine the width and height in pixels when # rendering the text. __lowercase = ImageDraw.Draw(Image.new('''RGB''' , (1, 1) , A__ ) ) __lowercase , __lowercase , __lowercase , __lowercase = temp_draw.textbbox((0, 0) , A__ , A__ ) # Create the actual image with a bit of padding around the text. __lowercase = text_width + left_padding + right_padding __lowercase = text_height + top_padding + bottom_padding __lowercase = Image.new('''RGB''' , (image_width, image_height) , A__ ) __lowercase = ImageDraw.Draw(A__ ) draw.text(xy=(left_padding, top_padding) , text=A__ , fill=A__ , font=A__ ) return image def _A ( A__ , A__ , **A__ ): """simple docstring""" requires_backends(A__ , '''vision''' ) # Convert to PIL image if necessary __lowercase = to_pil_image(A__ ) __lowercase = render_text(A__ , **A__ ) __lowercase = max(header_image.width , image.width ) __lowercase = int(image.height * (new_width / image.width) ) __lowercase = int(header_image.height * (new_width / header_image.width) ) __lowercase = Image.new('''RGB''' , (new_width, new_height + new_header_height) , '''white''' ) new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) ) new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) ) # Convert back to the original framework if necessary __lowercase = to_numpy_array(A__ ) if infer_channel_dimension_format(A__ ) == ChannelDimension.LAST: __lowercase = to_channel_dimension_format(A__ , ChannelDimension.LAST ) return new_image class lowercase_ (lowerCamelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = ['flattened_patches'] def __init__( self : Any ,lowercase__ : bool = True ,lowercase__ : bool = True ,lowercase__ : Dict[str, int] = None ,lowercase__ : int = 2_0_4_8 ,lowercase__ : bool = False ,**lowercase__ : List[str] ,): super().__init__(**lowercase__ ) __lowercase = patch_size if patch_size is not None else {'''height''': 1_6, '''width''': 1_6} __lowercase = do_normalize __lowercase = do_convert_rgb __lowercase = max_patches __lowercase = is_vqa def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : np.ndarray ,lowercase__ : int ,lowercase__ : dict ,**lowercase__ : Tuple ): requires_backends(self.extract_flattened_patches ,'''torch''' ) _check_torch_version() # convert to torch __lowercase = to_channel_dimension_format(lowercase__ ,ChannelDimension.FIRST ) __lowercase = torch.from_numpy(lowercase__ ) __lowercase , __lowercase = patch_size['''height'''], patch_size['''width'''] __lowercase , __lowercase = get_image_size(lowercase__ ) # maximize scale s.t. __lowercase = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) ) __lowercase = max(min(math.floor(scale * image_height / patch_height ) ,lowercase__ ) ,1 ) __lowercase = max(min(math.floor(scale * image_width / patch_width ) ,lowercase__ ) ,1 ) __lowercase = max(num_feasible_rows * patch_height ,1 ) __lowercase = max(num_feasible_cols * patch_width ,1 ) __lowercase = torch.nn.functional.interpolate( image.unsqueeze(0 ) ,size=(resized_height, resized_width) ,mode='''bilinear''' ,align_corners=lowercase__ ,antialias=lowercase__ ,).squeeze(0 ) # [1, rows, columns, patch_height * patch_width * image_channels] __lowercase = torch_extract_patches(lowercase__ ,lowercase__ ,lowercase__ ) __lowercase = patches.shape __lowercase = patches_shape[1] __lowercase = patches_shape[2] __lowercase = patches_shape[3] # [rows * columns, patch_height * patch_width * image_channels] __lowercase = patches.reshape([rows * columns, depth] ) # [rows * columns, 1] __lowercase = torch.arange(lowercase__ ).reshape([rows, 1] ).repeat(1 ,lowercase__ ).reshape([rows * columns, 1] ) __lowercase = torch.arange(lowercase__ ).reshape([1, columns] ).repeat(lowercase__ ,1 ).reshape([rows * columns, 1] ) # Offset by 1 so the ids do not contain zeros, which represent padding. row_ids += 1 col_ids += 1 # Prepare additional patch features. # [rows * columns, 1] __lowercase = row_ids.to(torch.floataa ) __lowercase = col_ids.to(torch.floataa ) # [rows * columns, 2 + patch_height * patch_width * image_channels] __lowercase = torch.cat([row_ids, col_ids, patches] ,-1 ) # [max_patches, 2 + patch_height * patch_width * image_channels] __lowercase = torch.nn.functional.pad(lowercase__ ,[0, 0, 0, max_patches - (rows * columns)] ).float() __lowercase = to_numpy_array(lowercase__ ) return result def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : np.ndarray ,lowercase__ : Optional[Union[str, ChannelDimension]] = None ,**lowercase__ : List[Any] ): if image.dtype == np.uinta: __lowercase = image.astype(np.floataa ) # take mean across the whole `image` __lowercase = np.mean(lowercase__ ) __lowercase = np.std(lowercase__ ) __lowercase = max(lowercase__ ,1.0 / math.sqrt(np.prod(image.shape ) ) ) return normalize(lowercase__ ,mean=lowercase__ ,std=lowercase__ ,**lowercase__ ) def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : ImageInput ,lowercase__ : Optional[str] = None ,lowercase__ : bool = None ,lowercase__ : Optional[bool] = None ,lowercase__ : Optional[int] = None ,lowercase__ : Optional[Dict[str, int]] = None ,lowercase__ : Optional[Union[str, TensorType]] = None ,lowercase__ : ChannelDimension = ChannelDimension.FIRST ,**lowercase__ : List[Any] ,): __lowercase = do_normalize if do_normalize is not None else self.do_normalize __lowercase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __lowercase = patch_size if patch_size is not None else self.patch_size __lowercase = max_patches if max_patches is not None else self.max_patches __lowercase = self.is_vqa if kwargs.get('''data_format''' ,lowercase__ ) is not None: raise ValueError('''data_format is not an accepted input as the outputs are ''' ) __lowercase = make_list_of_images(lowercase__ ) if not valid_images(lowercase__ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: __lowercase = [convert_to_rgb(lowercase__ ) for image in images] # All transformations expect numpy arrays. __lowercase = [to_numpy_array(lowercase__ ) for image in images] if is_vqa: if header_text is None: raise ValueError('''A header text must be provided for VQA models.''' ) __lowercase = kwargs.pop('''font_bytes''' ,lowercase__ ) __lowercase = kwargs.pop('''font_path''' ,lowercase__ ) if isinstance(lowercase__ ,lowercase__ ): __lowercase = [header_text] * len(lowercase__ ) __lowercase = [ render_header(lowercase__ ,header_text[i] ,font_bytes=lowercase__ ,font_path=lowercase__ ) for i, image in enumerate(lowercase__ ) ] if do_normalize: __lowercase = [self.normalize(image=lowercase__ ) for image in images] # convert to torch tensor and permute __lowercase = [ self.extract_flattened_patches(image=lowercase__ ,max_patches=lowercase__ ,patch_size=lowercase__ ) for image in images ] # create attention mask in numpy __lowercase = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images] __lowercase = BatchFeature( data={'''flattened_patches''': images, '''attention_mask''': attention_masks} ,tensor_type=lowercase__ ) return encoded_outputs
41
0
'''simple docstring''' __a: Optional[Any] = """ # Installazione di Transformers ! pip install transformers datasets # Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e # rimuovi la modalità commento al comando seguente. # ! pip install git+https://github.com/huggingface/transformers.git """ __a: Any = [{"""type""": """code""", """content""": INSTALL_CONTENT}] __a: Any = { """{processor_class}""": """FakeProcessorClass""", """{model_class}""": """FakeModelClass""", """{object_class}""": """FakeObjectClass""", }
152
'''simple docstring''' import doctest from collections import deque import numpy as np class lowercase_ : """simple docstring""" def __init__( self : Optional[Any] ): __lowercase = [2, 1, 2, -1] __lowercase = [1, 2, 3, 4] def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): __lowercase = len(self.first_signal ) __lowercase = len(self.second_signal ) __lowercase = max(lowercase__ ,lowercase__ ) # create a zero matrix of max_length x max_length __lowercase = [[0] * max_length for i in range(lowercase__ )] # fills the smaller signal with zeros to make both signals of same length if length_first_signal < length_second_signal: self.first_signal += [0] * (max_length - length_first_signal) elif length_first_signal > length_second_signal: self.second_signal += [0] * (max_length - length_second_signal) for i in range(lowercase__ ): __lowercase = deque(self.second_signal ) rotated_signal.rotate(lowercase__ ) for j, item in enumerate(lowercase__ ): matrix[i][j] += item # multiply the matrix with the first signal __lowercase = np.matmul(np.transpose(lowercase__ ) ,np.transpose(self.first_signal ) ) # rounding-off to two decimal places return [round(lowercase__ ,2 ) for i in final_signal] if __name__ == "__main__": doctest.testmod()
41
0
"""simple docstring""" import sys import webbrowser import requests from bsa import BeautifulSoup from fake_useragent import UserAgent if __name__ == "__main__": print("Googling.....") __A = "https://www.google.com/search?q=" + " ".join(sys.argv[1:]) __A = requests.get(url, headers={"UserAgent": UserAgent().random}) # res.raise_for_status() with open("project1a.html", "wb") as out_file: # only for knowing the class for data in res.iter_content(1_0_0_0_0): out_file.write(data) __A = BeautifulSoup(res.text, "html.parser") __A = list(soup.select(".eZt8xd"))[:5] print(len(links)) for link in links: if link.text == "Maps": webbrowser.open(link.get("href")) else: webbrowser.open(f'''https://google.com{link.get('href')}''')
586
'''simple docstring''' from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import torch from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available @dataclass class lowercase_ (lowerCamelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Union[List[np.ndarray], torch.FloatTensor] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_text_to_video_synth import TextToVideoSDPipeline from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401 from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
41
0
"""simple docstring""" class _snake_case : """simple docstring""" def __init__( self : Optional[int]): """simple docstring""" _SCREAMING_SNAKE_CASE : List[str] = 0 _SCREAMING_SNAKE_CASE : Optional[int] = 0 _SCREAMING_SNAKE_CASE : Tuple = {} def _lowerCAmelCase ( self : Union[str, Any] , _A : str): """simple docstring""" if vertex not in self.adjacency: _SCREAMING_SNAKE_CASE : List[Any] = {} self.num_vertices += 1 def _lowerCAmelCase ( self : List[Any] , _A : Optional[Any] , _A : List[str] , _A : Any): """simple docstring""" self.add_vertex(lowercase__) self.add_vertex(lowercase__) if head == tail: return _SCREAMING_SNAKE_CASE : int = weight _SCREAMING_SNAKE_CASE : List[Any] = weight def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : str = self.get_edges() for edge in edges: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = edge edges.remove((tail, head, weight)) for i in range(len(lowercase__)): _SCREAMING_SNAKE_CASE : str = list(edges[i]) edges.sort(key=lambda _A: e[2]) for i in range(len(lowercase__) - 1): if edges[i][2] >= edges[i + 1][2]: _SCREAMING_SNAKE_CASE : Any = edges[i][2] + 1 for edge in edges: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = edge _SCREAMING_SNAKE_CASE : Dict = weight _SCREAMING_SNAKE_CASE : Optional[int] = weight def __str__( self : Union[str, Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = """""" for tail in self.adjacency: for head in self.adjacency[tail]: _SCREAMING_SNAKE_CASE : Optional[int] = self.adjacency[head][tail] string += f"""{head} -> {tail} == {weight}\n""" return string.rstrip("""\n""") def _lowerCAmelCase ( self : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = [] for tail in self.adjacency: for head in self.adjacency[tail]: output.append((tail, head, self.adjacency[head][tail])) return output def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" return self.adjacency.keys() @staticmethod def _lowerCAmelCase ( _A : str=None , _A : Any=None): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = Graph() if vertices is None: _SCREAMING_SNAKE_CASE : Optional[Any] = [] if edges is None: _SCREAMING_SNAKE_CASE : int = [] for vertex in vertices: g.add_vertex(lowercase__) for edge in edges: g.add_edge(*lowercase__) return g class _snake_case : """simple docstring""" def __init__( self : List[str]): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = {} _SCREAMING_SNAKE_CASE : Any = {} def __len__( self : Dict): """simple docstring""" return len(self.parent) def _lowerCAmelCase ( self : Any , _A : Any): """simple docstring""" if item in self.parent: return self.find(lowercase__) _SCREAMING_SNAKE_CASE : Dict = item _SCREAMING_SNAKE_CASE : Optional[Any] = 0 return item def _lowerCAmelCase ( self : Union[str, Any] , _A : Dict): """simple docstring""" if item not in self.parent: return self.make_set(lowercase__) if item != self.parent[item]: _SCREAMING_SNAKE_CASE : Optional[int] = self.find(self.parent[item]) return self.parent[item] def _lowerCAmelCase ( self : int , _A : Tuple , _A : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = self.find(lowercase__) _SCREAMING_SNAKE_CASE : List[str] = self.find(lowercase__) if roota == roota: return roota if self.rank[roota] > self.rank[roota]: _SCREAMING_SNAKE_CASE : List[Any] = roota return roota if self.rank[roota] < self.rank[roota]: _SCREAMING_SNAKE_CASE : Tuple = roota return roota if self.rank[roota] == self.rank[roota]: self.rank[roota] += 1 _SCREAMING_SNAKE_CASE : Any = roota return roota return None @staticmethod def _lowerCAmelCase ( _A : Optional[int]): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = graph.num_vertices _SCREAMING_SNAKE_CASE : str = Graph.UnionFind() _SCREAMING_SNAKE_CASE : Tuple = [] while num_components > 1: _SCREAMING_SNAKE_CASE : List[Any] = {} for vertex in graph.get_vertices(): _SCREAMING_SNAKE_CASE : List[Any] = -1 _SCREAMING_SNAKE_CASE : int = graph.get_edges() for edge in edges: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Tuple = edge edges.remove((tail, head, weight)) for edge in edges: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = edge _SCREAMING_SNAKE_CASE : Optional[Any] = union_find.find(lowercase__) _SCREAMING_SNAKE_CASE : List[str] = union_find.find(lowercase__) if seta != seta: if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: _SCREAMING_SNAKE_CASE : Union[str, Any] = [head, tail, weight] if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: _SCREAMING_SNAKE_CASE : Optional[Any] = [head, tail, weight] for vertex in cheap_edge: if cheap_edge[vertex] != -1: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = cheap_edge[vertex] if union_find.find(lowercase__) != union_find.find(lowercase__): union_find.union(lowercase__ , lowercase__) mst_edges.append(cheap_edge[vertex]) _SCREAMING_SNAKE_CASE : str = num_components - 1 _SCREAMING_SNAKE_CASE : Union[str, Any] = Graph.build(edges=lowercase__) return mst
338
'''simple docstring''' import argparse import datetime import json import time import warnings from logging import getLogger from pathlib import Path from typing import Dict, List import torch from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params lowerCAmelCase__ = getLogger(__name__) lowerCAmelCase__ = '''cuda''' if torch.cuda.is_available() else '''cpu''' def _A ( A__ , A__ , A__ , A__ = 8 , A__ = DEFAULT_DEVICE , A__=False , A__="summarization" , A__=None , **A__ , ): """simple docstring""" __lowercase = Path(A__ ).open('''w''' , encoding='''utf-8''' ) __lowercase = str(A__ ) __lowercase = AutoModelForSeqaSeqLM.from_pretrained(A__ ).to(A__ ) if fpaa: __lowercase = model.half() __lowercase = AutoTokenizer.from_pretrained(A__ ) logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type. __lowercase = time.time() # update config with task specific params use_task_specific_params(A__ , A__ ) if prefix is None: __lowercase = prefix or getattr(model.config , '''prefix''' , '''''' ) or '''''' for examples_chunk in tqdm(list(chunks(A__ , A__ ) ) ): __lowercase = [prefix + text for text in examples_chunk] __lowercase = tokenizer(A__ , return_tensors='''pt''' , truncation=A__ , padding='''longest''' ).to(A__ ) __lowercase = model.generate( input_ids=batch.input_ids , attention_mask=batch.attention_mask , **A__ , ) __lowercase = tokenizer.batch_decode(A__ , skip_special_tokens=A__ , clean_up_tokenization_spaces=A__ ) for hypothesis in dec: fout.write(hypothesis + '''\n''' ) fout.flush() fout.close() __lowercase = int(time.time() - start_time ) # seconds __lowercase = len(A__ ) return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )} def _A ( ): """simple docstring""" return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' ) def _A ( A__=True ): """simple docstring""" __lowercase = argparse.ArgumentParser() parser.add_argument('''model_name''' , type=A__ , help='''like facebook/bart-large-cnn,t5-base, etc.''' ) parser.add_argument('''input_path''' , type=A__ , help='''like cnn_dm/test.source''' ) parser.add_argument('''save_path''' , type=A__ , help='''where to save summaries''' ) parser.add_argument('''--reference_path''' , type=A__ , required=A__ , help='''like cnn_dm/test.target''' ) parser.add_argument('''--score_path''' , type=A__ , required=A__ , default='''metrics.json''' , help='''where to save metrics''' ) parser.add_argument('''--device''' , type=A__ , required=A__ , default=A__ , help='''cuda, cuda:1, cpu etc.''' ) parser.add_argument( '''--prefix''' , type=A__ , required=A__ , default=A__ , help='''will be added to the begininng of src examples''' ) parser.add_argument('''--task''' , type=A__ , default='''summarization''' , help='''used for task_specific_params + metrics''' ) parser.add_argument('''--bs''' , type=A__ , default=8 , required=A__ , help='''batch size''' ) parser.add_argument( '''--n_obs''' , type=A__ , default=-1 , required=A__ , help='''How many observations. Defaults to all.''' ) parser.add_argument('''--fp16''' , action='''store_true''' ) parser.add_argument('''--dump-args''' , action='''store_true''' , help='''print the custom hparams with the results''' ) parser.add_argument( '''--info''' , nargs='''?''' , type=A__ , const=datetime_now() , help=( '''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.''' ''' lang=en-ru. If no value is passed, the current datetime string will be used.''' ) , ) # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate __lowercase , __lowercase = parser.parse_known_args() __lowercase = parse_numeric_n_bool_cl_kwargs(A__ ) if parsed_args and verbose: print(F"parsed the following generate kwargs: {parsed_args}" ) __lowercase = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()] if args.n_obs > 0: __lowercase = examples[: args.n_obs] Path(args.save_path ).parent.mkdir(exist_ok=A__ ) if args.reference_path is None and Path(args.score_path ).exists(): warnings.warn(F"score_path {args.score_path} will be overwritten unless you type ctrl-c." ) if args.device == "cpu" and args.fpaa: # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half' raise ValueError('''Can\'t mix --fp16 and --device cpu''' ) __lowercase = generate_summaries_or_translations( A__ , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **A__ , ) if args.reference_path is None: return {} # Compute scores __lowercase = calculate_bleu if '''translation''' in args.task else calculate_rouge __lowercase = [x.rstrip() for x in open(args.save_path ).readlines()] __lowercase = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(A__ )] __lowercase = score_fn(A__ , A__ ) scores.update(A__ ) if args.dump_args: scores.update(A__ ) if args.info: __lowercase = args.info if verbose: print(A__ ) if args.score_path is not None: json.dump(A__ , open(args.score_path , '''w''' ) ) return scores if __name__ == "__main__": # Usage for MT: # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@ run_generate(verbose=True)
41
0
'''simple docstring''' import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() A_ = logging.get_logger(__name__) def _UpperCamelCase ( __UpperCamelCase ) -> Dict: print('Loading config file...' ) def flatten_yaml_as_dict(__UpperCamelCase ,__UpperCamelCase="" ,__UpperCamelCase="." ): lowerCamelCase_ = [] for k, v in d.items(): lowerCamelCase_ = parent_key + sep + k if parent_key else k if isinstance(__UpperCamelCase ,collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(__UpperCamelCase ,__UpperCamelCase ,sep=__UpperCamelCase ).items() ) else: items.append((new_key, v) ) return dict(__UpperCamelCase ) lowerCamelCase_ = argparse.Namespace() with open(__UpperCamelCase ,'r' ) as yaml_file: try: lowerCamelCase_ = yaml.load(__UpperCamelCase ,Loader=yaml.FullLoader ) lowerCamelCase_ = flatten_yaml_as_dict(__UpperCamelCase ) for k, v in flat_cfg.items(): setattr(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) except yaml.YAMLError as exc: logger.error('Error while loading config file: {}. Error message: {}'.format(__UpperCamelCase ,str(__UpperCamelCase ) ) ) return config def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> Optional[int]: lowerCamelCase_ = MobileViTVaConfig() lowerCamelCase_ = False # dataset if task_name.startswith('imagenet1k_' ): lowerCamelCase_ = 10_00 if int(task_name.strip().split('_' )[-1] ) == 3_84: lowerCamelCase_ = 3_84 else: lowerCamelCase_ = 2_56 lowerCamelCase_ = 'imagenet-1k-id2label.json' elif task_name.startswith('imagenet21k_to_1k_' ): lowerCamelCase_ = 2_10_00 if int(task_name.strip().split('_' )[-1] ) == 3_84: lowerCamelCase_ = 3_84 else: lowerCamelCase_ = 2_56 lowerCamelCase_ = 'imagenet-22k-id2label.json' elif task_name.startswith('ade20k_' ): lowerCamelCase_ = 1_51 lowerCamelCase_ = 5_12 lowerCamelCase_ = 'ade20k-id2label.json' lowerCamelCase_ = True elif task_name.startswith('voc_' ): lowerCamelCase_ = 21 lowerCamelCase_ = 5_12 lowerCamelCase_ = 'pascal-voc-id2label.json' lowerCamelCase_ = True # orig_config lowerCamelCase_ = load_orig_config_file(__UpperCamelCase ) assert getattr(__UpperCamelCase ,'model.classification.name' ,-1 ) == "mobilevit_v2", "Invalid model" lowerCamelCase_ = getattr(__UpperCamelCase ,'model.classification.mitv2.width_multiplier' ,1.0 ) assert ( getattr(__UpperCamelCase ,'model.classification.mitv2.attn_norm_layer' ,-1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" lowerCamelCase_ = getattr(__UpperCamelCase ,'model.classification.activation.name' ,'swish' ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: lowerCamelCase_ = getattr(__UpperCamelCase ,'model.segmentation.output_stride' ,16 ) if "_deeplabv3" in task_name: lowerCamelCase_ = getattr(__UpperCamelCase ,'model.segmentation.deeplabv3.aspp_rates' ,[12, 24, 36] ) lowerCamelCase_ = getattr(__UpperCamelCase ,'model.segmentation.deeplabv3.aspp_out_channels' ,5_12 ) lowerCamelCase_ = getattr(__UpperCamelCase ,'model.segmentation.deeplabv3.aspp_dropout' ,0.1 ) # id2label lowerCamelCase_ = 'huggingface/label-files' lowerCamelCase_ = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type='dataset' ) ,'r' ) ) lowerCamelCase_ = {int(__UpperCamelCase ): v for k, v in idalabel.items()} lowerCamelCase_ = idalabel lowerCamelCase_ = {v: k for k, v in idalabel.items()} return config def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> List[str]: lowerCamelCase_ = dct.pop(__UpperCamelCase ) lowerCamelCase_ = val def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase=False ) -> Dict: if base_model: lowerCamelCase_ = '' else: lowerCamelCase_ = 'mobilevitv2.' lowerCamelCase_ = [] for k in state_dict.keys(): if k[:8] == "encoder.": lowerCamelCase_ = k[8:] else: lowerCamelCase_ = k if ".block." in k: lowerCamelCase_ = k_new.replace('.block.' ,'.' ) if ".conv." in k: lowerCamelCase_ = k_new.replace('.conv.' ,'.convolution.' ) if ".norm." in k: lowerCamelCase_ = k_new.replace('.norm.' ,'.normalization.' ) if "conv_1." in k: lowerCamelCase_ = k_new.replace('conv_1.' ,f'''{model_prefix}conv_stem.''' ) for i in [1, 2]: if f'''layer_{i}.''' in k: lowerCamelCase_ = k_new.replace(f'''layer_{i}.''' ,f'''{model_prefix}encoder.layer.{i-1}.layer.''' ) if ".exp_1x1." in k: lowerCamelCase_ = k_new.replace('.exp_1x1.' ,'.expand_1x1.' ) if ".red_1x1." in k: lowerCamelCase_ = k_new.replace('.red_1x1.' ,'.reduce_1x1.' ) for i in [3, 4, 5]: if f'''layer_{i}.0.''' in k: lowerCamelCase_ = k_new.replace(f'''layer_{i}.0.''' ,f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' ) if f'''layer_{i}.1.local_rep.0.''' in k: lowerCamelCase_ = k_new.replace(f'''layer_{i}.1.local_rep.0.''' ,f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' ) if f'''layer_{i}.1.local_rep.1.''' in k: lowerCamelCase_ = k_new.replace(f'''layer_{i}.1.local_rep.1.''' ,f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' ) for i in [3, 4, 5]: if i == 3: lowerCamelCase_ = [0, 1] elif i == 4: lowerCamelCase_ = [0, 1, 2, 3] elif i == 5: lowerCamelCase_ = [0, 1, 2] for j in j_in: if f'''layer_{i}.1.global_rep.{j}.''' in k: lowerCamelCase_ = k_new.replace( f'''layer_{i}.1.global_rep.{j}.''' ,f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' ) if f'''layer_{i}.1.global_rep.{j+1}.''' in k: lowerCamelCase_ = k_new.replace( f'''layer_{i}.1.global_rep.{j+1}.''' ,f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' ) if f'''layer_{i}.1.conv_proj.''' in k: lowerCamelCase_ = k_new.replace(f'''layer_{i}.1.conv_proj.''' ,f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' ) if "pre_norm_attn.0." in k: lowerCamelCase_ = k_new.replace('pre_norm_attn.0.' ,'layernorm_before.' ) if "pre_norm_attn.1." in k: lowerCamelCase_ = k_new.replace('pre_norm_attn.1.' ,'attention.' ) if "pre_norm_ffn.0." in k: lowerCamelCase_ = k_new.replace('pre_norm_ffn.0.' ,'layernorm_after.' ) if "pre_norm_ffn.1." in k: lowerCamelCase_ = k_new.replace('pre_norm_ffn.1.' ,'ffn.conv1.' ) if "pre_norm_ffn.3." in k: lowerCamelCase_ = k_new.replace('pre_norm_ffn.3.' ,'ffn.conv2.' ) if "classifier.1." in k: lowerCamelCase_ = k_new.replace('classifier.1.' ,'classifier.' ) if "seg_head." in k: lowerCamelCase_ = k_new.replace('seg_head.' ,'segmentation_head.' ) if ".aspp_layer." in k: lowerCamelCase_ = k_new.replace('.aspp_layer.' ,'.' ) if ".aspp_pool." in k: lowerCamelCase_ = k_new.replace('.aspp_pool.' ,'.' ) rename_keys.append((k, k_new) ) return rename_keys def _UpperCamelCase ( __UpperCamelCase ) -> Optional[Any]: lowerCamelCase_ = [] for k in state_dict.keys(): if k.startswith('seg_head.aux_head.' ): keys_to_ignore.append(__UpperCamelCase ) for k in keys_to_ignore: state_dict.pop(__UpperCamelCase ,__UpperCamelCase ) def _UpperCamelCase ( ) -> Optional[Any]: lowerCamelCase_ = 'http://images.cocodataset.org/val2017/000000039769.jpg' # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" lowerCamelCase_ = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw ) return im @torch.no_grad() def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Optional[int]: lowerCamelCase_ = get_mobilevitva_config(__UpperCamelCase ,__UpperCamelCase ) # load original state_dict lowerCamelCase_ = torch.load(__UpperCamelCase ,map_location='cpu' ) # load huggingface model if task_name.startswith('ade20k_' ) or task_name.startswith('voc_' ): lowerCamelCase_ = MobileViTVaForSemanticSegmentation(__UpperCamelCase ).eval() lowerCamelCase_ = False else: lowerCamelCase_ = MobileViTVaForImageClassification(__UpperCamelCase ).eval() lowerCamelCase_ = False # remove and rename some keys of load the original model lowerCamelCase_ = checkpoint remove_unused_keys(__UpperCamelCase ) lowerCamelCase_ = create_rename_keys(__UpperCamelCase ,base_model=__UpperCamelCase ) for rename_key_src, rename_key_dest in rename_keys: rename_key(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) # load modified state_dict model.load_state_dict(__UpperCamelCase ) # Check outputs on an image, prepared by MobileViTImageProcessor lowerCamelCase_ = MobileViTImageProcessor(crop_size=config.image_size ,size=config.image_size + 32 ) lowerCamelCase_ = image_processor(images=prepare_img() ,return_tensors='pt' ) lowerCamelCase_ = model(**__UpperCamelCase ) # verify classification model if task_name.startswith('imagenet' ): lowerCamelCase_ = outputs.logits lowerCamelCase_ = logits.argmax(-1 ).item() print('Predicted class:' ,model.config.idalabel[predicted_class_idx] ) if task_name.startswith('imagenet1k_256' ) and config.width_multiplier == 1.0: # expected_logits for base variant lowerCamelCase_ = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] ) assert torch.allclose(logits[0, :3] ,__UpperCamelCase ,atol=1e-4 ) Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase ) print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__UpperCamelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": A_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--task", default="imagenet1k_256", type=str, help=( "Name of the task for which the MobileViTV2 model you'd like to convert is trained on . " "\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n " ), choices=[ "imagenet1k_256", "imagenet1k_384", "imagenet21k_to_1k_256", "imagenet21k_to_1k_384", "ade20k_deeplabv3", "voc_deeplabv3", ], ) parser.add_argument( "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)." ) parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.") parser.add_argument( "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory." ) A_ = parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
42
'''simple docstring''' from __future__ import annotations def _UpperCamelCase ( __UpperCamelCase ) -> bool: lowerCamelCase_ = str(__UpperCamelCase ) return len(__UpperCamelCase ) == 9 and set(__UpperCamelCase ) == set('123456789' ) def _UpperCamelCase ( ) -> int | None: for base_num in range(99_99 ,49_99 ,-1 ): lowerCamelCase_ = 10_00_02 * base_num if is_9_pandigital(__UpperCamelCase ): return candidate for base_num in range(3_33 ,99 ,-1 ): lowerCamelCase_ = 1_00_20_03 * base_num if is_9_pandigital(__UpperCamelCase ): return candidate return None if __name__ == "__main__": print(f'''{solution() = }''')
42
1
'''simple docstring''' import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging A_ = logging.get_logger(__name__) class UpperCAmelCase ( UpperCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE_ = ['input_features', 'is_longer'] def __init__( self , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=48000 , SCREAMING_SNAKE_CASE_=480 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=1024 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_ = 0 , SCREAMING_SNAKE_CASE_ = 14000 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "fusion" , SCREAMING_SNAKE_CASE_ = "repeatpad" , **SCREAMING_SNAKE_CASE_ , ) -> List[Any]: '''simple docstring''' super().__init__( feature_size=SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , padding_value=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) lowerCamelCase_ = top_db lowerCamelCase_ = truncation lowerCamelCase_ = padding lowerCamelCase_ = fft_window_size lowerCamelCase_ = (fft_window_size >> 1) + 1 lowerCamelCase_ = hop_length lowerCamelCase_ = max_length_s lowerCamelCase_ = max_length_s * sampling_rate lowerCamelCase_ = sampling_rate lowerCamelCase_ = frequency_min lowerCamelCase_ = frequency_max lowerCamelCase_ = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=SCREAMING_SNAKE_CASE_ , min_frequency=SCREAMING_SNAKE_CASE_ , max_frequency=SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , norm=SCREAMING_SNAKE_CASE_ , mel_scale='htk' , ) lowerCamelCase_ = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=SCREAMING_SNAKE_CASE_ , min_frequency=SCREAMING_SNAKE_CASE_ , max_frequency=SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , norm='slaney' , mel_scale='slaney' , ) def UpperCamelCase( self ) -> Dict[str, Any]: '''simple docstring''' lowerCamelCase_ = copy.deepcopy(self.__dict__ ) lowerCamelCase_ = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> np.ndarray: '''simple docstring''' lowerCamelCase_ = spectrogram( SCREAMING_SNAKE_CASE_ , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=SCREAMING_SNAKE_CASE_ , log_mel='dB' , ) return log_mel_spectrogram.T def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]: '''simple docstring''' lowerCamelCase_ = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk lowerCamelCase_ = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk lowerCamelCase_ = [0] # randomly choose index for each part lowerCamelCase_ = np.random.choice(ranges[0] ) lowerCamelCase_ = np.random.choice(ranges[1] ) lowerCamelCase_ = np.random.choice(ranges[2] ) lowerCamelCase_ = mel[idx_front : idx_front + chunk_frames, :] lowerCamelCase_ = mel[idx_middle : idx_middle + chunk_frames, :] lowerCamelCase_ = mel[idx_back : idx_back + chunk_frames, :] lowerCamelCase_ = torch.tensor(mel[None, None, :] ) lowerCamelCase_ = torch.nn.functional.interpolate( SCREAMING_SNAKE_CASE_ , size=[chunk_frames, 64] , mode='bilinear' , align_corners=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = mel_shrink[0][0].numpy() lowerCamelCase_ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 ) return mel_fusion def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> np.array: '''simple docstring''' if waveform.shape[0] > max_length: if truncation == "rand_trunc": lowerCamelCase_ = True # random crop to max_length (for compatibility) -> this should be handled by self.pad lowerCamelCase_ = len(SCREAMING_SNAKE_CASE_ ) - max_length lowerCamelCase_ = np.random.randint(0 , overflow + 1 ) lowerCamelCase_ = waveform[idx : idx + max_length] lowerCamelCase_ = self._np_extract_fbank_features(SCREAMING_SNAKE_CASE_ , self.mel_filters_slaney )[None, :] elif truncation == "fusion": lowerCamelCase_ = self._np_extract_fbank_features(SCREAMING_SNAKE_CASE_ , self.mel_filters ) lowerCamelCase_ = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed lowerCamelCase_ = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. lowerCamelCase_ = np.stack([mel, mel, mel, mel] , axis=0 ) lowerCamelCase_ = False else: lowerCamelCase_ = self._random_mel_fusion(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = True else: raise NotImplementedError(f'''data_truncating {truncation} not implemented''' ) else: lowerCamelCase_ = False # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": lowerCamelCase_ = int(max_length / len(SCREAMING_SNAKE_CASE_ ) ) lowerCamelCase_ = np.stack(np.tile(SCREAMING_SNAKE_CASE_ , n_repeat + 1 ) )[:max_length] if padding == "repeatpad": lowerCamelCase_ = int(max_length / len(SCREAMING_SNAKE_CASE_ ) ) lowerCamelCase_ = np.stack(np.tile(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) lowerCamelCase_ = np.pad(SCREAMING_SNAKE_CASE_ , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 ) if truncation == "fusion": lowerCamelCase_ = self._np_extract_fbank_features(SCREAMING_SNAKE_CASE_ , self.mel_filters ) lowerCamelCase_ = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 ) else: lowerCamelCase_ = self._np_extract_fbank_features(SCREAMING_SNAKE_CASE_ , self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ) -> BatchFeature: '''simple docstring''' lowerCamelCase_ = truncation if truncation is not None else self.truncation lowerCamelCase_ = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a''' f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input''' f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) lowerCamelCase_ = isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' ) lowerCamelCase_ = is_batched_numpy or ( isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: lowerCamelCase_ = [np.asarray(SCREAMING_SNAKE_CASE_ , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ): lowerCamelCase_ = np.asarray(SCREAMING_SNAKE_CASE_ , dtype=np.floataa ) elif isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowerCamelCase_ = raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowerCamelCase_ = [np.asarray(SCREAMING_SNAKE_CASE_ )] # convert to mel spectrogram, truncate and pad if needed. lowerCamelCase_ = [ self._get_input_mel(SCREAMING_SNAKE_CASE_ , max_length if max_length else self.nb_max_samples , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for waveform in raw_speech ] lowerCamelCase_ = [] lowerCamelCase_ = [] for mel, longer in padded_inputs: input_mel.append(SCREAMING_SNAKE_CASE_ ) is_longer.append(SCREAMING_SNAKE_CASE_ ) if truncation == "fusion" and sum(SCREAMING_SNAKE_CASE_ ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer lowerCamelCase_ = np.random.randint(0 , len(SCREAMING_SNAKE_CASE_ ) ) lowerCamelCase_ = True if isinstance(input_mel[0] , SCREAMING_SNAKE_CASE_ ): lowerCamelCase_ = [np.asarray(SCREAMING_SNAKE_CASE_ , dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool lowerCamelCase_ = [[longer] for longer in is_longer] lowerCamelCase_ = {'input_features': input_mel, 'is_longer': is_longer} lowerCamelCase_ = BatchFeature(SCREAMING_SNAKE_CASE_ ) if return_tensors is not None: lowerCamelCase_ = input_features.convert_to_tensors(SCREAMING_SNAKE_CASE_ ) return input_features
42
'''simple docstring''' A_ = "Input must be a string of 8 numbers plus letter" A_ = "TRWAGMYFPDXBNJZSQVHLCKE" def _UpperCamelCase ( __UpperCamelCase ) -> bool: if not isinstance(__UpperCamelCase ,__UpperCamelCase ): lowerCamelCase_ = f'''Expected string as input, found {type(__UpperCamelCase ).__name__}''' raise TypeError(__UpperCamelCase ) lowerCamelCase_ = spanish_id.replace('-' ,'' ).upper() if len(__UpperCamelCase ) != 9: raise ValueError(__UpperCamelCase ) try: lowerCamelCase_ = int(spanish_id_clean[0:8] ) lowerCamelCase_ = spanish_id_clean[8] except ValueError as ex: raise ValueError(__UpperCamelCase ) from ex if letter.isdigit(): raise ValueError(__UpperCamelCase ) return letter == LOOKUP_LETTERS[number % 23] if __name__ == "__main__": import doctest doctest.testmod()
42
1
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_realm import RealmTokenizer A_ = logging.get_logger(__name__) A_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} A_ = { "vocab_file": { "google/realm-cc-news-pretrained-embedder": ( "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt" ), "google/realm-cc-news-pretrained-encoder": ( "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt" ), "google/realm-cc-news-pretrained-scorer": ( "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt" ), "google/realm-cc-news-pretrained-openqa": ( "https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt" ), "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt", "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt", "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt", "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt", }, "tokenizer_file": { "google/realm-cc-news-pretrained-embedder": ( "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont" ), "google/realm-cc-news-pretrained-encoder": ( "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json" ), "google/realm-cc-news-pretrained-scorer": ( "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json" ), "google/realm-cc-news-pretrained-openqa": ( "https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json" ), "google/realm-orqa-nq-openqa": ( "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json" ), "google/realm-orqa-nq-reader": ( "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json" ), "google/realm-orqa-wq-openqa": ( "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json" ), "google/realm-orqa-wq-reader": ( "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json" ), }, } A_ = { "google/realm-cc-news-pretrained-embedder": 512, "google/realm-cc-news-pretrained-encoder": 512, "google/realm-cc-news-pretrained-scorer": 512, "google/realm-cc-news-pretrained-openqa": 512, "google/realm-orqa-nq-openqa": 512, "google/realm-orqa-nq-reader": 512, "google/realm-orqa-wq-openqa": 512, "google/realm-orqa-wq-reader": 512, } A_ = { "google/realm-cc-news-pretrained-embedder": {"do_lower_case": True}, "google/realm-cc-news-pretrained-encoder": {"do_lower_case": True}, "google/realm-cc-news-pretrained-scorer": {"do_lower_case": True}, "google/realm-cc-news-pretrained-openqa": {"do_lower_case": True}, "google/realm-orqa-nq-openqa": {"do_lower_case": True}, "google/realm-orqa-nq-reader": {"do_lower_case": True}, "google/realm-orqa-wq-openqa": {"do_lower_case": True}, "google/realm-orqa-wq-reader": {"do_lower_case": True}, } class UpperCAmelCase ( UpperCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ = PRETRAINED_INIT_CONFIGURATION SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ = RealmTokenizer def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="[UNK]" , SCREAMING_SNAKE_CASE_="[SEP]" , SCREAMING_SNAKE_CASE_="[PAD]" , SCREAMING_SNAKE_CASE_="[CLS]" , SCREAMING_SNAKE_CASE_="[MASK]" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ) -> List[Any]: '''simple docstring''' super().__init__( SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) lowerCamelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , SCREAMING_SNAKE_CASE_ ) != do_lower_case or normalizer_state.get('strip_accents' , SCREAMING_SNAKE_CASE_ ) != strip_accents or normalizer_state.get('handle_chinese_chars' , SCREAMING_SNAKE_CASE_ ) != tokenize_chinese_chars ): lowerCamelCase_ = getattr(SCREAMING_SNAKE_CASE_ , normalizer_state.pop('type' ) ) lowerCamelCase_ = do_lower_case lowerCamelCase_ = strip_accents lowerCamelCase_ = tokenize_chinese_chars lowerCamelCase_ = normalizer_class(**SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = do_lower_case def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> List[Any]: '''simple docstring''' lowerCamelCase_ = PaddingStrategy.MAX_LENGTH lowerCamelCase_ = text lowerCamelCase_ = kwargs.pop('text_pair' , SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = kwargs.pop('return_tensors' , SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = { 'input_ids': [], 'attention_mask': [], 'token_type_ids': [], } for idx, candidate_text in enumerate(SCREAMING_SNAKE_CASE_ ): if batch_text_pair is not None: lowerCamelCase_ = batch_text_pair[idx] else: lowerCamelCase_ = None lowerCamelCase_ = super().__call__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = encoded_candidates.get('input_ids' ) lowerCamelCase_ = encoded_candidates.get('attention_mask' ) lowerCamelCase_ = encoded_candidates.get('token_type_ids' ) if encoded_input_ids is not None: output_data["input_ids"].append(SCREAMING_SNAKE_CASE_ ) if encoded_attention_mask is not None: output_data["attention_mask"].append(SCREAMING_SNAKE_CASE_ ) if encoded_token_type_ids is not None: output_data["token_type_ids"].append(SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = {key: item for key, item in output_data.items() if len(SCREAMING_SNAKE_CASE_ ) != 0} return BatchEncoding(SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> List[Any]: '''simple docstring''' lowerCamelCase_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]: '''simple docstring''' lowerCamelCase_ = [self.sep_token_id] lowerCamelCase_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]: '''simple docstring''' lowerCamelCase_ = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_ , name=SCREAMING_SNAKE_CASE_ ) return tuple(SCREAMING_SNAKE_CASE_ )
42
'''simple docstring''' import json import os import unittest from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import ( VOCAB_FILES_NAMES, GPTSanJapaneseTokenizer, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE_ = GPTSanJapaneseTokenizer SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = {'do_clean_text': False, 'add_prefix_space': False} def UpperCamelCase( self ) -> Optional[Any]: '''simple docstring''' super().setUp() # fmt: off lowerCamelCase_ = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>'] # fmt: on lowerCamelCase_ = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}} # 😀 lowerCamelCase_ = {'unk_token': '<unk>'} lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['emoji_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) with open(self.emoji_file , 'w' ) as emoji_writer: emoji_writer.write(json.dumps(SCREAMING_SNAKE_CASE_ ) ) def UpperCamelCase( self , **SCREAMING_SNAKE_CASE_ ) -> Dict: '''simple docstring''' kwargs.update(self.special_tokens_map ) return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: '''simple docstring''' lowerCamelCase_ = 'こんにちは、世界。 \nこんばんは、㔺界。😀' lowerCamelCase_ = 'こんにちは、世界。 \nこんばんは、世界。😀' return input_text, output_text def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> int: '''simple docstring''' lowerCamelCase_ ,lowerCamelCase_ = self.get_input_output_texts(SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ ) return text, ids def UpperCamelCase( self ) -> Tuple: '''simple docstring''' pass # TODO add if relevant def UpperCamelCase( self ) -> Optional[int]: '''simple docstring''' pass # TODO add if relevant def UpperCamelCase( self ) -> List[Any]: '''simple docstring''' pass # TODO add if relevant def UpperCamelCase( self ) -> int: '''simple docstring''' lowerCamelCase_ = self.get_tokenizer() # Testing tokenization lowerCamelCase_ = 'こんにちは、世界。 こんばんは、㔺界。' lowerCamelCase_ = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。'] lowerCamelCase_ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Testing conversion to ids without special tokens lowerCamelCase_ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6] lowerCamelCase_ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Testing conversion to ids with special tokens lowerCamelCase_ = tokens + [tokenizer.unk_token] lowerCamelCase_ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19] lowerCamelCase_ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self ) -> Dict: '''simple docstring''' lowerCamelCase_ = self.get_tokenizer() # Testing tokenization lowerCamelCase_ = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。' lowerCamelCase_ = 'こんにちは、、、、世界。こんばんは、、、、世界。' lowerCamelCase_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @slow def UpperCamelCase( self ) -> Union[str, Any]: '''simple docstring''' lowerCamelCase_ = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' ) # Testing tokenization lowerCamelCase_ = 'こんにちは、世界。' lowerCamelCase_ = 'こんばんは、㔺界。😀' lowerCamelCase_ = 'こんにちは、世界。こんばんは、世界。😀' lowerCamelCase_ = tokenizer.encode(prefix_text + input_text ) lowerCamelCase_ = tokenizer.encode('' , prefix_text=prefix_text + input_text ) lowerCamelCase_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , prefix_text=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @slow def UpperCamelCase( self ) -> Optional[int]: '''simple docstring''' lowerCamelCase_ = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' ) # Testing tokenization lowerCamelCase_ = 'こんにちは、世界。' lowerCamelCase_ = 'こんばんは、㔺界。😀' lowerCamelCase_ = len(tokenizer.encode(SCREAMING_SNAKE_CASE_ ) ) - 2 lowerCamelCase_ = len(tokenizer.encode(SCREAMING_SNAKE_CASE_ ) ) - 2 lowerCamelCase_ = [1] + [0] * (len_prefix + len_text + 1) lowerCamelCase_ = [1] * (len_prefix + len_text + 1) + [0] lowerCamelCase_ = [1] + [1] * (len_prefix) + [0] * (len_text + 1) lowerCamelCase_ = tokenizer(prefix_text + input_text ).token_type_ids lowerCamelCase_ = tokenizer('' , prefix_text=prefix_text + input_text ).token_type_ids lowerCamelCase_ = tokenizer(SCREAMING_SNAKE_CASE_ , prefix_text=SCREAMING_SNAKE_CASE_ ).token_type_ids self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @slow def UpperCamelCase( self ) -> Dict: '''simple docstring''' lowerCamelCase_ = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' ) lowerCamelCase_ = tokenizer.encode('あンいワ' ) lowerCamelCase_ = tokenizer.encode('' , prefix_text='あンいワ' ) lowerCamelCase_ = tokenizer.encode('いワ' , prefix_text='あン' ) self.assertEqual(tokenizer.decode(SCREAMING_SNAKE_CASE_ ) , tokenizer.decode(SCREAMING_SNAKE_CASE_ ) ) self.assertEqual(tokenizer.decode(SCREAMING_SNAKE_CASE_ ) , tokenizer.decode(SCREAMING_SNAKE_CASE_ ) ) self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token @slow def UpperCamelCase( self ) -> List[str]: '''simple docstring''' lowerCamelCase_ = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' ) lowerCamelCase_ = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']] lowerCamelCase_ = tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = tokenizer.batch_encode_plus(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ ) # fmt: off lowerCamelCase_ = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]] lowerCamelCase_ = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]] lowerCamelCase_ = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]] # fmt: on self.assertListEqual(x_token.input_ids , SCREAMING_SNAKE_CASE_ ) self.assertListEqual(x_token.token_type_ids , SCREAMING_SNAKE_CASE_ ) self.assertListEqual(x_token.attention_mask , SCREAMING_SNAKE_CASE_ ) self.assertListEqual(x_token_a.input_ids , SCREAMING_SNAKE_CASE_ ) self.assertListEqual(x_token_a.token_type_ids , SCREAMING_SNAKE_CASE_ ) self.assertListEqual(x_token_a.attention_mask , SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self ) -> Any: '''simple docstring''' pass def UpperCamelCase( self ) -> List[Any]: '''simple docstring''' pass
42
1
'''simple docstring''' import doctest from collections import deque import numpy as np class UpperCAmelCase : '''simple docstring''' def __init__( self ) -> None: '''simple docstring''' lowerCamelCase_ = [2, 1, 2, -1] lowerCamelCase_ = [1, 2, 3, 4] def UpperCamelCase( self ) -> list[float]: '''simple docstring''' lowerCamelCase_ = len(self.first_signal ) lowerCamelCase_ = len(self.second_signal ) lowerCamelCase_ = max(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # create a zero matrix of max_length x max_length lowerCamelCase_ = [[0] * max_length for i in range(SCREAMING_SNAKE_CASE_ )] # fills the smaller signal with zeros to make both signals of same length if length_first_signal < length_second_signal: self.first_signal += [0] * (max_length - length_first_signal) elif length_first_signal > length_second_signal: self.second_signal += [0] * (max_length - length_second_signal) for i in range(SCREAMING_SNAKE_CASE_ ): lowerCamelCase_ = deque(self.second_signal ) rotated_signal.rotate(SCREAMING_SNAKE_CASE_ ) for j, item in enumerate(SCREAMING_SNAKE_CASE_ ): matrix[i][j] += item # multiply the matrix with the first signal lowerCamelCase_ = np.matmul(np.transpose(SCREAMING_SNAKE_CASE_ ) , np.transpose(self.first_signal ) ) # rounding-off to two decimal places return [round(SCREAMING_SNAKE_CASE_ , 2 ) for i in final_signal] if __name__ == "__main__": doctest.testmod()
42
'''simple docstring''' import inspect from typing import Callable, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import logging A_ = logging.get_logger(__name__) # pylint: disable=invalid-name class UpperCAmelCase ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> Dict: '''simple docstring''' super().__init__() self.register_modules( vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , ) def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ = "auto" ) -> List[str]: '''simple docstring''' if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory lowerCamelCase_ = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self ) -> Dict: '''simple docstring''' self.enable_attention_slicing(SCREAMING_SNAKE_CASE_ ) @torch.no_grad() def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = 7.5 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ) -> List[str]: '''simple docstring''' if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowerCamelCase_ = 1 elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowerCamelCase_ = len(SCREAMING_SNAKE_CASE_ ) else: raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(SCREAMING_SNAKE_CASE_ )}''' ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or callback_steps <= 0) ): raise ValueError( f'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' f''' {type(SCREAMING_SNAKE_CASE_ )}.''' ) # get prompt text embeddings lowerCamelCase_ = self.tokenizer( SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , ) lowerCamelCase_ = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: lowerCamelCase_ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( 'The following part of your input was truncated because CLIP can only handle sequences up to' f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) lowerCamelCase_ = text_input_ids[:, : self.tokenizer.model_max_length] if text_embeddings is None: lowerCamelCase_ = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = text_embeddings.shape lowerCamelCase_ = text_embeddings.repeat(1 , SCREAMING_SNAKE_CASE_ , 1 ) lowerCamelCase_ = text_embeddings.view(bs_embed * num_images_per_prompt , SCREAMING_SNAKE_CASE_ , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. lowerCamelCase_ = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: lowerCamelCase_ = 42 if negative_prompt is None: lowerCamelCase_ = [''] elif type(SCREAMING_SNAKE_CASE_ ) is not type(SCREAMING_SNAKE_CASE_ ): raise TypeError( f'''`negative_prompt` should be the same type to `prompt`, but got {type(SCREAMING_SNAKE_CASE_ )} !=''' f''' {type(SCREAMING_SNAKE_CASE_ )}.''' ) elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowerCamelCase_ = [negative_prompt] elif batch_size != len(SCREAMING_SNAKE_CASE_ ): raise ValueError( f'''`negative_prompt`: {negative_prompt} has batch size {len(SCREAMING_SNAKE_CASE_ )}, but `prompt`:''' f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches''' ' the batch size of `prompt`.' ) else: lowerCamelCase_ = negative_prompt lowerCamelCase_ = text_input_ids.shape[-1] lowerCamelCase_ = self.tokenizer( SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , return_tensors='pt' , ) lowerCamelCase_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method lowerCamelCase_ = uncond_embeddings.shape[1] lowerCamelCase_ = uncond_embeddings.repeat(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 1 ) lowerCamelCase_ = uncond_embeddings.view(batch_size * num_images_per_prompt , SCREAMING_SNAKE_CASE_ , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes lowerCamelCase_ = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. lowerCamelCase_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) lowerCamelCase_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64) lowerCamelCase_ = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps lowerCamelCase_ = torch.randn( SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device='cpu' , dtype=SCREAMING_SNAKE_CASE_ ).to(self.device ) lowerCamelCase_ = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device='cpu' , dtype=SCREAMING_SNAKE_CASE_ ).to( self.device ) else: lowerCamelCase_ = torch.randn( SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ ) else: if latents_reference.shape != latents_shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) lowerCamelCase_ = latents_reference.to(self.device ) lowerCamelCase_ = latents.to(self.device ) # This is the key part of the pipeline where we # try to ensure that the generated images w/ the same seed # but different sizes actually result in similar images lowerCamelCase_ = (latents_shape[3] - latents_shape_reference[3]) // 2 lowerCamelCase_ = (latents_shape[2] - latents_shape_reference[2]) // 2 lowerCamelCase_ = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx lowerCamelCase_ = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy lowerCamelCase_ = 0 if dx < 0 else dx lowerCamelCase_ = 0 if dy < 0 else dy lowerCamelCase_ = max(-dx , 0 ) lowerCamelCase_ = max(-dy , 0 ) # import pdb # pdb.set_trace() lowerCamelCase_ = latents_reference[:, :, dy : dy + h, dx : dx + w] # set timesteps self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand lowerCamelCase_ = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler lowerCamelCase_ = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] lowerCamelCase_ = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) lowerCamelCase_ = {} if accepts_eta: lowerCamelCase_ = eta for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE_ ) ): # expand the latents if we are doing classifier free guidance lowerCamelCase_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents lowerCamelCase_ = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # predict the noise residual lowerCamelCase_ = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ ).sample # perform guidance if do_classifier_free_guidance: lowerCamelCase_ ,lowerCamelCase_ = noise_pred.chunk(2 ) lowerCamelCase_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 lowerCamelCase_ = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = 1 / 0.18_215 * latents lowerCamelCase_ = self.vae.decode(SCREAMING_SNAKE_CASE_ ).sample lowerCamelCase_ = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 lowerCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if self.safety_checker is not None: lowerCamelCase_ = self.feature_extractor(self.numpy_to_pil(SCREAMING_SNAKE_CASE_ ) , return_tensors='pt' ).to( self.device ) lowerCamelCase_ ,lowerCamelCase_ = self.safety_checker( images=SCREAMING_SNAKE_CASE_ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) ) else: lowerCamelCase_ = None if output_type == "pil": lowerCamelCase_ = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ ) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=SCREAMING_SNAKE_CASE_ , nsfw_content_detected=SCREAMING_SNAKE_CASE_ )
42
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) A_ = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST", "UniSpeechForCTC", "UniSpeechForPreTraining", "UniSpeechForSequenceClassification", "UniSpeechModel", "UniSpeechPreTrainedModel", ] if TYPE_CHECKING: from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_unispeech import ( UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST, UniSpeechForCTC, UniSpeechForPreTraining, UniSpeechForSequenceClassification, UniSpeechModel, UniSpeechPreTrainedModel, ) else: import sys A_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
42
'''simple docstring''' import pprint import requests A_ = "https://zenquotes.io/api" def _UpperCamelCase ( ) -> list: return requests.get(API_ENDPOINT_URL + '/today' ).json() def _UpperCamelCase ( ) -> list: return requests.get(API_ENDPOINT_URL + '/random' ).json() if __name__ == "__main__": A_ = random_quotes() pprint.pprint(response)
42
1
'''simple docstring''' import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BridgeTowerImageProcessor class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 32 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 1 / 255 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = [0.48_145_466, 0.4_578_275, 0.40_821_073] , SCREAMING_SNAKE_CASE_ = [0.26_862_954, 0.26_130_258, 0.27_577_711] , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=30 , SCREAMING_SNAKE_CASE_=400 , SCREAMING_SNAKE_CASE_=3 , ) -> Optional[int]: '''simple docstring''' lowerCamelCase_ = parent lowerCamelCase_ = do_resize lowerCamelCase_ = size if size is not None else {'shortest_edge': 288} lowerCamelCase_ = size_divisor lowerCamelCase_ = do_rescale lowerCamelCase_ = rescale_factor lowerCamelCase_ = do_normalize lowerCamelCase_ = do_center_crop lowerCamelCase_ = image_mean lowerCamelCase_ = image_std lowerCamelCase_ = do_pad lowerCamelCase_ = batch_size lowerCamelCase_ = num_channels lowerCamelCase_ = min_resolution lowerCamelCase_ = max_resolution def UpperCamelCase( self ) -> Dict: '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ) -> List[str]: '''simple docstring''' if not batched: lowerCamelCase_ = self.size['shortest_edge'] lowerCamelCase_ = image_inputs[0] if isinstance(SCREAMING_SNAKE_CASE_ , Image.Image ): lowerCamelCase_ ,lowerCamelCase_ = image.size else: lowerCamelCase_ ,lowerCamelCase_ = image.shape[1], image.shape[2] lowerCamelCase_ = size / min(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if h < w: lowerCamelCase_ ,lowerCamelCase_ = size, scale * w else: lowerCamelCase_ ,lowerCamelCase_ = scale * h, size lowerCamelCase_ = int((1333 / 800) * size ) if max(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) > max_size: lowerCamelCase_ = max_size / max(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = newh * scale lowerCamelCase_ = neww * scale lowerCamelCase_ ,lowerCamelCase_ = int(newh + 0.5 ), int(neww + 0.5 ) lowerCamelCase_ ,lowerCamelCase_ = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: lowerCamelCase_ = [] for image in image_inputs: lowerCamelCase_ ,lowerCamelCase_ = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) lowerCamelCase_ = max(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : item[0] )[0] lowerCamelCase_ = max(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE_ = BridgeTowerImageProcessor if is_vision_available() else None def UpperCamelCase( self ) -> int: '''simple docstring''' lowerCamelCase_ = BridgeTowerImageProcessingTester(self ) @property def UpperCamelCase( self ) -> List[Any]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase( self ) -> str: '''simple docstring''' lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'image_mean' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'image_std' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'do_normalize' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'do_resize' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'size' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'size_divisor' ) ) def UpperCamelCase( self ) -> Any: '''simple docstring''' pass def UpperCamelCase( self ) -> Optional[int]: '''simple docstring''' lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , Image.Image ) # Test not batched input lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values lowerCamelCase_ ,lowerCamelCase_ = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCamelCase_ = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).pixel_values lowerCamelCase_ ,lowerCamelCase_ = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCamelCase( self ) -> Any: '''simple docstring''' lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , numpify=SCREAMING_SNAKE_CASE_ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , np.ndarray ) # Test not batched input lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values lowerCamelCase_ ,lowerCamelCase_ = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCamelCase_ = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).pixel_values lowerCamelCase_ ,lowerCamelCase_ = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCamelCase( self ) -> Union[str, Any]: '''simple docstring''' lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , torchify=SCREAMING_SNAKE_CASE_ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ) # Test not batched input lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values lowerCamelCase_ ,lowerCamelCase_ = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCamelCase_ = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).pixel_values lowerCamelCase_ ,lowerCamelCase_ = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , )
42
'''simple docstring''' import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel from transformers.models.esm.modeling_esm import ( ESM_PRETRAINED_MODEL_ARCHIVE_LIST, EsmEmbeddings, create_position_ids_from_input_ids, ) class UpperCAmelCase : '''simple docstring''' def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=33 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=None , ) -> int: '''simple docstring''' lowerCamelCase_ = parent lowerCamelCase_ = batch_size lowerCamelCase_ = seq_length lowerCamelCase_ = is_training lowerCamelCase_ = use_input_mask lowerCamelCase_ = use_token_type_ids lowerCamelCase_ = use_labels lowerCamelCase_ = vocab_size lowerCamelCase_ = hidden_size lowerCamelCase_ = num_hidden_layers lowerCamelCase_ = num_attention_heads lowerCamelCase_ = intermediate_size lowerCamelCase_ = hidden_act lowerCamelCase_ = hidden_dropout_prob lowerCamelCase_ = attention_probs_dropout_prob lowerCamelCase_ = max_position_embeddings lowerCamelCase_ = type_vocab_size lowerCamelCase_ = type_sequence_label_size lowerCamelCase_ = initializer_range lowerCamelCase_ = num_labels lowerCamelCase_ = num_choices lowerCamelCase_ = scope def UpperCamelCase( self ) -> str: '''simple docstring''' lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase_ = None if self.use_input_mask: lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase_ = None lowerCamelCase_ = None lowerCamelCase_ = None if self.use_labels: lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase_ = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase( self ) -> List[str]: '''simple docstring''' return EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: '''simple docstring''' lowerCamelCase_ = EsmModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple: '''simple docstring''' lowerCamelCase_ = EsmForMaskedLM(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any: '''simple docstring''' lowerCamelCase_ = self.num_labels lowerCamelCase_ = EsmForTokenClassification(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase( self ) -> Any: '''simple docstring''' lowerCamelCase_ = self.prepare_config_and_inputs() ( ( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) , ) = config_and_inputs lowerCamelCase_ = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = ( ( EsmForMaskedLM, EsmModel, EsmForSequenceClassification, EsmForTokenClassification, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE_ = () SCREAMING_SNAKE_CASE_ = ( { 'feature-extraction': EsmModel, 'fill-mask': EsmForMaskedLM, 'text-classification': EsmForSequenceClassification, 'token-classification': EsmForTokenClassification, 'zero-shot': EsmForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ = True def UpperCamelCase( self ) -> List[Any]: '''simple docstring''' lowerCamelCase_ = EsmModelTester(self ) lowerCamelCase_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=37 ) def UpperCamelCase( self ) -> List[Any]: '''simple docstring''' self.config_tester.run_common_tests() def UpperCamelCase( self ) -> Any: '''simple docstring''' lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self ) -> str: '''simple docstring''' lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCamelCase_ = type self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self ) -> Dict: '''simple docstring''' lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self ) -> int: '''simple docstring''' lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE_ ) @slow def UpperCamelCase( self ) -> Any: '''simple docstring''' for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = EsmModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self ) -> Any: '''simple docstring''' lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()[0] lowerCamelCase_ = EsmEmbeddings(config=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = torch.as_tensor([[12, 31, 13, model.padding_idx]] ) lowerCamelCase_ = torch.as_tensor( [ [ 0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx, ] ] ) lowerCamelCase_ = create_position_ids_from_input_ids(SCREAMING_SNAKE_CASE_ , model.padding_idx ) self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) ) def UpperCamelCase( self ) -> List[Any]: '''simple docstring''' lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()[0] lowerCamelCase_ = EsmEmbeddings(config=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = torch.empty(2 , 4 , 30 ) lowerCamelCase_ = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] lowerCamelCase_ = torch.as_tensor([expected_single_positions, expected_single_positions] ) lowerCamelCase_ = embeddings.create_position_ids_from_inputs_embeds(SCREAMING_SNAKE_CASE_ ) self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) ) @unittest.skip('Esm does not support embedding resizing' ) def UpperCamelCase( self ) -> Dict: '''simple docstring''' pass @unittest.skip('Esm does not support embedding resizing' ) def UpperCamelCase( self ) -> Any: '''simple docstring''' pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def UpperCamelCase( self ) -> List[Any]: '''simple docstring''' pass @require_torch class UpperCAmelCase ( UpperCAmelCase__ ): '''simple docstring''' @slow def UpperCamelCase( self ) -> Any: '''simple docstring''' with torch.no_grad(): lowerCamelCase_ = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' ) model.eval() lowerCamelCase_ = torch.tensor([[0, 1, 2, 3, 4, 5]] ) lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )[0] lowerCamelCase_ = 33 lowerCamelCase_ = torch.Size((1, 6, vocab_size) ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = torch.tensor( [[[8.9_215, -10.5_898, -6.4_671], [-6.3_967, -13.9_114, -1.1_212], [-7.7_812, -13.9_516, -3.7_406]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) ) @slow def UpperCamelCase( self ) -> Tuple: '''simple docstring''' with torch.no_grad(): lowerCamelCase_ = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' ) model.eval() lowerCamelCase_ = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )[0] # compare the actual values for a slice. lowerCamelCase_ = torch.tensor( [[[0.1_444, 0.5_413, 0.3_248], [0.3_034, 0.0_053, 0.3_108], [0.3_228, -0.2_499, 0.3_415]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
42
1
'''simple docstring''' import os import warnings from typing import List, Optional from ...tokenization_utils_base import BatchEncoding from ...utils import logging from .configuration_rag import RagConfig A_ = logging.get_logger(__name__) class UpperCAmelCase : '''simple docstring''' def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int: '''simple docstring''' lowerCamelCase_ = question_encoder lowerCamelCase_ = generator lowerCamelCase_ = self.question_encoder def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> List[str]: '''simple docstring''' if os.path.isfile(SCREAMING_SNAKE_CASE_ ): raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''' ) os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = os.path.join(SCREAMING_SNAKE_CASE_ , 'question_encoder_tokenizer' ) lowerCamelCase_ = os.path.join(SCREAMING_SNAKE_CASE_ , 'generator_tokenizer' ) self.question_encoder.save_pretrained(SCREAMING_SNAKE_CASE_ ) self.generator.save_pretrained(SCREAMING_SNAKE_CASE_ ) @classmethod def UpperCamelCase( cls , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Dict: '''simple docstring''' from ..auto.tokenization_auto import AutoTokenizer lowerCamelCase_ = kwargs.pop('config' , SCREAMING_SNAKE_CASE_ ) if config is None: lowerCamelCase_ = RagConfig.from_pretrained(SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = AutoTokenizer.from_pretrained( SCREAMING_SNAKE_CASE_ , config=config.question_encoder , subfolder='question_encoder_tokenizer' ) lowerCamelCase_ = AutoTokenizer.from_pretrained( SCREAMING_SNAKE_CASE_ , config=config.generator , subfolder='generator_tokenizer' ) return cls(question_encoder=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ ) def __call__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: '''simple docstring''' return self.current_tokenizer(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Dict: '''simple docstring''' return self.generator.batch_decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Dict: '''simple docstring''' return self.generator.decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self ) -> Dict: '''simple docstring''' lowerCamelCase_ = self.question_encoder def UpperCamelCase( self ) -> Optional[Any]: '''simple docstring''' lowerCamelCase_ = self.generator def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "longest" , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = True , **SCREAMING_SNAKE_CASE_ , ) -> BatchEncoding: '''simple docstring''' warnings.warn( '`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the ' 'regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` ' 'context manager to prepare your targets. See the documentation of your specific tokenizer for more ' 'details' , SCREAMING_SNAKE_CASE_ , ) if max_length is None: lowerCamelCase_ = self.current_tokenizer.model_max_length lowerCamelCase_ = self( SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: lowerCamelCase_ = self.current_tokenizer.model_max_length lowerCamelCase_ = self( text_target=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) lowerCamelCase_ = labels['input_ids'] return model_inputs
42
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) A_ = { "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST", "ResNetForImageClassification", "ResNetModel", "ResNetPreTrainedModel", "ResNetBackbone", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST", "TFResNetForImageClassification", "TFResNetModel", "TFResNetPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ "FlaxResNetForImageClassification", "FlaxResNetModel", "FlaxResNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_resnet import ( RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, ResNetBackbone, ResNetForImageClassification, ResNetModel, ResNetPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_resnet import ( TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFResNetForImageClassification, TFResNetModel, TFResNetPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel else: import sys A_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
42
1
'''simple docstring''' import os import tempfile import unittest from transformers import NezhaConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, ) from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCAmelCase : '''simple docstring''' def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=128 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=None , ) -> Tuple: '''simple docstring''' lowerCamelCase_ = parent lowerCamelCase_ = batch_size lowerCamelCase_ = seq_length lowerCamelCase_ = is_training lowerCamelCase_ = use_input_mask lowerCamelCase_ = use_token_type_ids lowerCamelCase_ = use_labels lowerCamelCase_ = vocab_size lowerCamelCase_ = hidden_size lowerCamelCase_ = num_hidden_layers lowerCamelCase_ = num_attention_heads lowerCamelCase_ = intermediate_size lowerCamelCase_ = hidden_act lowerCamelCase_ = hidden_dropout_prob lowerCamelCase_ = attention_probs_dropout_prob lowerCamelCase_ = max_position_embeddings lowerCamelCase_ = type_vocab_size lowerCamelCase_ = type_sequence_label_size lowerCamelCase_ = initializer_range lowerCamelCase_ = num_labels lowerCamelCase_ = num_choices lowerCamelCase_ = scope def UpperCamelCase( self ) -> Optional[Any]: '''simple docstring''' lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase_ = None if self.use_input_mask: lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase_ = None if self.use_token_type_ids: lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase_ = None lowerCamelCase_ = None lowerCamelCase_ = None if self.use_labels: lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase_ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase( self ) -> Dict: '''simple docstring''' return NezhaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , ) def UpperCamelCase( self ) -> Optional[int]: '''simple docstring''' ( ( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) , ) = self.prepare_config_and_inputs() lowerCamelCase_ = True lowerCamelCase_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int: '''simple docstring''' lowerCamelCase_ = NezhaModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> Dict: '''simple docstring''' lowerCamelCase_ = True lowerCamelCase_ = NezhaModel(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() lowerCamelCase_ = model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , encoder_attention_mask=SCREAMING_SNAKE_CASE_ , ) lowerCamelCase_ = model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , ) lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: '''simple docstring''' lowerCamelCase_ = NezhaForMaskedLM(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: '''simple docstring''' lowerCamelCase_ = NezhaForNextSentencePrediction(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() lowerCamelCase_ = model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: '''simple docstring''' lowerCamelCase_ = NezhaForPreTraining(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() lowerCamelCase_ = model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , next_sentence_label=SCREAMING_SNAKE_CASE_ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]: '''simple docstring''' lowerCamelCase_ = NezhaForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() lowerCamelCase_ = model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str: '''simple docstring''' lowerCamelCase_ = self.num_labels lowerCamelCase_ = NezhaForSequenceClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]: '''simple docstring''' lowerCamelCase_ = self.num_labels lowerCamelCase_ = NezhaForTokenClassification(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int: '''simple docstring''' lowerCamelCase_ = self.num_choices lowerCamelCase_ = NezhaForMultipleChoice(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() lowerCamelCase_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase_ = model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCamelCase( self ) -> int: '''simple docstring''' lowerCamelCase_ = self.prepare_config_and_inputs() ( ( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) , ) = config_and_inputs lowerCamelCase_ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE_ = ( ( NezhaModel, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE_ = ( { 'feature-extraction': NezhaModel, 'fill-mask': NezhaForMaskedLM, 'question-answering': NezhaForQuestionAnswering, 'text-classification': NezhaForSequenceClassification, 'token-classification': NezhaForTokenClassification, 'zero-shot': NezhaForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ = True def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ) -> List[str]: '''simple docstring''' lowerCamelCase_ = super()._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ ) if return_labels: if model_class in get_values(SCREAMING_SNAKE_CASE_ ): lowerCamelCase_ = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ ) return inputs_dict def UpperCamelCase( self ) -> List[str]: '''simple docstring''' lowerCamelCase_ = NezhaModelTester(self ) lowerCamelCase_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=37 ) def UpperCamelCase( self ) -> Tuple: '''simple docstring''' self.config_tester.run_common_tests() def UpperCamelCase( self ) -> Optional[int]: '''simple docstring''' lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self ) -> int: '''simple docstring''' lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self ) -> Union[str, Any]: '''simple docstring''' ( ( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() lowerCamelCase_ = None self.model_tester.create_and_check_model_as_decoder( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) def UpperCamelCase( self ) -> Dict: '''simple docstring''' lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self ) -> Tuple: '''simple docstring''' lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self ) -> Optional[Any]: '''simple docstring''' lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_next_sequence_prediction(*SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self ) -> Optional[int]: '''simple docstring''' lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self ) -> str: '''simple docstring''' lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self ) -> List[Any]: '''simple docstring''' lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self ) -> Any: '''simple docstring''' lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE_ ) @slow def UpperCamelCase( self ) -> Tuple: '''simple docstring''' for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = NezhaModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) @slow @require_torch_gpu def UpperCamelCase( self ) -> List[Any]: '''simple docstring''' lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # NezhaForMultipleChoice behaves incorrectly in JIT environments. if model_class == NezhaForMultipleChoice: return lowerCamelCase_ = True lowerCamelCase_ = model_class(config=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = torch.jit.trace( SCREAMING_SNAKE_CASE_ , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(SCREAMING_SNAKE_CASE_ , os.path.join(SCREAMING_SNAKE_CASE_ , 'bert.pt' ) ) lowerCamelCase_ = torch.jit.load(os.path.join(SCREAMING_SNAKE_CASE_ , 'bert.pt' ) , map_location=SCREAMING_SNAKE_CASE_ ) loaded(inputs_dict['input_ids'].to(SCREAMING_SNAKE_CASE_ ) , inputs_dict['attention_mask'].to(SCREAMING_SNAKE_CASE_ ) ) @require_torch class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def UpperCamelCase( self ) -> Union[str, Any]: '''simple docstring''' lowerCamelCase_ = NezhaModel.from_pretrained('sijunhe/nezha-cn-base' ) lowerCamelCase_ = torch.tensor([[0, 1, 2, 3, 4, 5]] ) lowerCamelCase_ = torch.tensor([[0, 1, 1, 1, 1, 1]] ) with torch.no_grad(): lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )[0] lowerCamelCase_ = torch.Size((1, 6, 768) ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = torch.tensor([[[0.0_685, 0.2_441, 0.1_102], [0.0_600, 0.1_906, 0.1_349], [0.0_221, 0.0_819, 0.0_586]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) ) @slow def UpperCamelCase( self ) -> Optional[Any]: '''simple docstring''' lowerCamelCase_ = NezhaForMaskedLM.from_pretrained('sijunhe/nezha-cn-base' ) lowerCamelCase_ = torch.tensor([[0, 1, 2, 3, 4, 5]] ) lowerCamelCase_ = torch.tensor([[1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )[0] lowerCamelCase_ = torch.Size((1, 6, 21128) ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = torch.tensor( [[-2.7_939, -1.7_902, -2.2_189], [-2.8_585, -1.8_908, -2.3_723], [-2.6_499, -1.7_750, -2.2_558]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
42
'''simple docstring''' from typing import Callable, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging A_ = logging.get_logger(__name__) A_ = { "microsoft/xprophetnet-large-wiki100-cased": ( "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json" ), } class UpperCAmelCase ( UpperCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE_ = 'xlm-prophetnet' SCREAMING_SNAKE_CASE_ = ['past_key_values'] SCREAMING_SNAKE_CASE_ = { 'num_attention_heads': 'num_encoder_attention_heads', } def __init__( self , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = "gelu" , SCREAMING_SNAKE_CASE_ = 30522 , SCREAMING_SNAKE_CASE_ = 1024 , SCREAMING_SNAKE_CASE_ = 4096 , SCREAMING_SNAKE_CASE_ = 12 , SCREAMING_SNAKE_CASE_ = 16 , SCREAMING_SNAKE_CASE_ = 4096 , SCREAMING_SNAKE_CASE_ = 12 , SCREAMING_SNAKE_CASE_ = 16 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 0.02 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 0 , SCREAMING_SNAKE_CASE_ = 2 , SCREAMING_SNAKE_CASE_ = 32 , SCREAMING_SNAKE_CASE_ = 128 , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 0 , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 2 , **SCREAMING_SNAKE_CASE_ , ) -> Tuple: '''simple docstring''' lowerCamelCase_ = vocab_size lowerCamelCase_ = hidden_size lowerCamelCase_ = encoder_ffn_dim lowerCamelCase_ = num_encoder_layers lowerCamelCase_ = num_encoder_attention_heads lowerCamelCase_ = decoder_ffn_dim lowerCamelCase_ = num_decoder_layers lowerCamelCase_ = num_decoder_attention_heads lowerCamelCase_ = max_position_embeddings lowerCamelCase_ = init_std # Normal(0, this parameter) lowerCamelCase_ = activation_function # parameters for xlmprophetnet lowerCamelCase_ = ngram lowerCamelCase_ = num_buckets lowerCamelCase_ = relative_max_distance lowerCamelCase_ = disable_ngram_loss lowerCamelCase_ = eps # 3 Types of Dropout lowerCamelCase_ = attention_dropout lowerCamelCase_ = activation_dropout lowerCamelCase_ = dropout lowerCamelCase_ = use_cache super().__init__( pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , is_encoder_decoder=SCREAMING_SNAKE_CASE_ , add_cross_attention=SCREAMING_SNAKE_CASE_ , decoder_start_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) @property def UpperCamelCase( self ) -> int: '''simple docstring''' return self.num_encoder_layers + self.num_decoder_layers @num_hidden_layers.setter def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Tuple: '''simple docstring''' raise NotImplementedError( 'This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and' ' `num_decoder_layers`.' )
42
1
'''simple docstring''' import argparse import glob import importlib.util import os import re import black from doc_builder.style_doc import style_docstrings_in_code # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py A_ = "src/diffusers" A_ = "." # This is to make sure the diffusers module imported is the one in the repo. A_ = importlib.util.spec_from_file_location( "diffusers", os.path.join(DIFFUSERS_PATH, "__init__.py"), submodule_search_locations=[DIFFUSERS_PATH], ) A_ = spec.loader.load_module() def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> Optional[int]: return line.startswith(__UpperCamelCase ) or len(__UpperCamelCase ) <= 1 or re.search(R'^\s*\)(\s*->.*:|:)\s*$' ,__UpperCamelCase ) is not None def _UpperCamelCase ( __UpperCamelCase ) -> Optional[Any]: lowerCamelCase_ = object_name.split('.' ) lowerCamelCase_ = 0 # First let's find the module where our object lives. lowerCamelCase_ = parts[i] while i < len(__UpperCamelCase ) and not os.path.isfile(os.path.join(__UpperCamelCase ,f'''{module}.py''' ) ): i += 1 if i < len(__UpperCamelCase ): lowerCamelCase_ = os.path.join(__UpperCamelCase ,parts[i] ) if i >= len(__UpperCamelCase ): raise ValueError(f'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' ) with open(os.path.join(__UpperCamelCase ,f'''{module}.py''' ) ,'r' ,encoding='utf-8' ,newline='\n' ) as f: lowerCamelCase_ = f.readlines() # Now let's find the class / func in the code! lowerCamelCase_ = '' lowerCamelCase_ = 0 for name in parts[i + 1 :]: while ( line_index < len(__UpperCamelCase ) and re.search(Rf'''^{indent}(class|def)\s+{name}(\(|\:)''' ,lines[line_index] ) is None ): line_index += 1 indent += " " line_index += 1 if line_index >= len(__UpperCamelCase ): raise ValueError(f''' {object_name} does not match any function or class in {module}.''' ) # We found the beginning of the class / func, now let's find the end (when the indent diminishes). lowerCamelCase_ = line_index while line_index < len(__UpperCamelCase ) and _should_continue(lines[line_index] ,__UpperCamelCase ): line_index += 1 # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 lowerCamelCase_ = lines[start_index:line_index] return "".join(__UpperCamelCase ) A_ = re.compile(R"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)") A_ = re.compile(R"^\s*(\S+)->(\S+)(\s+.*|$)") A_ = re.compile(R"<FILL\s+[^>]*>") def _UpperCamelCase ( __UpperCamelCase ) -> List[Any]: lowerCamelCase_ = code.split('\n' ) lowerCamelCase_ = 0 while idx < len(__UpperCamelCase ) and len(lines[idx] ) == 0: idx += 1 if idx < len(__UpperCamelCase ): return re.search(R'^(\s*)\S' ,lines[idx] ).groups()[0] return "" def _UpperCamelCase ( __UpperCamelCase ) -> Optional[int]: lowerCamelCase_ = len(get_indent(__UpperCamelCase ) ) > 0 if has_indent: lowerCamelCase_ = f'''class Bla:\n{code}''' lowerCamelCase_ = black.Mode(target_versions={black.TargetVersion.PYaa} ,line_length=1_19 ,preview=__UpperCamelCase ) lowerCamelCase_ = black.format_str(__UpperCamelCase ,mode=__UpperCamelCase ) lowerCamelCase_ ,lowerCamelCase_ = style_docstrings_in_code(__UpperCamelCase ) return result[len('class Bla:\n' ) :] if has_indent else result def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase=False ) -> List[str]: with open(__UpperCamelCase ,'r' ,encoding='utf-8' ,newline='\n' ) as f: lowerCamelCase_ = f.readlines() lowerCamelCase_ = [] lowerCamelCase_ = 0 # Not a for loop cause `lines` is going to change (if `overwrite=True`). while line_index < len(__UpperCamelCase ): lowerCamelCase_ = _re_copy_warning.search(lines[line_index] ) if search is None: line_index += 1 continue # There is some copied code here, let's retrieve the original. lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = search.groups() lowerCamelCase_ = find_code_in_diffusers(__UpperCamelCase ) lowerCamelCase_ = get_indent(__UpperCamelCase ) lowerCamelCase_ = line_index + 1 if indent == theoretical_indent else line_index + 2 lowerCamelCase_ = theoretical_indent lowerCamelCase_ = start_index # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment. lowerCamelCase_ = True while line_index < len(__UpperCamelCase ) and should_continue: line_index += 1 if line_index >= len(__UpperCamelCase ): break lowerCamelCase_ = lines[line_index] lowerCamelCase_ = _should_continue(__UpperCamelCase ,__UpperCamelCase ) and re.search(f'''^{indent}# End copy''' ,__UpperCamelCase ) is None # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 lowerCamelCase_ = lines[start_index:line_index] lowerCamelCase_ = ''.join(__UpperCamelCase ) # Remove any nested `Copied from` comments to avoid circular copies lowerCamelCase_ = [line for line in theoretical_code.split('\n' ) if _re_copy_warning.search(__UpperCamelCase ) is None] lowerCamelCase_ = '\n'.join(__UpperCamelCase ) # Before comparing, use the `replace_pattern` on the original code. if len(__UpperCamelCase ) > 0: lowerCamelCase_ = replace_pattern.replace('with' ,'' ).split(',' ) lowerCamelCase_ = [_re_replace_pattern.search(__UpperCamelCase ) for p in patterns] for pattern in patterns: if pattern is None: continue lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = pattern.groups() lowerCamelCase_ = re.sub(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) if option.strip() == "all-casing": lowerCamelCase_ = re.sub(obja.lower() ,obja.lower() ,__UpperCamelCase ) lowerCamelCase_ = re.sub(obja.upper() ,obja.upper() ,__UpperCamelCase ) # Blackify after replacement. To be able to do that, we need the header (class or function definition) # from the previous line lowerCamelCase_ = blackify(lines[start_index - 1] + theoretical_code ) lowerCamelCase_ = theoretical_code[len(lines[start_index - 1] ) :] # Test for a diff and act accordingly. if observed_code != theoretical_code: diffs.append([object_name, start_index] ) if overwrite: lowerCamelCase_ = lines[:start_index] + [theoretical_code] + lines[line_index:] lowerCamelCase_ = start_index + 1 if overwrite and len(__UpperCamelCase ) > 0: # Warn the user a file has been modified. print(f'''Detected changes, rewriting {filename}.''' ) with open(__UpperCamelCase ,'w' ,encoding='utf-8' ,newline='\n' ) as f: f.writelines(__UpperCamelCase ) return diffs def _UpperCamelCase ( __UpperCamelCase = False ) -> Tuple: lowerCamelCase_ = glob.glob(os.path.join(__UpperCamelCase ,'**/*.py' ) ,recursive=__UpperCamelCase ) lowerCamelCase_ = [] for filename in all_files: lowerCamelCase_ = is_copy_consistent(__UpperCamelCase ,__UpperCamelCase ) diffs += [f'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs] if not overwrite and len(__UpperCamelCase ) > 0: lowerCamelCase_ = '\n'.join(__UpperCamelCase ) raise Exception( 'Found the following copy inconsistencies:\n' + diff + '\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.' ) if __name__ == "__main__": A_ = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") A_ = parser.parse_args() check_copies(args.fix_and_overwrite)
42
'''simple docstring''' import colorsys from PIL import Image # type: ignore def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> float: lowerCamelCase_ = x lowerCamelCase_ = y for step in range(__UpperCamelCase ): # noqa: B007 lowerCamelCase_ = a * a - b * b + x lowerCamelCase_ = 2 * a * b + y lowerCamelCase_ = a_new # divergence happens for all complex number with an absolute value # greater than 4 if a * a + b * b > 4: break return step / (max_step - 1) def _UpperCamelCase ( __UpperCamelCase ) -> tuple: if distance == 1: return (0, 0, 0) else: return (2_55, 2_55, 2_55) def _UpperCamelCase ( __UpperCamelCase ) -> tuple: if distance == 1: return (0, 0, 0) else: return tuple(round(i * 2_55 ) for i in colorsys.hsv_to_rgb(__UpperCamelCase ,1 ,1 ) ) def _UpperCamelCase ( __UpperCamelCase = 8_00 ,__UpperCamelCase = 6_00 ,__UpperCamelCase = -0.6 ,__UpperCamelCase = 0 ,__UpperCamelCase = 3.2 ,__UpperCamelCase = 50 ,__UpperCamelCase = True ,) -> Image.Image: lowerCamelCase_ = Image.new('RGB' ,(image_width, image_height) ) lowerCamelCase_ = img.load() # loop through the image-coordinates for image_x in range(__UpperCamelCase ): for image_y in range(__UpperCamelCase ): # determine the figure-coordinates based on the image-coordinates lowerCamelCase_ = figure_width / image_width * image_height lowerCamelCase_ = figure_center_x + (image_x / image_width - 0.5) * figure_width lowerCamelCase_ = figure_center_y + (image_y / image_height - 0.5) * figure_height lowerCamelCase_ = get_distance(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) # color the corresponding pixel based on the selected coloring-function if use_distance_color_coding: lowerCamelCase_ = get_color_coded_rgb(__UpperCamelCase ) else: lowerCamelCase_ = get_black_and_white_rgb(__UpperCamelCase ) return img if __name__ == "__main__": import doctest doctest.testmod() # colored version, full figure A_ = get_image() # uncomment for colored version, different section, zoomed in # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4, # figure_width = 0.8) # uncomment for black and white version, full figure # img = get_image(use_distance_color_coding = False) # uncomment to save the image # img.save("mandelbrot.png") img.show()
42
1
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_torch, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MgpstrProcessor, ViTImageProcessor @require_torch @require_vision class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE_ = ViTImageProcessor if is_vision_available() else None @property def UpperCamelCase( self ) -> int: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase( self ) -> List[str]: '''simple docstring''' lowerCamelCase_ = (3, 32, 128) lowerCamelCase_ = tempfile.mkdtemp() # fmt: off lowerCamelCase_ = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'] # fmt: on lowerCamelCase_ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) ) lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '\n' ) lowerCamelCase_ = { 'do_normalize': False, 'do_resize': True, 'image_processor_type': 'ViTImageProcessor', 'resample': 3, 'size': {'height': 32, 'width': 128}, } lowerCamelCase_ = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE_ ) with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp: json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self , **SCREAMING_SNAKE_CASE_ ) -> List[str]: '''simple docstring''' return MgpstrTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self , **SCREAMING_SNAKE_CASE_ ) -> Dict: '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self ) -> str: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def UpperCamelCase( self ) -> Any: '''simple docstring''' lowerCamelCase_ = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta ) lowerCamelCase_ = Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE_ , 0 , -1 ) ) return image_input def UpperCamelCase( self ) -> Optional[int]: '''simple docstring''' lowerCamelCase_ = self.get_tokenizer() lowerCamelCase_ = self.get_image_processor() lowerCamelCase_ = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase_ = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE_ ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , SCREAMING_SNAKE_CASE_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self ) -> Optional[Any]: '''simple docstring''' lowerCamelCase_ = self.get_tokenizer() lowerCamelCase_ = self.get_image_processor() lowerCamelCase_ = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase_ = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) lowerCamelCase_ = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0 ) lowerCamelCase_ = MgpstrProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0 ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , SCREAMING_SNAKE_CASE_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self ) -> Optional[Any]: '''simple docstring''' lowerCamelCase_ = self.get_image_processor() lowerCamelCase_ = self.get_tokenizer() lowerCamelCase_ = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = self.prepare_image_inputs() lowerCamelCase_ = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors='np' ) lowerCamelCase_ = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='np' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def UpperCamelCase( self ) -> Tuple: '''simple docstring''' lowerCamelCase_ = self.get_image_processor() lowerCamelCase_ = self.get_tokenizer() lowerCamelCase_ = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = 'test' lowerCamelCase_ = processor(text=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = tokenizer(SCREAMING_SNAKE_CASE_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def UpperCamelCase( self ) -> Union[str, Any]: '''simple docstring''' lowerCamelCase_ = self.get_image_processor() lowerCamelCase_ = self.get_tokenizer() lowerCamelCase_ = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = 'test' lowerCamelCase_ = self.prepare_image_inputs() lowerCamelCase_ = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ ) self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'labels'] ) # test if it raises when no input is passed with pytest.raises(SCREAMING_SNAKE_CASE_ ): processor() def UpperCamelCase( self ) -> Dict: '''simple docstring''' lowerCamelCase_ = self.get_image_processor() lowerCamelCase_ = self.get_tokenizer() lowerCamelCase_ = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]] lowerCamelCase_ = processor.char_decode(SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = [seq.replace(' ' , '' ) for seq in decoded_tok] self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self ) -> Optional[Any]: '''simple docstring''' lowerCamelCase_ = self.get_image_processor() lowerCamelCase_ = self.get_tokenizer() lowerCamelCase_ = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = None lowerCamelCase_ = self.prepare_image_inputs() lowerCamelCase_ = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) def UpperCamelCase( self ) -> int: '''simple docstring''' lowerCamelCase_ = self.get_image_processor() lowerCamelCase_ = self.get_tokenizer() lowerCamelCase_ = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = torch.randn(1 , 27 , 38 ) lowerCamelCase_ = torch.randn(1 , 27 , 50257 ) lowerCamelCase_ = torch.randn(1 , 27 , 30522 ) lowerCamelCase_ = processor.batch_decode([char_input, bpe_input, wp_input] ) self.assertListEqual(list(results.keys() ) , ['generated_text', 'scores', 'char_preds', 'bpe_preds', 'wp_preds'] )
42
'''simple docstring''' from math import isclose, sqrt def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> tuple[float, float, float]: lowerCamelCase_ = point_y / 4 / point_x lowerCamelCase_ = 2 * normal_gradient / (1 + normal_gradient * normal_gradient) lowerCamelCase_ = (1 - normal_gradient * normal_gradient) / ( 1 + normal_gradient * normal_gradient ) lowerCamelCase_ = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient) # to find the next point, solve the simultaeneous equations: # y^2 + 4x^2 = 100 # y - b = m * (x - a) # ==> A x^2 + B x + C = 0 lowerCamelCase_ = outgoing_gradient**2 + 4 lowerCamelCase_ = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x) lowerCamelCase_ = (point_y - outgoing_gradient * point_x) ** 2 - 1_00 lowerCamelCase_ = ( -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) lowerCamelCase_ = ( -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) # two solutions, one of which is our input point lowerCamelCase_ = x_minus if isclose(__UpperCamelCase ,__UpperCamelCase ) else x_plus lowerCamelCase_ = point_y + outgoing_gradient * (next_x - point_x) return next_x, next_y, outgoing_gradient def _UpperCamelCase ( __UpperCamelCase = 1.4 ,__UpperCamelCase = -9.6 ) -> int: lowerCamelCase_ = 0 lowerCamelCase_ = first_x_coord lowerCamelCase_ = first_y_coord lowerCamelCase_ = (10.1 - point_y) / (0.0 - point_x) while not (-0.01 <= point_x <= 0.01 and point_y > 0): lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = next_point(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) num_reflections += 1 return num_reflections if __name__ == "__main__": print(f'''{solution() = }''')
42
1
'''simple docstring''' from pathlib import Path import cva import numpy as np from matplotlib import pyplot as plt def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> np.ndarray: lowerCamelCase_ = cva.getAffineTransform(__UpperCamelCase ,__UpperCamelCase ) return cva.warpAffine(__UpperCamelCase ,__UpperCamelCase ,(rows, cols) ) if __name__ == "__main__": # read original image A_ = cva.imread( str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg") ) # turn image in gray scale value A_ = cva.cvtColor(image, cva.COLOR_BGR2GRAY) # get image shape A_ , A_ = gray_img.shape # set different points to rotate image A_ = np.array([[50, 50], [200, 50], [50, 200]], np.floataa) A_ = np.array([[10, 100], [200, 50], [100, 250]], np.floataa) A_ = np.array([[50, 50], [150, 50], [120, 200]], np.floataa) A_ = np.array([[10, 100], [80, 50], [180, 250]], np.floataa) # add all rotated images in a list A_ = [ gray_img, get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), ] # plot different image rotations A_ = plt.figure(1) A_ = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"] for i, image in enumerate(images): plt.subplot(2, 2, i + 1), plt.imshow(image, "gray") plt.title(titles[i]) plt.axis("off") plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95) plt.show()
42
'''simple docstring''' def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase = False ) -> bool: if n == 2: return True if not n % 2 or n < 2: return False if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit return False if n > 3_31_70_44_06_46_79_88_73_85_96_19_81 and not allow_probable: raise ValueError( 'Warning: upper bound of deterministic test is exceeded. ' 'Pass allow_probable=True to allow probabilistic test. ' 'A return value of True indicates a probable prime.' ) # array bounds provided by analysis lowerCamelCase_ = [ 20_47, 1_37_36_53, 25_32_60_01, 32_15_03_17_51, 2_15_23_02_89_87_47, 3_47_47_49_66_03_83, 3_41_55_00_71_72_83_21, 1, 3_82_51_23_05_65_46_41_30_51, 1, 1, 31_86_65_85_78_34_03_11_51_16_74_61, 3_31_70_44_06_46_79_88_73_85_96_19_81, ] lowerCamelCase_ = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41] for idx, _p in enumerate(__UpperCamelCase ,1 ): if n < _p: # then we have our last prime to check lowerCamelCase_ = primes[:idx] break lowerCamelCase_ ,lowerCamelCase_ = n - 1, 0 # break up n -1 into a power of 2 (s) and # remaining odd component # essentially, solve for d * 2 ** s == n - 1 while d % 2 == 0: d //= 2 s += 1 for prime in plist: lowerCamelCase_ = False for r in range(__UpperCamelCase ): lowerCamelCase_ = pow(__UpperCamelCase ,d * 2**r ,__UpperCamelCase ) # see article for analysis explanation for m if (r == 0 and m == 1) or ((m + 1) % n == 0): lowerCamelCase_ = True # this loop will not determine compositeness break if pr: continue # if pr is False, then the above loop never evaluated to true, # and the n MUST be composite return False return True def _UpperCamelCase ( ) -> None: assert not miller_rabin(5_61 ) assert miller_rabin(5_63 ) # 2047 assert not miller_rabin(83_82_01 ) assert miller_rabin(83_82_07 ) # 1_373_653 assert not miller_rabin(17_31_60_01 ) assert miller_rabin(17_31_60_17 ) # 25_326_001 assert not miller_rabin(30_78_38_66_41 ) assert miller_rabin(30_78_38_66_53 ) # 3_215_031_751 assert not miller_rabin(1_71_30_45_57_48_01 ) assert miller_rabin(1_71_30_45_57_48_19 ) # 2_152_302_898_747 assert not miller_rabin(2_77_97_99_72_83_07 ) assert miller_rabin(2_77_97_99_72_83_27 ) # 3_474_749_660_383 assert not miller_rabin(1_13_85_00_23_90_94_41 ) assert miller_rabin(1_13_85_00_23_90_95_27 ) # 341_550_071_728_321 assert not miller_rabin(1_27_50_41_01_88_48_80_43_51 ) assert miller_rabin(1_27_50_41_01_88_48_80_43_91 ) # 3_825_123_056_546_413_051 assert not miller_rabin(7_96_66_46_44_58_50_77_87_79_18_67 ) assert miller_rabin(7_96_66_46_44_58_50_77_87_79_19_51 ) # 318_665_857_834_031_151_167_461 assert not miller_rabin(55_28_40_67_74_46_64_78_97_66_03_33 ) assert miller_rabin(55_28_40_67_74_46_64_78_97_66_03_59 ) # 3_317_044_064_679_887_385_961_981 # upper limit for probabilistic test if __name__ == "__main__": test_miller_rabin()
42
1
'''simple docstring''' # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import sys import transformers A_ = "3" print("Python version:", sys.version) print("transformers version:", transformers.__version__) try: import torch print("Torch version:", torch.__version__) print("Cuda available:", torch.cuda.is_available()) print("Cuda version:", torch.version.cuda) print("CuDNN version:", torch.backends.cudnn.version()) print("Number of GPUs available:", torch.cuda.device_count()) print("NCCL version:", torch.cuda.nccl.version()) except ImportError: print("Torch version:", None) try: import deepspeed print("DeepSpeed version:", deepspeed.__version__) except ImportError: print("DeepSpeed version:", None) try: import tensorflow as tf print("TensorFlow version:", tf.__version__) print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU"))) print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU"))) except ImportError: print("TensorFlow version:", None)
42
'''simple docstring''' import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler A_ = 16 A_ = 32 def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase = 16 ,__UpperCamelCase = "bert-base-cased" ) -> List[Any]: lowerCamelCase_ = AutoTokenizer.from_pretrained(__UpperCamelCase ) lowerCamelCase_ = load_dataset('glue' ,'mrpc' ) def tokenize_function(__UpperCamelCase ): # max_length=None => use the model max length (it's actually the default) lowerCamelCase_ = tokenizer(examples['sentence1'] ,examples['sentence2'] ,truncation=__UpperCamelCase ,max_length=__UpperCamelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowerCamelCase_ = datasets.map( __UpperCamelCase ,batched=__UpperCamelCase ,remove_columns=['idx', 'sentence1', 'sentence2'] ,load_from_cache_file=__UpperCamelCase ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCamelCase_ = tokenized_datasets.rename_column('label' ,'labels' ) def collate_fn(__UpperCamelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(__UpperCamelCase ,padding='max_length' ,max_length=1_28 ,return_tensors='pt' ) return tokenizer.pad(__UpperCamelCase ,padding='longest' ,return_tensors='pt' ) # Instantiate dataloaders. lowerCamelCase_ = DataLoader( tokenized_datasets['train'] ,shuffle=__UpperCamelCase ,collate_fn=__UpperCamelCase ,batch_size=__UpperCamelCase ) lowerCamelCase_ = DataLoader( tokenized_datasets['validation'] ,shuffle=__UpperCamelCase ,collate_fn=__UpperCamelCase ,batch_size=__UpperCamelCase ) return train_dataloader, eval_dataloader def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]: model.eval() lowerCamelCase_ = 0 for step, batch in enumerate(__UpperCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): lowerCamelCase_ = model(**__UpperCamelCase ) lowerCamelCase_ = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times lowerCamelCase_ ,lowerCamelCase_ = accelerator.gather( (predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(__UpperCamelCase ) - 1: lowerCamelCase_ = predictions[: len(eval_dataloader.dataset ) - samples_seen] lowerCamelCase_ = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=__UpperCamelCase ,references=__UpperCamelCase ,) lowerCamelCase_ = metric.compute() return eval_metric["accuracy"] def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> List[str]: # Initialize accelerator lowerCamelCase_ = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCamelCase_ = config['lr'] lowerCamelCase_ = int(config['num_epochs'] ) lowerCamelCase_ = int(config['seed'] ) lowerCamelCase_ = int(config['batch_size'] ) lowerCamelCase_ = args.model_name_or_path set_seed(__UpperCamelCase ) lowerCamelCase_ ,lowerCamelCase_ = get_dataloaders(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCamelCase_ = AutoModelForSequenceClassification.from_pretrained(__UpperCamelCase ,return_dict=__UpperCamelCase ) # Instantiate optimizer lowerCamelCase_ = ( AdamW if accelerator.state.deepspeed_plugin is None or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) lowerCamelCase_ = optimizer_cls(params=model.parameters() ,lr=__UpperCamelCase ) if accelerator.state.deepspeed_plugin is not None: lowerCamelCase_ = accelerator.state.deepspeed_plugin.deepspeed_config[ 'gradient_accumulation_steps' ] else: lowerCamelCase_ = 1 lowerCamelCase_ = (len(__UpperCamelCase ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): lowerCamelCase_ = get_linear_schedule_with_warmup( optimizer=__UpperCamelCase ,num_warmup_steps=0 ,num_training_steps=__UpperCamelCase ,) else: lowerCamelCase_ = DummyScheduler(__UpperCamelCase ,total_num_steps=__UpperCamelCase ,warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = accelerator.prepare( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) # We need to keep track of how many total steps we have iterated over lowerCamelCase_ = 0 # We also need to keep track of the stating epoch so files are named properly lowerCamelCase_ = 0 lowerCamelCase_ = evaluate.load('glue' ,'mrpc' ) lowerCamelCase_ = num_epochs if args.partial_train_epoch is not None: lowerCamelCase_ = args.partial_train_epoch if args.resume_from_checkpoint: accelerator.load_state(args.resume_from_checkpoint ) lowerCamelCase_ = args.resume_from_checkpoint.split('epoch_' )[1] lowerCamelCase_ = '' for char in epoch_string: if char.isdigit(): state_epoch_num += char else: break lowerCamelCase_ = int(__UpperCamelCase ) + 1 lowerCamelCase_ = evaluation_loop(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) accelerator.print('resumed checkpoint performance:' ,__UpperCamelCase ) accelerator.print('resumed checkpoint\'s scheduler\'s lr:' ,lr_scheduler.get_lr()[0] ) accelerator.print('resumed optimizers\'s lr:' ,optimizer.param_groups[0]['lr'] ) with open(os.path.join(args.output_dir ,f'''state_{starting_epoch-1}.json''' ) ,'r' ) as f: lowerCamelCase_ = json.load(__UpperCamelCase ) assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed" assert ( resumed_state["lr"] == lr_scheduler.get_lr()[0] ), "Scheduler learning rate mismatch, loading from checkpoint failed" assert ( resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"] ), "Optimizer learning rate mismatch, loading from checkpoint failed" assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed" return # Now we train the model lowerCamelCase_ = {} for epoch in range(__UpperCamelCase ,__UpperCamelCase ): model.train() for step, batch in enumerate(__UpperCamelCase ): lowerCamelCase_ = model(**__UpperCamelCase ) lowerCamelCase_ = outputs.loss lowerCamelCase_ = loss / gradient_accumulation_steps accelerator.backward(__UpperCamelCase ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 lowerCamelCase_ = f'''epoch_{epoch}''' lowerCamelCase_ = os.path.join(args.output_dir ,__UpperCamelCase ) accelerator.save_state(__UpperCamelCase ) lowerCamelCase_ = evaluation_loop(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) lowerCamelCase_ = accuracy lowerCamelCase_ = lr_scheduler.get_lr()[0] lowerCamelCase_ = optimizer.param_groups[0]['lr'] lowerCamelCase_ = epoch lowerCamelCase_ = overall_step accelerator.print(f'''epoch {epoch}:''' ,__UpperCamelCase ) accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir ,f'''state_{epoch}.json''' ) ,'w' ) as f: json.dump(__UpperCamelCase ,__UpperCamelCase ) def _UpperCamelCase ( ) -> str: lowerCamelCase_ = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' ) parser.add_argument( '--model_name_or_path' ,type=__UpperCamelCase ,default='bert-base-cased' ,help='Path to pretrained model or model identifier from huggingface.co/models.' ,required=__UpperCamelCase ,) parser.add_argument( '--output_dir' ,type=__UpperCamelCase ,default='.' ,help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' ,) parser.add_argument( '--resume_from_checkpoint' ,type=__UpperCamelCase ,default=__UpperCamelCase ,help='If the training should continue from a checkpoint folder.' ,) parser.add_argument( '--partial_train_epoch' ,type=__UpperCamelCase ,default=__UpperCamelCase ,help='If passed, the training will stop after this number of epochs.' ,) parser.add_argument( '--num_epochs' ,type=__UpperCamelCase ,default=2 ,help='Number of train epochs.' ,) lowerCamelCase_ = parser.parse_args() lowerCamelCase_ = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16} training_function(__UpperCamelCase ,__UpperCamelCase ) if __name__ == "__main__": main()
42
1
'''simple docstring''' import torch from diffusers import KDPMaDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class UpperCAmelCase ( UpperCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE_ = (KDPMaDiscreteScheduler,) SCREAMING_SNAKE_CASE_ = 10 def UpperCamelCase( self , **SCREAMING_SNAKE_CASE_ ) -> str: '''simple docstring''' lowerCamelCase_ = { 'num_train_timesteps': 1100, 'beta_start': 0.0_001, 'beta_end': 0.02, 'beta_schedule': 'linear', } config.update(**SCREAMING_SNAKE_CASE_ ) return config def UpperCamelCase( self ) -> Any: '''simple docstring''' for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self ) -> Tuple: '''simple docstring''' for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ): self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE_ , beta_end=SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self ) -> str: '''simple docstring''' for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self ) -> Optional[Any]: '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self ) -> List[Any]: '''simple docstring''' lowerCamelCase_ = self.scheduler_classes[0] lowerCamelCase_ = self.get_scheduler_config(prediction_type='v_prediction' ) lowerCamelCase_ = scheduler_class(**SCREAMING_SNAKE_CASE_ ) scheduler.set_timesteps(self.num_inference_steps ) lowerCamelCase_ = self.dummy_model() lowerCamelCase_ = self.dummy_sample_deter * scheduler.init_noise_sigma lowerCamelCase_ = sample.to(SCREAMING_SNAKE_CASE_ ) for i, t in enumerate(scheduler.timesteps ): lowerCamelCase_ = scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = output.prev_sample lowerCamelCase_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) ) lowerCamelCase_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 4.6934E-07 ) < 1E-2 assert abs(result_mean.item() - 6.1112E-10 ) < 1E-3 else: # CUDA assert abs(result_sum.item() - 4.693428650170972E-07 ) < 1E-2 assert abs(result_mean.item() - 0.0_002 ) < 1E-3 def UpperCamelCase( self ) -> int: '''simple docstring''' if torch_device == "mps": return lowerCamelCase_ = self.scheduler_classes[0] lowerCamelCase_ = self.get_scheduler_config() lowerCamelCase_ = scheduler_class(**SCREAMING_SNAKE_CASE_ ) scheduler.set_timesteps(self.num_inference_steps ) lowerCamelCase_ = self.dummy_model() lowerCamelCase_ = self.dummy_sample_deter * scheduler.init_noise_sigma lowerCamelCase_ = sample.to(SCREAMING_SNAKE_CASE_ ) for i, t in enumerate(scheduler.timesteps ): lowerCamelCase_ = scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = output.prev_sample lowerCamelCase_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) ) lowerCamelCase_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 20.4_125 ) < 1E-2 assert abs(result_mean.item() - 0.0_266 ) < 1E-3 else: # CUDA assert abs(result_sum.item() - 20.4_125 ) < 1E-2 assert abs(result_mean.item() - 0.0_266 ) < 1E-3 def UpperCamelCase( self ) -> str: '''simple docstring''' if torch_device == "mps": return lowerCamelCase_ = self.scheduler_classes[0] lowerCamelCase_ = self.get_scheduler_config() lowerCamelCase_ = scheduler_class(**SCREAMING_SNAKE_CASE_ ) scheduler.set_timesteps(self.num_inference_steps , device=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = self.dummy_model() lowerCamelCase_ = self.dummy_sample_deter.to(SCREAMING_SNAKE_CASE_ ) * scheduler.init_noise_sigma for t in scheduler.timesteps: lowerCamelCase_ = scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = output.prev_sample lowerCamelCase_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) ) lowerCamelCase_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) ) if str(SCREAMING_SNAKE_CASE_ ).startswith('cpu' ): # The following sum varies between 148 and 156 on mps. Why? assert abs(result_sum.item() - 20.4_125 ) < 1E-2 assert abs(result_mean.item() - 0.0_266 ) < 1E-3 else: # CUDA assert abs(result_sum.item() - 20.4_125 ) < 1E-2 assert abs(result_mean.item() - 0.0_266 ) < 1E-3
42
'''simple docstring''' import numpy as np from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> np.ndarray: # prepare kernel # the kernel size have to be odd if (ksize % 2) == 0: lowerCamelCase_ = ksize + 1 lowerCamelCase_ = np.zeros((ksize, ksize) ,dtype=np.floataa ) # each value for y in range(__UpperCamelCase ): for x in range(__UpperCamelCase ): # distance from center lowerCamelCase_ = x - ksize // 2 lowerCamelCase_ = y - ksize // 2 # degree to radiant lowerCamelCase_ = theta / 1_80 * np.pi lowerCamelCase_ = np.cos(_theta ) lowerCamelCase_ = np.sin(_theta ) # get kernel x lowerCamelCase_ = cos_theta * px + sin_theta * py # get kernel y lowerCamelCase_ = -sin_theta * px + cos_theta * py # fill kernel lowerCamelCase_ = np.exp( -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi ) return gabor if __name__ == "__main__": import doctest doctest.testmod() # read original image A_ = imread("../image_data/lena.jpg") # turn image in gray scale value A_ = cvtColor(img, COLOR_BGR2GRAY) # Apply multiple Kernel to detect edges A_ = np.zeros(gray.shape[:2]) for theta in [0, 30, 60, 90, 120, 150]: A_ = gabor_filter_kernel(10, 8, theta, 10, 0, 0) out += filteraD(gray, CV_8UC3, kernel_aa) A_ = out / out.max() * 255 A_ = out.astype(np.uinta) imshow("Original", gray) imshow("Gabor filter with 20x20 mask and 6 directions", out) waitKey(0)
42
1
'''simple docstring''' from dataclasses import dataclass from typing import Optional import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .modeling_utils import ModelMixin @dataclass class UpperCAmelCase ( UpperCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE_ = 42 class UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ ): '''simple docstring''' @register_to_config def __init__( self , SCREAMING_SNAKE_CASE_ = 16 , SCREAMING_SNAKE_CASE_ = 88 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = 32 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "geglu" , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = True , ) -> Tuple: '''simple docstring''' super().__init__() lowerCamelCase_ = num_attention_heads lowerCamelCase_ = attention_head_dim lowerCamelCase_ = num_attention_heads * attention_head_dim lowerCamelCase_ = in_channels lowerCamelCase_ = torch.nn.GroupNorm(num_groups=SCREAMING_SNAKE_CASE_ , num_channels=SCREAMING_SNAKE_CASE_ , eps=1E-6 , affine=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = nn.Linear(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # 3. Define transformers blocks lowerCamelCase_ = nn.ModuleList( [ BasicTransformerBlock( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , dropout=SCREAMING_SNAKE_CASE_ , cross_attention_dim=SCREAMING_SNAKE_CASE_ , activation_fn=SCREAMING_SNAKE_CASE_ , attention_bias=SCREAMING_SNAKE_CASE_ , double_self_attention=SCREAMING_SNAKE_CASE_ , norm_elementwise_affine=SCREAMING_SNAKE_CASE_ , ) for d in range(SCREAMING_SNAKE_CASE_ ) ] ) lowerCamelCase_ = nn.Linear(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_ = True , ) -> Any: '''simple docstring''' lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = hidden_states.shape lowerCamelCase_ = batch_frames // num_frames lowerCamelCase_ = hidden_states lowerCamelCase_ = hidden_states[None, :].reshape(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = hidden_states.permute(0 , 2 , 1 , 3 , 4 ) lowerCamelCase_ = self.norm(SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = self.proj_in(SCREAMING_SNAKE_CASE_ ) # 2. Blocks for block in self.transformer_blocks: lowerCamelCase_ = block( SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , timestep=SCREAMING_SNAKE_CASE_ , cross_attention_kwargs=SCREAMING_SNAKE_CASE_ , class_labels=SCREAMING_SNAKE_CASE_ , ) # 3. Output lowerCamelCase_ = self.proj_out(SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = ( hidden_states[None, None, :] .reshape(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) .permute(0 , 3 , 4 , 1 , 2 ) .contiguous() ) lowerCamelCase_ = hidden_states.reshape(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = hidden_states + residual if not return_dict: return (output,) return TransformerTemporalModelOutput(sample=SCREAMING_SNAKE_CASE_ )
42
'''simple docstring''' from ..utils import DummyObject, requires_backends class UpperCAmelCase ( metaclass=UpperCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE_ = ['transformers', 'torch', 'note_seq'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ['transformers', 'torch', 'note_seq'] ) @classmethod def UpperCamelCase( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Optional[int]: '''simple docstring''' requires_backends(cls , ['transformers', 'torch', 'note_seq'] ) @classmethod def UpperCamelCase( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> List[str]: '''simple docstring''' requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
42
1
'''simple docstring''' def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> float: if density <= 0: raise ValueError('Impossible fluid density' ) if bulk_modulus <= 0: raise ValueError('Impossible bulk modulus' ) return (bulk_modulus / density) ** 0.5 if __name__ == "__main__": import doctest doctest.testmod()
42
'''simple docstring''' import gc import unittest import numpy as np import torch from diffusers import ( AudioDiffusionPipeline, AutoencoderKL, DDIMScheduler, DDPMScheduler, DiffusionPipeline, Mel, UNetaDConditionModel, UNetaDModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase( self ) -> List[str]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @property def UpperCamelCase( self ) -> List[Any]: '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase_ = UNetaDModel( sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , ) return model @property def UpperCamelCase( self ) -> int: '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase_ = UNetaDConditionModel( sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , cross_attention_dim=10 , ) return model @property def UpperCamelCase( self ) -> Dict: '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase_ = AutoencoderKL( sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , ) lowerCamelCase_ = UNetaDModel( sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , ) return vqvae, unet @slow def UpperCamelCase( self ) -> str: '''simple docstring''' lowerCamelCase_ = 'cpu' # ensure determinism for the device-dependent torch.Generator lowerCamelCase_ = Mel( x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , ) lowerCamelCase_ = DDPMScheduler() lowerCamelCase_ = AudioDiffusionPipeline(vqvae=SCREAMING_SNAKE_CASE_ , unet=self.dummy_unet , mel=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = pipe.to(SCREAMING_SNAKE_CASE_ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(42 ) lowerCamelCase_ = pipe(generator=SCREAMING_SNAKE_CASE_ , steps=4 ) lowerCamelCase_ = output.audios[0] lowerCamelCase_ = output.images[0] lowerCamelCase_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(42 ) lowerCamelCase_ = pipe(generator=SCREAMING_SNAKE_CASE_ , steps=4 , return_dict=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = output[0][0] assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length) assert ( image.height == self.dummy_unet.config.sample_size[0] and image.width == self.dummy_unet.config.sample_size[1] ) lowerCamelCase_ = np.frombuffer(image.tobytes() , dtype='uint8' )[:10] lowerCamelCase_ = np.frombuffer(image_from_tuple.tobytes() , dtype='uint8' )[:10] lowerCamelCase_ = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0 lowerCamelCase_ = Mel( x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , ) lowerCamelCase_ = DDIMScheduler() lowerCamelCase_ = self.dummy_vqvae_and_unet lowerCamelCase_ = AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = pipe.to(SCREAMING_SNAKE_CASE_ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) np.random.seed(0 ) lowerCamelCase_ = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) ) lowerCamelCase_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(42 ) lowerCamelCase_ = pipe(raw_audio=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , start_step=5 , steps=10 ) lowerCamelCase_ = output.images[0] assert ( image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0] and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1] ) lowerCamelCase_ = np.frombuffer(image.tobytes() , dtype='uint8' )[:10] lowerCamelCase_ = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 lowerCamelCase_ = self.dummy_unet_condition lowerCamelCase_ = AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=SCREAMING_SNAKE_CASE_ , mel=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = pipe.to(SCREAMING_SNAKE_CASE_ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) np.random.seed(0 ) lowerCamelCase_ = torch.rand((1, 1, 10) ) lowerCamelCase_ = pipe(generator=SCREAMING_SNAKE_CASE_ , encoding=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = output.images[0] lowerCamelCase_ = np.frombuffer(image.tobytes() , dtype='uint8' )[:10] lowerCamelCase_ = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 @slow @require_torch_gpu class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase( self ) -> Tuple: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase( self ) -> Union[str, Any]: '''simple docstring''' lowerCamelCase_ = torch_device lowerCamelCase_ = DiffusionPipeline.from_pretrained('teticio/audio-diffusion-ddim-256' ) lowerCamelCase_ = pipe.to(SCREAMING_SNAKE_CASE_ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(42 ) lowerCamelCase_ = pipe(generator=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = output.audios[0] lowerCamelCase_ = output.images[0] assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length) assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1] lowerCamelCase_ = np.frombuffer(image.tobytes() , dtype='uint8' )[:10] lowerCamelCase_ = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
42
1
'''simple docstring''' from __future__ import annotations import time from math import sqrt # 1 for manhattan, 0 for euclidean A_ = 0 A_ = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] A_ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right A_ = tuple[int, int] class UpperCAmelCase : '''simple docstring''' def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> None: '''simple docstring''' lowerCamelCase_ = pos_x lowerCamelCase_ = pos_y lowerCamelCase_ = (pos_y, pos_x) lowerCamelCase_ = goal_x lowerCamelCase_ = goal_y lowerCamelCase_ = g_cost lowerCamelCase_ = parent lowerCamelCase_ = self.calculate_heuristic() lowerCamelCase_ = self.g_cost + self.h_cost def UpperCamelCase( self ) -> float: '''simple docstring''' lowerCamelCase_ = self.pos_x - self.goal_x lowerCamelCase_ = self.pos_y - self.goal_y if HEURISTIC == 1: return abs(SCREAMING_SNAKE_CASE_ ) + abs(SCREAMING_SNAKE_CASE_ ) else: return sqrt(dy**2 + dx**2 ) def __lt__( self , SCREAMING_SNAKE_CASE_ ) -> bool: '''simple docstring''' return self.f_cost < other.f_cost class UpperCAmelCase : '''simple docstring''' def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict: '''simple docstring''' lowerCamelCase_ = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99999 , SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = [self.start] lowerCamelCase_ = [] lowerCamelCase_ = False def UpperCamelCase( self ) -> list[TPosition]: '''simple docstring''' while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() lowerCamelCase_ = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: return self.retrace_path(SCREAMING_SNAKE_CASE_ ) self.closed_nodes.append(SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = self.get_successors(SCREAMING_SNAKE_CASE_ ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(SCREAMING_SNAKE_CASE_ ) else: # retrieve the best current path lowerCamelCase_ = self.open_nodes.pop(self.open_nodes.index(SCREAMING_SNAKE_CASE_ ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(SCREAMING_SNAKE_CASE_ ) else: self.open_nodes.append(SCREAMING_SNAKE_CASE_ ) return [self.start.pos] def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> list[Node]: '''simple docstring''' lowerCamelCase_ = [] for action in delta: lowerCamelCase_ = parent.pos_x + action[1] lowerCamelCase_ = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(SCREAMING_SNAKE_CASE_ ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , SCREAMING_SNAKE_CASE_ , ) ) return successors def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> list[TPosition]: '''simple docstring''' lowerCamelCase_ = node lowerCamelCase_ = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) lowerCamelCase_ = current_node.parent path.reverse() return path class UpperCAmelCase : '''simple docstring''' def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None: '''simple docstring''' lowerCamelCase_ = AStar(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = AStar(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = False def UpperCamelCase( self ) -> list[TPosition]: '''simple docstring''' while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes: self.fwd_astar.open_nodes.sort() self.bwd_astar.open_nodes.sort() lowerCamelCase_ = self.fwd_astar.open_nodes.pop(0 ) lowerCamelCase_ = self.bwd_astar.open_nodes.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: return self.retrace_bidirectional_path( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.fwd_astar.closed_nodes.append(SCREAMING_SNAKE_CASE_ ) self.bwd_astar.closed_nodes.append(SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = current_bwd_node lowerCamelCase_ = current_fwd_node lowerCamelCase_ = { self.fwd_astar: self.fwd_astar.get_successors(SCREAMING_SNAKE_CASE_ ), self.bwd_astar: self.bwd_astar.get_successors(SCREAMING_SNAKE_CASE_ ), } for astar in [self.fwd_astar, self.bwd_astar]: for child_node in successors[astar]: if child_node in astar.closed_nodes: continue if child_node not in astar.open_nodes: astar.open_nodes.append(SCREAMING_SNAKE_CASE_ ) else: # retrieve the best current path lowerCamelCase_ = astar.open_nodes.pop( astar.open_nodes.index(SCREAMING_SNAKE_CASE_ ) ) if child_node.g_cost < better_node.g_cost: astar.open_nodes.append(SCREAMING_SNAKE_CASE_ ) else: astar.open_nodes.append(SCREAMING_SNAKE_CASE_ ) return [self.fwd_astar.start.pos] def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> list[TPosition]: '''simple docstring''' lowerCamelCase_ = self.fwd_astar.retrace_path(SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = self.bwd_astar.retrace_path(SCREAMING_SNAKE_CASE_ ) bwd_path.pop() bwd_path.reverse() lowerCamelCase_ = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] A_ = (0, 0) A_ = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) A_ = time.time() A_ = AStar(init, goal) A_ = a_star.search() A_ = time.time() - start_time print(f'''AStar execution time = {end_time:f} seconds''') A_ = time.time() A_ = BidirectionalAStar(init, goal) A_ = time.time() - bd_start_time print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
42
'''simple docstring''' import secrets from random import shuffle from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation def _UpperCamelCase ( __UpperCamelCase = 8 ) -> str: lowerCamelCase_ = ascii_letters + digits + punctuation return "".join(secrets.choice(__UpperCamelCase ) for _ in range(__UpperCamelCase ) ) def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> str: # Password Generator = full boot with random_number, random_letters, and # random_character FUNCTIONS # Put your code here... i -= len(__UpperCamelCase ) lowerCamelCase_ = i // 3 lowerCamelCase_ = i % 3 # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) + # random_number(digits, i / 3) + random_characters(punctuation, i / 3) lowerCamelCase_ = ( chars_incl + random(__UpperCamelCase ,quotient + remainder ) + random(__UpperCamelCase ,__UpperCamelCase ) + random(__UpperCamelCase ,__UpperCamelCase ) ) lowerCamelCase_ = list(__UpperCamelCase ) shuffle(__UpperCamelCase ) return "".join(__UpperCamelCase ) # random is a generalised function for letters, characters and numbers def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> str: return "".join(secrets.choice(__UpperCamelCase ) for _ in range(__UpperCamelCase ) ) def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> Optional[int]: pass # Put your code here... def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> Tuple: pass # Put your code here... def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> str: pass # Put your code here... def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase = 8 ) -> bool: if len(__UpperCamelCase ) < min_length: # Your Password must be at least 8 characters long return False lowerCamelCase_ = any(char in ascii_uppercase for char in password ) lowerCamelCase_ = any(char in ascii_lowercase for char in password ) lowerCamelCase_ = any(char in digits for char in password ) lowerCamelCase_ = any(char in punctuation for char in password ) return upper and lower and num and spec_char # Passwords should contain UPPERCASE, lowerase # numbers, and special characters def _UpperCamelCase ( ) -> Optional[int]: lowerCamelCase_ = int(input('Please indicate the max length of your password: ' ).strip() ) lowerCamelCase_ = input( 'Please indicate the characters that must be in your password: ' ).strip() print('Password generated:' ,password_generator(__UpperCamelCase ) ) print( 'Alternative Password generated:' ,alternative_password_generator(__UpperCamelCase ,__UpperCamelCase ) ,) print('[If you are thinking of using this passsword, You better save it.]' ) if __name__ == "__main__": main()
42
1
'''simple docstring''' import os from pickle import UnpicklingError from typing import Dict, Tuple import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict import transformers from .utils import logging A_ = logging.get_logger(__name__) def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=False ) -> Tuple: try: import torch # noqa: F401 except ImportError: logger.error( 'Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see' ' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation' ' instructions.' ) raise if not is_sharded: lowerCamelCase_ = os.path.abspath(__UpperCamelCase ) logger.info(f'''Loading PyTorch weights from {pt_path}''' ) lowerCamelCase_ = torch.load(__UpperCamelCase ,map_location='cpu' ) logger.info(f'''PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.''' ) lowerCamelCase_ = convert_pytorch_state_dict_to_flax(__UpperCamelCase ,__UpperCamelCase ) else: # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files lowerCamelCase_ = convert_pytorch_sharded_state_dict_to_flax(__UpperCamelCase ,__UpperCamelCase ) return flax_state_dict def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,) -> (Tuple[str], np.ndarray): def is_key_or_prefix_key_in_dict(__UpperCamelCase ) -> bool: return len(set(__UpperCamelCase ) & {key, (model_prefix,) + key} ) > 0 # layer norm lowerCamelCase_ = pt_tuple_key[:-1] + ('scale',) if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(__UpperCamelCase ): return renamed_pt_tuple_key, pt_tensor # batch norm layer mean lowerCamelCase_ = pt_tuple_key[:-1] + ('mean',) if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(__UpperCamelCase ): return renamed_pt_tuple_key, pt_tensor # batch norm layer var lowerCamelCase_ = pt_tuple_key[:-1] + ('var',) if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(__UpperCamelCase ): return renamed_pt_tuple_key, pt_tensor # embedding lowerCamelCase_ = pt_tuple_key[:-1] + ('embedding',) if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(__UpperCamelCase ): return renamed_pt_tuple_key, pt_tensor # conv layer lowerCamelCase_ = pt_tuple_key[:-1] + ('kernel',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(__UpperCamelCase ): lowerCamelCase_ = pt_tensor.transpose(2 ,3 ,1 ,0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowerCamelCase_ = pt_tuple_key[:-1] + ('kernel',) if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(__UpperCamelCase ): lowerCamelCase_ = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowerCamelCase_ = pt_tuple_key[:-1] + ('weight',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias lowerCamelCase_ = pt_tuple_key[:-1] + ('bias',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 lowerCamelCase_ = None if pt_tuple_key[-3::2] == ("parametrizations", "original0"): lowerCamelCase_ = pt_tuple_key[-2] + '_g' elif pt_tuple_key[-3::2] == ("parametrizations", "original1"): lowerCamelCase_ = pt_tuple_key[-2] + '_v' if name is not None: lowerCamelCase_ = pt_tuple_key[:-3] + (name,) return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> Optional[Any]: # convert pytorch tensor to numpy lowerCamelCase_ = {k: v.numpy() for k, v in pt_state_dict.items()} lowerCamelCase_ = flax_model.base_model_prefix # use params dict if the model contains batch norm layers if "params" in flax_model.params: lowerCamelCase_ = flax_model.params['params'] else: lowerCamelCase_ = flax_model.params lowerCamelCase_ = flatten_dict(__UpperCamelCase ) # add batch_stats keys,values to dict if "batch_stats" in flax_model.params: lowerCamelCase_ = flatten_dict(flax_model.params['batch_stats'] ) random_flax_state_dict.update(__UpperCamelCase ) lowerCamelCase_ = {} lowerCamelCase_ = (model_prefix not in flax_model_params) and ( model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()} ) lowerCamelCase_ = (model_prefix in flax_model_params) and ( model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCamelCase_ = tuple(pt_key.split('.' ) ) # remove base model prefix if necessary lowerCamelCase_ = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: lowerCamelCase_ = pt_tuple_key[1:] # Correctly rename weight parameters lowerCamelCase_ ,lowerCamelCase_ = rename_key_and_reshape_tensor( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) # add model prefix if necessary lowerCamelCase_ = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: lowerCamelCase_ = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ''' f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1] or "var" in flax_key[-1]: lowerCamelCase_ = jnp.asarray(__UpperCamelCase ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(__UpperCamelCase ,__UpperCamelCase ) continue # also add unexpected weight so that warning is thrown lowerCamelCase_ = jnp.asarray(__UpperCamelCase ) else: # also add unexpected weight so that warning is thrown lowerCamelCase_ = jnp.asarray(__UpperCamelCase ) return unflatten_dict(__UpperCamelCase ) def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> Dict: import torch # Load the index lowerCamelCase_ = {} for shard_file in shard_filenames: # load using msgpack utils lowerCamelCase_ = torch.load(__UpperCamelCase ) lowerCamelCase_ = {k: v.numpy() for k, v in pt_state_dict.items()} lowerCamelCase_ = flax_model.base_model_prefix # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict if "batch_stats" in flax_model.params: lowerCamelCase_ = flax_model.params['params'] lowerCamelCase_ = flatten_dict(__UpperCamelCase ) random_flax_state_dict.update(flatten_dict(flax_model.params['batch_stats'] ) ) else: lowerCamelCase_ = flax_model.params lowerCamelCase_ = flatten_dict(__UpperCamelCase ) lowerCamelCase_ = (model_prefix not in flax_model_params) and ( model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()} ) lowerCamelCase_ = (model_prefix in flax_model_params) and ( model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCamelCase_ = tuple(pt_key.split('.' ) ) # remove base model prefix if necessary lowerCamelCase_ = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: lowerCamelCase_ = pt_tuple_key[1:] # Correctly rename weight parameters lowerCamelCase_ ,lowerCamelCase_ = rename_key_and_reshape_tensor( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) # add model prefix if necessary lowerCamelCase_ = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: lowerCamelCase_ = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ''' f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1]: lowerCamelCase_ = jnp.asarray(__UpperCamelCase ) continue if "var" in flax_key[-1]: lowerCamelCase_ = jnp.asarray(__UpperCamelCase ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(__UpperCamelCase ,__UpperCamelCase ) continue # also add unexpected weight so that warning is thrown lowerCamelCase_ = jnp.asarray(__UpperCamelCase ) else: # also add unexpected weight so that warning is thrown lowerCamelCase_ = jnp.asarray(__UpperCamelCase ) return unflatten_dict(__UpperCamelCase ) def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> List[Any]: lowerCamelCase_ = os.path.abspath(__UpperCamelCase ) logger.info(f'''Loading Flax weights from {flax_checkpoint_path}''' ) # import correct flax class lowerCamelCase_ = getattr(__UpperCamelCase ,'Flax' + model.__class__.__name__ ) # load flax weight dict with open(__UpperCamelCase ,'rb' ) as state_f: try: lowerCamelCase_ = from_bytes(__UpperCamelCase ,state_f.read() ) except UnpicklingError: raise EnvironmentError(f'''Unable to convert {flax_checkpoint_path} to Flax deserializable object. ''' ) return load_flax_weights_in_pytorch_model(__UpperCamelCase ,__UpperCamelCase ) def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> List[str]: try: import torch # noqa: F401 except ImportError: logger.error( 'Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see' ' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation' ' instructions.' ) raise # check if we have bf16 weights lowerCamelCase_ = flatten_dict(jax.tree_util.tree_map(lambda __UpperCamelCase : x.dtype == jnp.bfloataa ,__UpperCamelCase ) ).values() if any(__UpperCamelCase ): # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( 'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` ' 'before loading those in PyTorch model.' ) lowerCamelCase_ = jax.tree_util.tree_map( lambda __UpperCamelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params ,__UpperCamelCase ) lowerCamelCase_ = flatten_dict(__UpperCamelCase ) lowerCamelCase_ = pt_model.state_dict() lowerCamelCase_ = (pt_model.base_model_prefix in flax_state) and ( pt_model.base_model_prefix not in {k.split('.' )[0] for k in pt_model_dict.keys()} ) lowerCamelCase_ = (pt_model.base_model_prefix not in flax_state) and ( pt_model.base_model_prefix in {k.split('.' )[0] for k in pt_model_dict.keys()} ) # keep track of unexpected & missing keys lowerCamelCase_ = [] lowerCamelCase_ = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): lowerCamelCase_ = flax_key_tuple[0] == pt_model.base_model_prefix lowerCamelCase_ = '.'.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict # adapt flax_key to prepare for loading from/to base model only if load_model_with_head_into_base_model and has_base_model_prefix: lowerCamelCase_ = flax_key_tuple[1:] elif load_base_model_into_model_with_head and require_base_model_prefix: lowerCamelCase_ = (pt_model.base_model_prefix,) + flax_key_tuple # rename flax weights to PyTorch format if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(__UpperCamelCase ) not in pt_model_dict: # conv layer lowerCamelCase_ = flax_key_tuple[:-1] + ('weight',) lowerCamelCase_ = jnp.transpose(__UpperCamelCase ,(3, 2, 0, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(__UpperCamelCase ) not in pt_model_dict: # linear layer lowerCamelCase_ = flax_key_tuple[:-1] + ('weight',) lowerCamelCase_ = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: lowerCamelCase_ = flax_key_tuple[:-1] + ('weight',) # adding batch stats from flax batch norm to pt elif "mean" in flax_key_tuple[-1]: lowerCamelCase_ = flax_key_tuple[:-1] + ('running_mean',) elif "var" in flax_key_tuple[-1]: lowerCamelCase_ = flax_key_tuple[:-1] + ('running_var',) if "batch_stats" in flax_state: lowerCamelCase_ = '.'.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header else: lowerCamelCase_ = '.'.join(__UpperCamelCase ) # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation. lowerCamelCase_ = {} # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 for key in pt_model_dict: lowerCamelCase_ = key.split('.' ) lowerCamelCase_ = None if key_components[-3::2] == ["parametrizations", "original0"]: lowerCamelCase_ = key_components[-2] + '_g' elif key_components[-3::2] == ["parametrizations", "original1"]: lowerCamelCase_ = key_components[-2] + '_v' if name is not None: lowerCamelCase_ = key_components[:-3] + [name] lowerCamelCase_ = '.'.join(__UpperCamelCase ) lowerCamelCase_ = key if flax_key in special_pt_names: lowerCamelCase_ = special_pt_names[flax_key] if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( f'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected ''' f'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) else: # add weight to pytorch dict lowerCamelCase_ = np.asarray(__UpperCamelCase ) if not isinstance(__UpperCamelCase ,np.ndarray ) else flax_tensor lowerCamelCase_ = torch.from_numpy(__UpperCamelCase ) # remove from missing keys missing_keys.remove(__UpperCamelCase ) else: # weight is not expected by PyTorch model unexpected_keys.append(__UpperCamelCase ) pt_model.load_state_dict(__UpperCamelCase ) # re-transform missing_keys to list lowerCamelCase_ = list(__UpperCamelCase ) if len(__UpperCamelCase ) > 0: logger.warning( 'Some weights of the Flax model were not used when initializing the PyTorch model' f''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing''' f''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture''' ' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This' f''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect''' ' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a' ' FlaxBertForSequenceClassification model).' ) else: logger.warning(f'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n''' ) if len(__UpperCamelCase ) > 0: logger.warning( f'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly''' f''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to''' ' use it for predictions and inference.' ) else: logger.warning( f'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n''' 'If your task is similar to the task the model of the checkpoint was trained on, ' f'''you can already use {pt_model.__class__.__name__} for predictions without further training.''' ) return pt_model
42
'''simple docstring''' import inspect import os import unittest import torch import accelerate from accelerate import debug_launcher from accelerate.test_utils import ( execute_subprocess_async, require_cpu, require_huggingface_suite, require_multi_gpu, require_single_gpu, ) from accelerate.utils import patch_environment @require_huggingface_suite class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase( self ) -> Dict: '''simple docstring''' lowerCamelCase_ = inspect.getfile(accelerate.test_utils ) lowerCamelCase_ = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps', 'test_metrics.py'] ) from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401 lowerCamelCase_ = test_metrics @require_cpu def UpperCamelCase( self ) -> Union[str, Any]: '''simple docstring''' debug_launcher(self.test_metrics.main , num_processes=1 ) @require_cpu def UpperCamelCase( self ) -> Tuple: '''simple docstring''' debug_launcher(self.test_metrics.main ) @require_single_gpu def UpperCamelCase( self ) -> Any: '''simple docstring''' self.test_metrics.main() @require_multi_gpu def UpperCamelCase( self ) -> Any: '''simple docstring''' print(f'''Found {torch.cuda.device_count()} devices.''' ) lowerCamelCase_ = ['torchrun', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() )
42
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging A_ = logging.get_logger(__name__) A_ = { "google/realm-cc-news-pretrained-embedder": ( "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json" ), "google/realm-cc-news-pretrained-encoder": ( "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json" ), "google/realm-cc-news-pretrained-scorer": ( "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json" ), "google/realm-cc-news-pretrained-openqa": ( "https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json" ), "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json", "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json", "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json", "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json", # See all REALM models at https://huggingface.co/models?filter=realm } class UpperCAmelCase ( UpperCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE_ = 'realm' def __init__( self , SCREAMING_SNAKE_CASE_=30522 , SCREAMING_SNAKE_CASE_=768 , SCREAMING_SNAKE_CASE_=128 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=8 , SCREAMING_SNAKE_CASE_=3072 , SCREAMING_SNAKE_CASE_="gelu_new" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=1E-12 , SCREAMING_SNAKE_CASE_=256 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=1E-3 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=320 , SCREAMING_SNAKE_CASE_=13353718 , SCREAMING_SNAKE_CASE_=5000 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=2 , **SCREAMING_SNAKE_CASE_ , ) -> List[str]: '''simple docstring''' super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) # Common config lowerCamelCase_ = vocab_size lowerCamelCase_ = max_position_embeddings lowerCamelCase_ = hidden_size lowerCamelCase_ = retriever_proj_size lowerCamelCase_ = num_hidden_layers lowerCamelCase_ = num_attention_heads lowerCamelCase_ = num_candidates lowerCamelCase_ = intermediate_size lowerCamelCase_ = hidden_act lowerCamelCase_ = hidden_dropout_prob lowerCamelCase_ = attention_probs_dropout_prob lowerCamelCase_ = initializer_range lowerCamelCase_ = type_vocab_size lowerCamelCase_ = layer_norm_eps # Reader config lowerCamelCase_ = span_hidden_size lowerCamelCase_ = max_span_width lowerCamelCase_ = reader_layer_norm_eps lowerCamelCase_ = reader_beam_size lowerCamelCase_ = reader_seq_len # Retrieval config lowerCamelCase_ = num_block_records lowerCamelCase_ = searcher_beam_size
42
'''simple docstring''' import json import os import torch from diffusers import UNetaDModel os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True) os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True) os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True) def _UpperCamelCase ( __UpperCamelCase ) -> List[str]: if hor == 1_28: lowerCamelCase_ = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D') lowerCamelCase_ = (32, 1_28, 2_56) lowerCamelCase_ = ('UpResnetBlock1D', 'UpResnetBlock1D') elif hor == 32: lowerCamelCase_ = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D') lowerCamelCase_ = (32, 64, 1_28, 2_56) lowerCamelCase_ = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D') lowerCamelCase_ = torch.load(f'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' ) lowerCamelCase_ = model.state_dict() lowerCamelCase_ = { 'down_block_types': down_block_types, 'block_out_channels': block_out_channels, 'up_block_types': up_block_types, 'layers_per_block': 1, 'use_timestep_embedding': True, 'out_block_type': 'OutConv1DBlock', 'norm_num_groups': 8, 'downsample_each_block': False, 'in_channels': 14, 'out_channels': 14, 'extra_in_channels': 0, 'time_embedding_type': 'positional', 'flip_sin_to_cos': False, 'freq_shift': 1, 'sample_size': 6_55_36, 'mid_block_type': 'MidResTemporalBlock1D', 'act_fn': 'mish', } lowerCamelCase_ = UNetaDModel(**__UpperCamelCase ) print(f'''length of state dict: {len(state_dict.keys() )}''' ) print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' ) lowerCamelCase_ = dict(zip(model.state_dict().keys() ,hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): lowerCamelCase_ = state_dict.pop(__UpperCamelCase ) hf_value_function.load_state_dict(__UpperCamelCase ) torch.save(hf_value_function.state_dict() ,f'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' ) with open(f'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' ,'w' ) as f: json.dump(__UpperCamelCase ,__UpperCamelCase ) def _UpperCamelCase ( ) -> Tuple: lowerCamelCase_ = { 'in_channels': 14, 'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'), 'up_block_types': (), 'out_block_type': 'ValueFunction', 'mid_block_type': 'ValueFunctionMidBlock1D', 'block_out_channels': (32, 64, 1_28, 2_56), 'layers_per_block': 1, 'downsample_each_block': True, 'sample_size': 6_55_36, 'out_channels': 14, 'extra_in_channels': 0, 'time_embedding_type': 'positional', 'use_timestep_embedding': True, 'flip_sin_to_cos': False, 'freq_shift': 1, 'norm_num_groups': 8, 'act_fn': 'mish', } lowerCamelCase_ = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch' ) lowerCamelCase_ = model lowerCamelCase_ = UNetaDModel(**__UpperCamelCase ) print(f'''length of state dict: {len(state_dict.keys() )}''' ) print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' ) lowerCamelCase_ = dict(zip(state_dict.keys() ,hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): lowerCamelCase_ = state_dict.pop(__UpperCamelCase ) hf_value_function.load_state_dict(__UpperCamelCase ) torch.save(hf_value_function.state_dict() ,'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin' ) with open('hub/hopper-medium-v2/value_function/config.json' ,'w' ) as f: json.dump(__UpperCamelCase ,__UpperCamelCase ) if __name__ == "__main__": unet(32) # unet(128) value_function()
42
1
'''simple docstring''' def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> int: return int((input_a, input_a).count(0 ) != 0 ) def _UpperCamelCase ( ) -> None: assert nand_gate(0 ,0 ) == 1 assert nand_gate(0 ,1 ) == 1 assert nand_gate(1 ,0 ) == 1 assert nand_gate(1 ,1 ) == 0 if __name__ == "__main__": print(nand_gate(0, 0)) print(nand_gate(0, 1)) print(nand_gate(1, 0)) print(nand_gate(1, 1))
42
'''simple docstring''' import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin A_ = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right A_ = 250_004 A_ = 250_020 @require_sentencepiece @require_tokenizers class UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE_ = MBartTokenizer SCREAMING_SNAKE_CASE_ = MBartTokenizerFast SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = True def UpperCamelCase( self ) -> List[Any]: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing lowerCamelCase_ = MBartTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase( self ) -> List[Any]: '''simple docstring''' lowerCamelCase_ = MBartTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = tokenizer.tokenize('This is a test' ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) lowerCamelCase_ = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( SCREAMING_SNAKE_CASE_ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) lowerCamelCase_ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) self.assertListEqual( SCREAMING_SNAKE_CASE_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) lowerCamelCase_ = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ) self.assertListEqual( SCREAMING_SNAKE_CASE_ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) def UpperCamelCase( self ) -> int: '''simple docstring''' if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return lowerCamelCase_ = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = tempfile.mkdtemp() lowerCamelCase_ = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) lowerCamelCase_ = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f ) self.assertSequenceEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Checks everything loads correctly in the same way lowerCamelCase_ = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(SCREAMING_SNAKE_CASE_ ) # Save tokenizer rust, legacy_format=True lowerCamelCase_ = tempfile.mkdtemp() lowerCamelCase_ = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ , legacy_format=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ ) # Checks it save with the same files self.assertSequenceEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Checks everything loads correctly in the same way lowerCamelCase_ = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) shutil.rmtree(SCREAMING_SNAKE_CASE_ ) # Save tokenizer rust, legacy_format=False lowerCamelCase_ = tempfile.mkdtemp() lowerCamelCase_ = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ , legacy_format=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ ) # Checks it saved the tokenizer.json file self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way lowerCamelCase_ = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) shutil.rmtree(SCREAMING_SNAKE_CASE_ ) @require_torch @require_sentencepiece @require_tokenizers class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE_ = 'facebook/mbart-large-en-ro' SCREAMING_SNAKE_CASE_ = [ ' UN Chief Says There Is No Military Solution in Syria', ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.', ] SCREAMING_SNAKE_CASE_ = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', 'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei' ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor' ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.', ] SCREAMING_SNAKE_CASE_ = [82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2, EN_CODE] @classmethod def UpperCamelCase( cls ) -> Optional[Any]: '''simple docstring''' lowerCamelCase_ = MBartTokenizer.from_pretrained( cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' ) lowerCamelCase_ = 1 return cls def UpperCamelCase( self ) -> List[Any]: '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 250001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 250004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 250020 ) def UpperCamelCase( self ) -> Dict: '''simple docstring''' lowerCamelCase_ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self ) -> Optional[int]: '''simple docstring''' self.assertIn(SCREAMING_SNAKE_CASE_ , self.tokenizer.all_special_ids ) lowerCamelCase_ = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2] lowerCamelCase_ = self.tokenizer.decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertNotIn(self.tokenizer.eos_token , SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self ) -> List[Any]: '''simple docstring''' lowerCamelCase_ = ['this is gunna be a long sentence ' * 20] assert isinstance(src_text[0] , SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = 10 lowerCamelCase_ = self.tokenizer(SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ ).input_ids[0] self.assertEqual(ids[-2] , 2 ) self.assertEqual(ids[-1] , SCREAMING_SNAKE_CASE_ ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self ) -> Tuple: '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [250026, 250001] ) def UpperCamelCase( self ) -> List[str]: '''simple docstring''' lowerCamelCase_ = tempfile.mkdtemp() lowerCamelCase_ = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = MBartTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , SCREAMING_SNAKE_CASE_ ) @require_torch def UpperCamelCase( self ) -> Dict: '''simple docstring''' lowerCamelCase_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , return_tensors='pt' ) lowerCamelCase_ = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE] assert batch.decoder_input_ids[1][0].tolist() == RO_CODE assert batch.decoder_input_ids[1][-1] == 2 assert batch.labels[1][-2:].tolist() == [2, RO_CODE] @require_torch def UpperCamelCase( self ) -> Dict: '''simple docstring''' lowerCamelCase_ = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , ) lowerCamelCase_ = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) lowerCamelCase_ = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE_ ) self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] ) def UpperCamelCase( self ) -> Optional[Any]: '''simple docstring''' lowerCamelCase_ = self.tokenizer(self.src_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=3 , return_tensors='pt' ) lowerCamelCase_ = self.tokenizer( text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=10 , return_tensors='pt' ) lowerCamelCase_ = targets['input_ids'] lowerCamelCase_ = shift_tokens_right(SCREAMING_SNAKE_CASE_ , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def UpperCamelCase( self ) -> Any: '''simple docstring''' lowerCamelCase_ = self.tokenizer._build_translation_inputs( 'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE_ ) , { # A, test, EOS, en_XX 'input_ids': [[62, 3034, 2, 250004]], 'attention_mask': [[1, 1, 1, 1]], # ar_AR 'forced_bos_token_id': 250001, } , )
42
1
'''simple docstring''' def _UpperCamelCase ( __UpperCamelCase ) -> bool: if num < 0: return False lowerCamelCase_ = num lowerCamelCase_ = 0 while num > 0: lowerCamelCase_ = rev_num * 10 + (num % 10) num //= 10 return num_copy == rev_num if __name__ == "__main__": import doctest doctest.testmod()
42
'''simple docstring''' import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase( self ) -> Any: '''simple docstring''' lowerCamelCase_ = 'ylacombe/bark-small' lowerCamelCase_ = tempfile.mkdtemp() lowerCamelCase_ = 'en_speaker_1' lowerCamelCase_ = 'This is a test string' lowerCamelCase_ = 'speaker_embeddings_path.json' lowerCamelCase_ = 'speaker_embeddings' def UpperCamelCase( self , **SCREAMING_SNAKE_CASE_ ) -> int: '''simple docstring''' return AutoTokenizer.from_pretrained(self.checkpoint , **SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self ) -> Optional[Any]: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def UpperCamelCase( self ) -> Dict: '''simple docstring''' lowerCamelCase_ = self.get_tokenizer() lowerCamelCase_ = BarkProcessor(tokenizer=SCREAMING_SNAKE_CASE_ ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase_ = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def UpperCamelCase( self ) -> Optional[Any]: '''simple docstring''' lowerCamelCase_ = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) lowerCamelCase_ = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) lowerCamelCase_ = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def UpperCamelCase( self ) -> List[str]: '''simple docstring''' lowerCamelCase_ = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) lowerCamelCase_ = 35 lowerCamelCase_ = 2 lowerCamelCase_ = 8 lowerCamelCase_ = { 'semantic_prompt': np.ones(SCREAMING_SNAKE_CASE_ ), 'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ), 'fine_prompt': np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset lowerCamelCase_ = processor(text=self.input_string , voice_preset=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = inputs['history_prompt'] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(SCREAMING_SNAKE_CASE_ , np.array([] ) ).tolist() ) # test loading voice preset from npz file lowerCamelCase_ = os.path.join(self.tmpdirname , 'file.npz' ) np.savez(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = processor(text=self.input_string , voice_preset=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = inputs['history_prompt'] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(SCREAMING_SNAKE_CASE_ , np.array([] ) ).tolist() ) # test loading voice preset from the hub lowerCamelCase_ = processor(text=self.input_string , voice_preset=self.voice_preset ) def UpperCamelCase( self ) -> List[Any]: '''simple docstring''' lowerCamelCase_ = self.get_tokenizer() lowerCamelCase_ = BarkProcessor(tokenizer=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = processor(text=self.input_string ) lowerCamelCase_ = tokenizer( self.input_string , padding='max_length' , max_length=256 , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
42
1